From c49a8387bd54a64e1d8d1e7d26252fc58533ab90 Mon Sep 17 00:00:00 2001
From: Stephan Huber
Date: Mon, 24 Sep 2018 09:29:57 +0200
Subject: [PATCH 001/125] implemented `get_job_document` for AWS IoT

---
 IMPLEMENTATION_COVERAGE.md | 9199 +++++++++++++++++++-----------------
 moto/iot/models.py         |    3 +
 moto/iot/responses.py      |   10 +
 tests/test_iot/test_iot.py |   64 +-
 4 files changed, 4847 insertions(+), 4429 deletions(-)
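The `moto/iot` hunks themselves fall outside this excerpt, so as orientation for the three-line `models.py` and ten-line `responses.py` changes: moto's convention is a backend method in `models.py` plus a thin JSON handler in `responses.py`. Below is a minimal sketch of a `get_job_document` implementation along those lines — the class shapes, the `jobs` dict, and the `Job.document` attribute are assumptions based on moto's style, not the patch's actual code.

```python
import json


class Job:
    """Stand-in for moto's internal job object (assumed shape)."""

    def __init__(self, job_id, document=None):
        self.job_id = job_id
        self.document = document  # inline job document, if one was supplied


class IoTBackend:
    """In moto this subclasses BaseBackend; trimmed here to the relevant state."""

    def __init__(self):
        self.jobs = {}  # job_id -> Job, populated by create_job

    def get_job_document(self, job_id):
        # Assumed behavior: a real backend would raise a
        # ResourceNotFoundException for unknown IDs, not a bare KeyError.
        return self.jobs[job_id]


class IoTResponse:
    """In moto this subclasses BaseResponse, which parses request params."""

    def __init__(self, backend, params):
        self.iot_backend = backend
        self.params = params

    def _get_param(self, name):
        # Mirrors how moto's BaseResponse exposes parameters such as jobId.
        return self.params.get(name)

    def get_job_document(self):
        job = self.iot_backend.get_job_document(job_id=self._get_param("jobId"))
        # GetJobDocument returns the inline document; a job created from a
        # documentSource (an S3 URL) would need its content fetched instead.
        return json.dumps({"document": job.document or ""})


if __name__ == "__main__":
    backend = IoTBackend()
    backend.jobs["job-1"] = Job("job-1", document='{"field": "value"}')
    print(IoTResponse(backend, {"jobId": "job-1"}).get_job_document())
```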
diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md
index 938cc3549..7fbbbcbb0 100644
--- a/IMPLEMENTATION_COVERAGE.md
+++ b/IMPLEMENTATION_COVERAGE.md
@@ -1,4428 +1,4771 @@
[hunk body elided: roughly 4,400 `-` lines deleting the previous auto-generated per-service checklist, from `## acm - 41% implemented` through the start of `## inspector - 0% implemented`; the excerpt breaks off mid-hunk, before the corresponding `+` lines.]
describe_findings -- [ ] describe_resource_groups -- [ ] describe_rules_packages -- [ ] get_assessment_report -- [ ] get_telemetry_metadata -- [ ] list_assessment_run_agents -- [ ] list_assessment_runs -- [ ] list_assessment_targets -- [ ] list_assessment_templates -- [ ] list_event_subscriptions -- [ ] list_findings -- [ ] list_rules_packages -- [ ] list_tags_for_resource -- [ ] preview_agents -- [ ] register_cross_account_access_role -- [ ] remove_attributes_from_findings -- [ ] set_tags_for_resource -- [ ] start_assessment_run -- [ ] stop_assessment_run -- [ ] subscribe_to_event -- [ ] unsubscribe_from_event -- [ ] update_assessment_target - -## iot - 30% implemented -- [ ] accept_certificate_transfer -- [X] add_thing_to_thing_group -- [ ] associate_targets_with_job -- [ ] attach_policy -- [X] attach_principal_policy -- [X] attach_thing_principal -- [ ] cancel_certificate_transfer -- [ ] cancel_job -- [ ] clear_default_authorizer -- [ ] create_authorizer -- [ ] create_certificate_from_csr -- [X] create_job -- [X] create_keys_and_certificate -- [ ] create_ota_update -- [X] create_policy -- [ ] create_policy_version -- [ ] create_role_alias -- [ ] create_stream -- [X] create_thing -- [X] create_thing_group -- [X] create_thing_type -- [ ] create_topic_rule -- [ ] delete_authorizer -- [ ] delete_ca_certificate -- [X] delete_certificate -- [ ] delete_ota_update -- [X] delete_policy -- [ ] delete_policy_version -- [ ] delete_registration_code -- [ ] delete_role_alias -- [ ] delete_stream -- [X] delete_thing -- [X] delete_thing_group -- [X] delete_thing_type -- [ ] delete_topic_rule -- [ ] delete_v2_logging_level -- [ ] deprecate_thing_type -- [ ] describe_authorizer -- [ ] describe_ca_certificate -- [X] describe_certificate -- [ ] describe_default_authorizer -- [ ] describe_endpoint -- [ ] describe_event_configurations -- [ ] describe_index -- [X] describe_job -- [ ] describe_job_execution -- [ ] describe_role_alias -- [ ] describe_stream -- [X] describe_thing -- [X] describe_thing_group -- [ ] describe_thing_registration_task -- [X] describe_thing_type -- [ ] detach_policy -- [X] detach_principal_policy -- [X] detach_thing_principal -- [ ] disable_topic_rule -- [ ] enable_topic_rule -- [ ] get_effective_policies -- [ ] get_indexing_configuration -- [ ] get_job_document -- [ ] get_logging_options -- [ ] get_ota_update -- [X] get_policy -- [ ] get_policy_version -- [ ] get_registration_code -- [ ] get_topic_rule -- [ ] get_v2_logging_options -- [ ] list_attached_policies -- [ ] list_authorizers -- [ ] list_ca_certificates -- [X] list_certificates -- [ ] list_certificates_by_ca -- [ ] list_indices -- [ ] list_job_executions_for_job -- [ ] list_job_executions_for_thing -- [ ] list_jobs -- [ ] list_ota_updates -- [ ] list_outgoing_certificates -- [X] list_policies -- [X] list_policy_principals -- [ ] list_policy_versions -- [X] list_principal_policies -- [X] list_principal_things -- [ ] list_role_aliases -- [ ] list_streams -- [ ] list_targets_for_policy -- [X] list_thing_groups -- [X] list_thing_groups_for_thing -- [X] list_thing_principals -- [ ] list_thing_registration_task_reports -- [ ] list_thing_registration_tasks -- [X] list_thing_types -- [X] list_things -- [X] list_things_in_thing_group -- [ ] list_topic_rules -- [ ] list_v2_logging_levels -- [ ] register_ca_certificate -- [ ] register_certificate -- [ ] register_thing -- [ ] reject_certificate_transfer -- [X] remove_thing_from_thing_group -- [ ] replace_topic_rule -- [ ] search_index -- [ ] set_default_authorizer -- [ ] 
set_default_policy_version -- [ ] set_logging_options -- [ ] set_v2_logging_level -- [ ] set_v2_logging_options -- [ ] start_thing_registration_task -- [ ] stop_thing_registration_task -- [ ] test_authorization -- [ ] test_invoke_authorizer -- [ ] transfer_certificate -- [ ] update_authorizer -- [ ] update_ca_certificate -- [X] update_certificate -- [ ] update_event_configurations -- [ ] update_indexing_configuration -- [ ] update_role_alias -- [ ] update_stream -- [X] update_thing -- [X] update_thing_group -- [X] update_thing_groups_for_thing - -## iot-data - 0% implemented -- [ ] delete_thing_shadow -- [ ] get_thing_shadow -- [ ] publish -- [ ] update_thing_shadow - -## iot-jobs-data - 0% implemented -- [ ] describe_job_execution -- [ ] get_pending_job_executions -- [ ] start_next_pending_job_execution -- [ ] update_job_execution - -## kinesis - 56% implemented -- [X] add_tags_to_stream -- [X] create_stream -- [ ] decrease_stream_retention_period -- [X] delete_stream -- [ ] describe_limits -- [X] describe_stream -- [ ] describe_stream_summary -- [ ] disable_enhanced_monitoring -- [ ] enable_enhanced_monitoring -- [X] get_records -- [X] get_shard_iterator -- [ ] increase_stream_retention_period -- [ ] list_shards -- [X] list_streams -- [X] list_tags_for_stream -- [X] merge_shards -- [X] put_record -- [X] put_records -- [X] remove_tags_from_stream -- [X] split_shard -- [ ] start_stream_encryption -- [ ] stop_stream_encryption -- [ ] update_shard_count - -## kinesis-video-archived-media - 0% implemented -- [ ] get_media_for_fragment_list -- [ ] list_fragments - -## kinesis-video-media - 0% implemented -- [ ] get_media - -## kinesisanalytics - 0% implemented -- [ ] add_application_cloud_watch_logging_option -- [ ] add_application_input -- [ ] add_application_input_processing_configuration -- [ ] add_application_output -- [ ] add_application_reference_data_source -- [ ] create_application -- [ ] delete_application -- [ ] delete_application_cloud_watch_logging_option -- [ ] delete_application_input_processing_configuration -- [ ] delete_application_output -- [ ] delete_application_reference_data_source -- [ ] describe_application -- [ ] discover_input_schema -- [ ] list_applications -- [ ] start_application -- [ ] stop_application -- [ ] update_application - -## kinesisvideo - 0% implemented -- [ ] create_stream -- [ ] delete_stream -- [ ] describe_stream -- [ ] get_data_endpoint -- [ ] list_streams -- [ ] list_tags_for_stream -- [ ] tag_stream -- [ ] untag_stream -- [ ] update_data_retention -- [ ] update_stream - -## kms - 25% implemented -- [ ] cancel_key_deletion -- [ ] create_alias -- [ ] create_grant -- [X] create_key -- [ ] decrypt -- [X] delete_alias -- [ ] delete_imported_key_material -- [X] describe_key -- [ ] disable_key -- [X] disable_key_rotation -- [ ] enable_key -- [X] enable_key_rotation -- [ ] encrypt -- [ ] generate_data_key -- [ ] generate_data_key_without_plaintext -- [ ] generate_random -- [X] get_key_policy -- [X] get_key_rotation_status -- [ ] get_parameters_for_import -- [ ] import_key_material -- [ ] list_aliases -- [ ] list_grants -- [ ] list_key_policies -- [X] list_keys -- [ ] list_resource_tags -- [ ] list_retirable_grants -- [X] put_key_policy -- [ ] re_encrypt -- [ ] retire_grant -- [ ] revoke_grant -- [ ] schedule_key_deletion -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_alias -- [ ] update_key_description - -## lambda - 0% implemented -- [ ] add_permission -- [ ] create_alias -- [ ] create_event_source_mapping -- [ ] create_function -- [ ] 
delete_alias -- [ ] delete_event_source_mapping -- [ ] delete_function -- [ ] delete_function_concurrency -- [ ] get_account_settings -- [ ] get_alias -- [ ] get_event_source_mapping -- [ ] get_function -- [ ] get_function_configuration -- [ ] get_policy -- [ ] invoke -- [ ] invoke_async -- [ ] list_aliases -- [ ] list_event_source_mappings -- [ ] list_functions -- [ ] list_tags -- [ ] list_versions_by_function -- [ ] publish_version -- [ ] put_function_concurrency -- [ ] remove_permission -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_alias -- [ ] update_event_source_mapping -- [ ] update_function_code -- [ ] update_function_configuration - -## lex-models - 0% implemented -- [ ] create_bot_version -- [ ] create_intent_version -- [ ] create_slot_type_version -- [ ] delete_bot -- [ ] delete_bot_alias -- [ ] delete_bot_channel_association -- [ ] delete_bot_version -- [ ] delete_intent -- [ ] delete_intent_version -- [ ] delete_slot_type -- [ ] delete_slot_type_version -- [ ] delete_utterances -- [ ] get_bot -- [ ] get_bot_alias -- [ ] get_bot_aliases -- [ ] get_bot_channel_association -- [ ] get_bot_channel_associations -- [ ] get_bot_versions -- [ ] get_bots -- [ ] get_builtin_intent -- [ ] get_builtin_intents -- [ ] get_builtin_slot_types -- [ ] get_export -- [ ] get_import -- [ ] get_intent -- [ ] get_intent_versions -- [ ] get_intents -- [ ] get_slot_type -- [ ] get_slot_type_versions -- [ ] get_slot_types -- [ ] get_utterances_view -- [ ] put_bot -- [ ] put_bot_alias -- [ ] put_intent -- [ ] put_slot_type -- [ ] start_import - -## lex-runtime - 0% implemented -- [ ] post_content -- [ ] post_text - -## lightsail - 0% implemented -- [ ] allocate_static_ip -- [ ] attach_disk -- [ ] attach_instances_to_load_balancer -- [ ] attach_load_balancer_tls_certificate -- [ ] attach_static_ip -- [ ] close_instance_public_ports -- [ ] create_disk -- [ ] create_disk_from_snapshot -- [ ] create_disk_snapshot -- [ ] create_domain -- [ ] create_domain_entry -- [ ] create_instance_snapshot -- [ ] create_instances -- [ ] create_instances_from_snapshot -- [ ] create_key_pair -- [ ] create_load_balancer -- [ ] create_load_balancer_tls_certificate -- [ ] delete_disk -- [ ] delete_disk_snapshot -- [ ] delete_domain -- [ ] delete_domain_entry -- [ ] delete_instance -- [ ] delete_instance_snapshot -- [ ] delete_key_pair -- [ ] delete_load_balancer -- [ ] delete_load_balancer_tls_certificate -- [ ] detach_disk -- [ ] detach_instances_from_load_balancer -- [ ] detach_static_ip -- [ ] download_default_key_pair -- [ ] get_active_names -- [ ] get_blueprints -- [ ] get_bundles -- [ ] get_disk -- [ ] get_disk_snapshot -- [ ] get_disk_snapshots -- [ ] get_disks -- [ ] get_domain -- [ ] get_domains -- [ ] get_instance -- [ ] get_instance_access_details -- [ ] get_instance_metric_data -- [ ] get_instance_port_states -- [ ] get_instance_snapshot -- [ ] get_instance_snapshots -- [ ] get_instance_state -- [ ] get_instances -- [ ] get_key_pair -- [ ] get_key_pairs -- [ ] get_load_balancer -- [ ] get_load_balancer_metric_data -- [ ] get_load_balancer_tls_certificates -- [ ] get_load_balancers -- [ ] get_operation -- [ ] get_operations -- [ ] get_operations_for_resource -- [ ] get_regions -- [ ] get_static_ip -- [ ] get_static_ips -- [ ] import_key_pair -- [ ] is_vpc_peered -- [ ] open_instance_public_ports -- [ ] peer_vpc -- [ ] put_instance_public_ports -- [ ] reboot_instance -- [ ] release_static_ip -- [ ] start_instance -- [ ] stop_instance -- [ ] unpeer_vpc -- [ ] update_domain_entry -- [ ] 
update_load_balancer_attribute - -## logs - 27% implemented -- [ ] associate_kms_key -- [ ] cancel_export_task -- [ ] create_export_task -- [X] create_log_group -- [X] create_log_stream -- [ ] delete_destination -- [X] delete_log_group -- [X] delete_log_stream -- [ ] delete_metric_filter -- [ ] delete_resource_policy -- [ ] delete_retention_policy -- [ ] delete_subscription_filter -- [ ] describe_destinations -- [ ] describe_export_tasks -- [X] describe_log_groups -- [X] describe_log_streams -- [ ] describe_metric_filters -- [ ] describe_resource_policies -- [ ] describe_subscription_filters -- [ ] disassociate_kms_key -- [X] filter_log_events -- [X] get_log_events -- [ ] list_tags_log_group -- [ ] put_destination -- [ ] put_destination_policy -- [X] put_log_events -- [ ] put_metric_filter -- [ ] put_resource_policy -- [ ] put_retention_policy -- [ ] put_subscription_filter -- [ ] tag_log_group -- [ ] test_metric_filter -- [ ] untag_log_group - -## machinelearning - 0% implemented -- [ ] add_tags -- [ ] create_batch_prediction -- [ ] create_data_source_from_rds -- [ ] create_data_source_from_redshift -- [ ] create_data_source_from_s3 -- [ ] create_evaluation -- [ ] create_ml_model -- [ ] create_realtime_endpoint -- [ ] delete_batch_prediction -- [ ] delete_data_source -- [ ] delete_evaluation -- [ ] delete_ml_model -- [ ] delete_realtime_endpoint -- [ ] delete_tags -- [ ] describe_batch_predictions -- [ ] describe_data_sources -- [ ] describe_evaluations -- [ ] describe_ml_models -- [ ] describe_tags -- [ ] get_batch_prediction -- [ ] get_data_source -- [ ] get_evaluation -- [ ] get_ml_model -- [ ] predict -- [ ] update_batch_prediction -- [ ] update_data_source -- [ ] update_evaluation -- [ ] update_ml_model - -## marketplace-entitlement - 0% implemented -- [ ] get_entitlements - -## marketplacecommerceanalytics - 0% implemented -- [ ] generate_data_set -- [ ] start_support_data_export - -## mediaconvert - 0% implemented -- [ ] cancel_job -- [ ] create_job -- [ ] create_job_template -- [ ] create_preset -- [ ] create_queue -- [ ] delete_job_template -- [ ] delete_preset -- [ ] delete_queue -- [ ] describe_endpoints -- [ ] get_job -- [ ] get_job_template -- [ ] get_preset -- [ ] get_queue -- [ ] list_job_templates -- [ ] list_jobs -- [ ] list_presets -- [ ] list_queues -- [ ] update_job_template -- [ ] update_preset -- [ ] update_queue - -## medialive - 0% implemented -- [ ] create_channel -- [ ] create_input -- [ ] create_input_security_group -- [ ] delete_channel -- [ ] delete_input -- [ ] delete_input_security_group -- [ ] describe_channel -- [ ] describe_input -- [ ] describe_input_security_group -- [ ] list_channels -- [ ] list_input_security_groups -- [ ] list_inputs -- [ ] start_channel -- [ ] stop_channel -- [ ] update_channel -- [ ] update_input -- [ ] update_input_security_group - -## mediapackage - 0% implemented -- [ ] create_channel -- [ ] create_origin_endpoint -- [ ] delete_channel -- [ ] delete_origin_endpoint -- [ ] describe_channel -- [ ] describe_origin_endpoint -- [ ] list_channels -- [ ] list_origin_endpoints -- [ ] rotate_channel_credentials -- [ ] update_channel -- [ ] update_origin_endpoint - -## mediastore - 0% implemented -- [ ] create_container -- [ ] delete_container -- [ ] delete_container_policy -- [ ] delete_cors_policy -- [ ] describe_container -- [ ] get_container_policy -- [ ] get_cors_policy -- [ ] list_containers -- [ ] put_container_policy -- [ ] put_cors_policy - -## mediastore-data - 0% implemented -- [ ] delete_object -- [ ] describe_object -- [ ] 
get_object -- [ ] list_items -- [ ] put_object - -## meteringmarketplace - 0% implemented -- [ ] batch_meter_usage -- [ ] meter_usage -- [ ] resolve_customer - -## mgh - 0% implemented -- [ ] associate_created_artifact -- [ ] associate_discovered_resource -- [ ] create_progress_update_stream -- [ ] delete_progress_update_stream -- [ ] describe_application_state -- [ ] describe_migration_task -- [ ] disassociate_created_artifact -- [ ] disassociate_discovered_resource -- [ ] import_migration_task -- [ ] list_created_artifacts -- [ ] list_discovered_resources -- [ ] list_migration_tasks -- [ ] list_progress_update_streams -- [ ] notify_application_state -- [ ] notify_migration_task_state -- [ ] put_resource_attributes - -## mobile - 0% implemented -- [ ] create_project -- [ ] delete_project -- [ ] describe_bundle -- [ ] describe_project -- [ ] export_bundle -- [ ] export_project -- [ ] list_bundles -- [ ] list_projects -- [ ] update_project - -## mq - 0% implemented -- [ ] create_broker -- [ ] create_configuration -- [ ] create_user -- [ ] delete_broker -- [ ] delete_user -- [ ] describe_broker -- [ ] describe_configuration -- [ ] describe_configuration_revision -- [ ] describe_user -- [ ] list_brokers -- [ ] list_configuration_revisions -- [ ] list_configurations -- [ ] list_users -- [ ] reboot_broker -- [ ] update_broker -- [ ] update_configuration -- [ ] update_user - -## mturk - 0% implemented -- [ ] accept_qualification_request -- [ ] approve_assignment -- [ ] associate_qualification_with_worker -- [ ] create_additional_assignments_for_hit -- [ ] create_hit -- [ ] create_hit_type -- [ ] create_hit_with_hit_type -- [ ] create_qualification_type -- [ ] create_worker_block -- [ ] delete_hit -- [ ] delete_qualification_type -- [ ] delete_worker_block -- [ ] disassociate_qualification_from_worker -- [ ] get_account_balance -- [ ] get_assignment -- [ ] get_file_upload_url -- [ ] get_hit -- [ ] get_qualification_score -- [ ] get_qualification_type -- [ ] list_assignments_for_hit -- [ ] list_bonus_payments -- [ ] list_hits -- [ ] list_hits_for_qualification_type -- [ ] list_qualification_requests -- [ ] list_qualification_types -- [ ] list_review_policy_results_for_hit -- [ ] list_reviewable_hits -- [ ] list_worker_blocks -- [ ] list_workers_with_qualification_type -- [ ] notify_workers -- [ ] reject_assignment -- [ ] reject_qualification_request -- [ ] send_bonus -- [ ] send_test_event_notification -- [ ] update_expiration_for_hit -- [ ] update_hit_review_status -- [ ] update_hit_type_of_hit -- [ ] update_notification_settings -- [ ] update_qualification_type - -## opsworks - 12% implemented -- [ ] assign_instance -- [ ] assign_volume -- [ ] associate_elastic_ip -- [ ] attach_elastic_load_balancer -- [ ] clone_stack -- [X] create_app -- [ ] create_deployment -- [X] create_instance -- [X] create_layer -- [X] create_stack -- [ ] create_user_profile -- [ ] delete_app -- [ ] delete_instance -- [ ] delete_layer -- [ ] delete_stack -- [ ] delete_user_profile -- [ ] deregister_ecs_cluster -- [ ] deregister_elastic_ip -- [ ] deregister_instance -- [ ] deregister_rds_db_instance -- [ ] deregister_volume -- [ ] describe_agent_versions -- [X] describe_apps -- [ ] describe_commands -- [ ] describe_deployments -- [ ] describe_ecs_clusters -- [ ] describe_elastic_ips -- [ ] describe_elastic_load_balancers -- [X] describe_instances -- [X] describe_layers -- [ ] describe_load_based_auto_scaling -- [ ] describe_my_user_profile -- [ ] describe_operating_systems -- [ ] describe_permissions -- [ ] 
describe_raid_arrays -- [ ] describe_rds_db_instances -- [ ] describe_service_errors -- [ ] describe_stack_provisioning_parameters -- [ ] describe_stack_summary -- [X] describe_stacks -- [ ] describe_time_based_auto_scaling -- [ ] describe_user_profiles -- [ ] describe_volumes -- [ ] detach_elastic_load_balancer -- [ ] disassociate_elastic_ip -- [ ] get_hostname_suggestion -- [ ] grant_access -- [ ] list_tags -- [ ] reboot_instance -- [ ] register_ecs_cluster -- [ ] register_elastic_ip -- [ ] register_instance -- [ ] register_rds_db_instance -- [ ] register_volume -- [ ] set_load_based_auto_scaling -- [ ] set_permission -- [ ] set_time_based_auto_scaling -- [X] start_instance -- [ ] start_stack -- [ ] stop_instance -- [ ] stop_stack -- [ ] tag_resource -- [ ] unassign_instance -- [ ] unassign_volume -- [ ] untag_resource -- [ ] update_app -- [ ] update_elastic_ip -- [ ] update_instance -- [ ] update_layer -- [ ] update_my_user_profile -- [ ] update_rds_db_instance -- [ ] update_stack -- [ ] update_user_profile -- [ ] update_volume - -## opsworkscm - 0% implemented -- [ ] associate_node -- [ ] create_backup -- [ ] create_server -- [ ] delete_backup -- [ ] delete_server -- [ ] describe_account_attributes -- [ ] describe_backups -- [ ] describe_events -- [ ] describe_node_association_status -- [ ] describe_servers -- [ ] disassociate_node -- [ ] restore_server -- [ ] start_maintenance -- [ ] update_server -- [ ] update_server_engine_attributes - -## organizations - 0% implemented -- [ ] accept_handshake -- [ ] attach_policy -- [ ] cancel_handshake -- [ ] create_account -- [ ] create_organization -- [ ] create_organizational_unit -- [ ] create_policy -- [ ] decline_handshake -- [ ] delete_organization -- [ ] delete_organizational_unit -- [ ] delete_policy -- [ ] describe_account -- [ ] describe_create_account_status -- [ ] describe_handshake -- [ ] describe_organization -- [ ] describe_organizational_unit -- [ ] describe_policy -- [ ] detach_policy -- [ ] disable_aws_service_access -- [ ] disable_policy_type -- [ ] enable_all_features -- [ ] enable_aws_service_access -- [ ] enable_policy_type -- [ ] invite_account_to_organization -- [ ] leave_organization -- [ ] list_accounts -- [ ] list_accounts_for_parent -- [ ] list_aws_service_access_for_organization -- [ ] list_children -- [ ] list_create_account_status -- [ ] list_handshakes_for_account -- [ ] list_handshakes_for_organization -- [ ] list_organizational_units_for_parent -- [ ] list_parents -- [ ] list_policies -- [ ] list_policies_for_target -- [ ] list_roots -- [ ] list_targets_for_policy -- [ ] move_account -- [ ] remove_account_from_organization -- [ ] update_organizational_unit -- [ ] update_policy - -## pinpoint - 0% implemented -- [ ] create_app -- [ ] create_campaign -- [ ] create_export_job -- [ ] create_import_job -- [ ] create_segment -- [ ] delete_adm_channel -- [ ] delete_apns_channel -- [ ] delete_apns_sandbox_channel -- [ ] delete_apns_voip_channel -- [ ] delete_apns_voip_sandbox_channel -- [ ] delete_app -- [ ] delete_baidu_channel -- [ ] delete_campaign -- [ ] delete_email_channel -- [ ] delete_endpoint -- [ ] delete_event_stream -- [ ] delete_gcm_channel -- [ ] delete_segment -- [ ] delete_sms_channel -- [ ] get_adm_channel -- [ ] get_apns_channel -- [ ] get_apns_sandbox_channel -- [ ] get_apns_voip_channel -- [ ] get_apns_voip_sandbox_channel -- [ ] get_app -- [ ] get_application_settings -- [ ] get_apps -- [ ] get_baidu_channel -- [ ] get_campaign -- [ ] get_campaign_activities -- [ ] get_campaign_version -- [ ] 
get_campaign_versions -- [ ] get_campaigns -- [ ] get_email_channel -- [ ] get_endpoint -- [ ] get_event_stream -- [ ] get_export_job -- [ ] get_export_jobs -- [ ] get_gcm_channel -- [ ] get_import_job -- [ ] get_import_jobs -- [ ] get_segment -- [ ] get_segment_export_jobs -- [ ] get_segment_import_jobs -- [ ] get_segment_version -- [ ] get_segment_versions -- [ ] get_segments -- [ ] get_sms_channel -- [ ] put_event_stream -- [ ] send_messages -- [ ] send_users_messages -- [ ] update_adm_channel -- [ ] update_apns_channel -- [ ] update_apns_sandbox_channel -- [ ] update_apns_voip_channel -- [ ] update_apns_voip_sandbox_channel -- [ ] update_application_settings -- [ ] update_baidu_channel -- [ ] update_campaign -- [ ] update_email_channel -- [ ] update_endpoint -- [ ] update_endpoints_batch -- [ ] update_gcm_channel -- [ ] update_segment -- [ ] update_sms_channel - -## polly - 83% implemented -- [X] delete_lexicon -- [X] describe_voices -- [X] get_lexicon -- [X] list_lexicons -- [X] put_lexicon -- [ ] synthesize_speech - -## pricing - 0% implemented -- [ ] describe_services -- [ ] get_attribute_values -- [ ] get_products - -## rds - 0% implemented -- [ ] add_role_to_db_cluster -- [ ] add_source_identifier_to_subscription -- [ ] add_tags_to_resource -- [ ] apply_pending_maintenance_action -- [ ] authorize_db_security_group_ingress -- [ ] copy_db_cluster_parameter_group -- [ ] copy_db_cluster_snapshot -- [ ] copy_db_parameter_group -- [ ] copy_db_snapshot -- [ ] copy_option_group -- [ ] create_db_cluster -- [ ] create_db_cluster_parameter_group -- [ ] create_db_cluster_snapshot -- [ ] create_db_instance -- [ ] create_db_instance_read_replica -- [ ] create_db_parameter_group -- [ ] create_db_security_group -- [ ] create_db_snapshot -- [ ] create_db_subnet_group -- [ ] create_event_subscription -- [ ] create_option_group -- [ ] delete_db_cluster -- [ ] delete_db_cluster_parameter_group -- [ ] delete_db_cluster_snapshot -- [ ] delete_db_instance -- [ ] delete_db_parameter_group -- [ ] delete_db_security_group -- [ ] delete_db_snapshot -- [ ] delete_db_subnet_group -- [ ] delete_event_subscription -- [ ] delete_option_group -- [ ] describe_account_attributes -- [ ] describe_certificates -- [ ] describe_db_cluster_parameter_groups -- [ ] describe_db_cluster_parameters -- [ ] describe_db_cluster_snapshot_attributes -- [ ] describe_db_cluster_snapshots -- [ ] describe_db_clusters -- [ ] describe_db_engine_versions -- [ ] describe_db_instances -- [ ] describe_db_log_files -- [ ] describe_db_parameter_groups -- [ ] describe_db_parameters -- [ ] describe_db_security_groups -- [ ] describe_db_snapshot_attributes -- [ ] describe_db_snapshots -- [ ] describe_db_subnet_groups -- [ ] describe_engine_default_cluster_parameters -- [ ] describe_engine_default_parameters -- [ ] describe_event_categories -- [ ] describe_event_subscriptions -- [ ] describe_events -- [ ] describe_option_group_options -- [ ] describe_option_groups -- [ ] describe_orderable_db_instance_options -- [ ] describe_pending_maintenance_actions -- [ ] describe_reserved_db_instances -- [ ] describe_reserved_db_instances_offerings -- [ ] describe_source_regions -- [ ] describe_valid_db_instance_modifications -- [ ] download_db_log_file_portion -- [ ] failover_db_cluster -- [ ] list_tags_for_resource -- [ ] modify_db_cluster -- [ ] modify_db_cluster_parameter_group -- [ ] modify_db_cluster_snapshot_attribute -- [ ] modify_db_instance -- [ ] modify_db_parameter_group -- [ ] modify_db_snapshot -- [ ] modify_db_snapshot_attribute -- [ ] 
modify_db_subnet_group -- [ ] modify_event_subscription -- [ ] modify_option_group -- [ ] promote_read_replica -- [ ] promote_read_replica_db_cluster -- [ ] purchase_reserved_db_instances_offering -- [ ] reboot_db_instance -- [ ] remove_role_from_db_cluster -- [ ] remove_source_identifier_from_subscription -- [ ] remove_tags_from_resource -- [ ] reset_db_cluster_parameter_group -- [ ] reset_db_parameter_group -- [ ] restore_db_cluster_from_s3 -- [ ] restore_db_cluster_from_snapshot -- [ ] restore_db_cluster_to_point_in_time -- [ ] restore_db_instance_from_db_snapshot -- [ ] restore_db_instance_from_s3 -- [ ] restore_db_instance_to_point_in_time -- [ ] revoke_db_security_group_ingress -- [ ] start_db_instance -- [ ] stop_db_instance - -## redshift - 41% implemented -- [ ] authorize_cluster_security_group_ingress -- [ ] authorize_snapshot_access -- [ ] copy_cluster_snapshot -- [X] create_cluster -- [X] create_cluster_parameter_group -- [X] create_cluster_security_group -- [X] create_cluster_snapshot -- [X] create_cluster_subnet_group -- [ ] create_event_subscription -- [ ] create_hsm_client_certificate -- [ ] create_hsm_configuration -- [X] create_snapshot_copy_grant -- [X] create_tags -- [X] delete_cluster -- [X] delete_cluster_parameter_group -- [X] delete_cluster_security_group -- [X] delete_cluster_snapshot -- [X] delete_cluster_subnet_group -- [ ] delete_event_subscription -- [ ] delete_hsm_client_certificate -- [ ] delete_hsm_configuration -- [X] delete_snapshot_copy_grant -- [X] delete_tags -- [X] describe_cluster_parameter_groups -- [ ] describe_cluster_parameters -- [X] describe_cluster_security_groups -- [X] describe_cluster_snapshots -- [X] describe_cluster_subnet_groups -- [ ] describe_cluster_versions -- [X] describe_clusters -- [ ] describe_default_cluster_parameters -- [ ] describe_event_categories -- [ ] describe_event_subscriptions -- [ ] describe_events -- [ ] describe_hsm_client_certificates -- [ ] describe_hsm_configurations -- [ ] describe_logging_status -- [ ] describe_orderable_cluster_options -- [ ] describe_reserved_node_offerings -- [ ] describe_reserved_nodes -- [ ] describe_resize -- [X] describe_snapshot_copy_grants -- [ ] describe_table_restore_status -- [X] describe_tags -- [ ] disable_logging -- [X] disable_snapshot_copy -- [ ] enable_logging -- [X] enable_snapshot_copy -- [ ] get_cluster_credentials -- [X] modify_cluster -- [ ] modify_cluster_iam_roles -- [ ] modify_cluster_parameter_group -- [ ] modify_cluster_subnet_group -- [ ] modify_event_subscription -- [X] modify_snapshot_copy_retention_period -- [ ] purchase_reserved_node_offering -- [ ] reboot_cluster -- [ ] reset_cluster_parameter_group -- [X] restore_from_cluster_snapshot -- [ ] restore_table_from_cluster_snapshot -- [ ] revoke_cluster_security_group_ingress -- [ ] revoke_snapshot_access -- [ ] rotate_encryption_key - -## rekognition - 0% implemented -- [ ] compare_faces -- [ ] create_collection -- [ ] create_stream_processor -- [ ] delete_collection -- [ ] delete_faces -- [ ] delete_stream_processor -- [ ] describe_stream_processor -- [ ] detect_faces -- [ ] detect_labels -- [ ] detect_moderation_labels -- [ ] detect_text -- [ ] get_celebrity_info -- [ ] get_celebrity_recognition -- [ ] get_content_moderation -- [ ] get_face_detection -- [ ] get_face_search -- [ ] get_label_detection -- [ ] get_person_tracking -- [ ] index_faces -- [ ] list_collections -- [ ] list_faces -- [ ] list_stream_processors -- [ ] recognize_celebrities -- [ ] search_faces -- [ ] search_faces_by_image -- [ ] 
start_celebrity_recognition -- [ ] start_content_moderation -- [ ] start_face_detection -- [ ] start_face_search -- [ ] start_label_detection -- [ ] start_person_tracking -- [ ] start_stream_processor -- [ ] stop_stream_processor - -## resource-groups - 0% implemented -- [ ] create_group -- [ ] delete_group -- [ ] get_group -- [ ] get_group_query -- [ ] get_tags -- [ ] list_group_resources -- [ ] list_groups -- [ ] search_resources -- [ ] tag -- [ ] untag -- [ ] update_group -- [ ] update_group_query - -## resourcegroupstaggingapi - 60% implemented -- [X] get_resources -- [X] get_tag_keys -- [X] get_tag_values -- [ ] tag_resources -- [ ] untag_resources - -## route53 - 12% implemented -- [ ] associate_vpc_with_hosted_zone -- [ ] change_resource_record_sets -- [X] change_tags_for_resource -- [X] create_health_check -- [X] create_hosted_zone -- [ ] create_query_logging_config -- [ ] create_reusable_delegation_set -- [ ] create_traffic_policy -- [ ] create_traffic_policy_instance -- [ ] create_traffic_policy_version -- [ ] create_vpc_association_authorization -- [X] delete_health_check -- [X] delete_hosted_zone -- [ ] delete_query_logging_config -- [ ] delete_reusable_delegation_set -- [ ] delete_traffic_policy -- [ ] delete_traffic_policy_instance -- [ ] delete_vpc_association_authorization -- [ ] disassociate_vpc_from_hosted_zone -- [ ] get_account_limit -- [ ] get_change -- [ ] get_checker_ip_ranges -- [ ] get_geo_location -- [ ] get_health_check -- [ ] get_health_check_count -- [ ] get_health_check_last_failure_reason -- [ ] get_health_check_status -- [X] get_hosted_zone -- [ ] get_hosted_zone_count -- [ ] get_hosted_zone_limit -- [ ] get_query_logging_config -- [ ] get_reusable_delegation_set -- [ ] get_reusable_delegation_set_limit -- [ ] get_traffic_policy -- [ ] get_traffic_policy_instance -- [ ] get_traffic_policy_instance_count -- [ ] list_geo_locations -- [ ] list_health_checks -- [ ] list_hosted_zones -- [ ] list_hosted_zones_by_name -- [ ] list_query_logging_configs -- [ ] list_resource_record_sets -- [ ] list_reusable_delegation_sets -- [X] list_tags_for_resource -- [ ] list_tags_for_resources -- [ ] list_traffic_policies -- [ ] list_traffic_policy_instances -- [ ] list_traffic_policy_instances_by_hosted_zone -- [ ] list_traffic_policy_instances_by_policy -- [ ] list_traffic_policy_versions -- [ ] list_vpc_association_authorizations -- [ ] test_dns_answer -- [ ] update_health_check -- [ ] update_hosted_zone_comment -- [ ] update_traffic_policy_comment -- [ ] update_traffic_policy_instance - -## route53domains - 0% implemented -- [ ] check_domain_availability -- [ ] check_domain_transferability -- [ ] delete_tags_for_domain -- [ ] disable_domain_auto_renew -- [ ] disable_domain_transfer_lock -- [ ] enable_domain_auto_renew -- [ ] enable_domain_transfer_lock -- [ ] get_contact_reachability_status -- [ ] get_domain_detail -- [ ] get_domain_suggestions -- [ ] get_operation_detail -- [ ] list_domains -- [ ] list_operations -- [ ] list_tags_for_domain -- [ ] register_domain -- [ ] renew_domain -- [ ] resend_contact_reachability_email -- [ ] retrieve_domain_auth_code -- [ ] transfer_domain -- [ ] update_domain_contact -- [ ] update_domain_contact_privacy -- [ ] update_domain_nameservers -- [ ] update_tags_for_domain -- [ ] view_billing - -## s3 - 15% implemented -- [ ] abort_multipart_upload -- [ ] complete_multipart_upload -- [ ] copy_object -- [X] create_bucket -- [ ] create_multipart_upload -- [X] delete_bucket -- [ ] delete_bucket_analytics_configuration -- [X] delete_bucket_cors 
-- [ ] delete_bucket_encryption -- [ ] delete_bucket_inventory_configuration -- [ ] delete_bucket_lifecycle -- [ ] delete_bucket_metrics_configuration -- [X] delete_bucket_policy -- [ ] delete_bucket_replication -- [X] delete_bucket_tagging -- [ ] delete_bucket_website -- [ ] delete_object -- [ ] delete_object_tagging -- [ ] delete_objects -- [ ] get_bucket_accelerate_configuration -- [X] get_bucket_acl -- [ ] get_bucket_analytics_configuration -- [ ] get_bucket_cors -- [ ] get_bucket_encryption -- [ ] get_bucket_inventory_configuration -- [ ] get_bucket_lifecycle -- [ ] get_bucket_lifecycle_configuration -- [ ] get_bucket_location -- [ ] get_bucket_logging -- [ ] get_bucket_metrics_configuration -- [ ] get_bucket_notification -- [ ] get_bucket_notification_configuration -- [X] get_bucket_policy -- [ ] get_bucket_replication -- [ ] get_bucket_request_payment -- [ ] get_bucket_tagging -- [X] get_bucket_versioning -- [ ] get_bucket_website -- [ ] get_object -- [ ] get_object_acl -- [ ] get_object_tagging -- [ ] get_object_torrent -- [ ] head_bucket -- [ ] head_object -- [ ] list_bucket_analytics_configurations -- [ ] list_bucket_inventory_configurations -- [ ] list_bucket_metrics_configurations -- [ ] list_buckets -- [ ] list_multipart_uploads -- [ ] list_object_versions -- [ ] list_objects -- [ ] list_objects_v2 -- [ ] list_parts -- [ ] put_bucket_accelerate_configuration -- [ ] put_bucket_acl -- [ ] put_bucket_analytics_configuration -- [X] put_bucket_cors -- [ ] put_bucket_encryption -- [ ] put_bucket_inventory_configuration -- [ ] put_bucket_lifecycle -- [ ] put_bucket_lifecycle_configuration -- [X] put_bucket_logging -- [ ] put_bucket_metrics_configuration -- [ ] put_bucket_notification -- [X] put_bucket_notification_configuration -- [ ] put_bucket_policy -- [ ] put_bucket_replication -- [ ] put_bucket_request_payment -- [X] put_bucket_tagging -- [ ] put_bucket_versioning -- [ ] put_bucket_website -- [ ] put_object -- [ ] put_object_acl -- [ ] put_object_tagging -- [ ] restore_object -- [ ] select_object_content -- [ ] upload_part -- [ ] upload_part_copy - -## sagemaker - 0% implemented -- [ ] add_tags -- [ ] create_endpoint -- [ ] create_endpoint_config -- [ ] create_model -- [ ] create_notebook_instance -- [ ] create_notebook_instance_lifecycle_config -- [ ] create_presigned_notebook_instance_url -- [ ] create_training_job -- [ ] delete_endpoint -- [ ] delete_endpoint_config -- [ ] delete_model -- [ ] delete_notebook_instance -- [ ] delete_notebook_instance_lifecycle_config -- [ ] delete_tags -- [ ] describe_endpoint -- [ ] describe_endpoint_config -- [ ] describe_model -- [ ] describe_notebook_instance -- [ ] describe_notebook_instance_lifecycle_config -- [ ] describe_training_job -- [ ] list_endpoint_configs -- [ ] list_endpoints -- [ ] list_models -- [ ] list_notebook_instance_lifecycle_configs -- [ ] list_notebook_instances -- [ ] list_tags -- [ ] list_training_jobs -- [ ] start_notebook_instance -- [ ] stop_notebook_instance -- [ ] stop_training_job -- [ ] update_endpoint -- [ ] update_endpoint_weights_and_capacities -- [ ] update_notebook_instance -- [ ] update_notebook_instance_lifecycle_config - -## sagemaker-runtime - 0% implemented -- [ ] invoke_endpoint - -## sdb - 0% implemented -- [ ] batch_delete_attributes -- [ ] batch_put_attributes -- [ ] create_domain -- [ ] delete_attributes -- [ ] delete_domain -- [ ] domain_metadata -- [ ] get_attributes -- [ ] list_domains -- [ ] put_attributes -- [ ] select - -## secretsmanager - 27% implemented -- [ ] cancel_rotate_secret -- 
[X] create_secret -- [ ] delete_secret -- [X] describe_secret -- [X] get_random_password -- [X] get_secret_value -- [ ] list_secret_version_ids -- [ ] list_secrets -- [ ] put_secret_value -- [ ] restore_secret -- [ ] rotate_secret -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_secret -- [ ] update_secret_version_stage - -## serverlessrepo - 0% implemented -- [ ] create_application -- [ ] create_application_version -- [ ] create_cloud_formation_change_set -- [ ] delete_application -- [ ] get_application -- [ ] get_application_policy -- [ ] list_application_versions -- [ ] list_applications -- [ ] put_application_policy -- [ ] update_application - -## servicecatalog - 0% implemented -- [ ] accept_portfolio_share -- [ ] associate_principal_with_portfolio -- [ ] associate_product_with_portfolio -- [ ] associate_tag_option_with_resource -- [ ] copy_product -- [ ] create_constraint -- [ ] create_portfolio -- [ ] create_portfolio_share -- [ ] create_product -- [ ] create_provisioned_product_plan -- [ ] create_provisioning_artifact -- [ ] create_tag_option -- [ ] delete_constraint -- [ ] delete_portfolio -- [ ] delete_portfolio_share -- [ ] delete_product -- [ ] delete_provisioned_product_plan -- [ ] delete_provisioning_artifact -- [ ] delete_tag_option -- [ ] describe_constraint -- [ ] describe_copy_product_status -- [ ] describe_portfolio -- [ ] describe_product -- [ ] describe_product_as_admin -- [ ] describe_product_view -- [ ] describe_provisioned_product -- [ ] describe_provisioned_product_plan -- [ ] describe_provisioning_artifact -- [ ] describe_provisioning_parameters -- [ ] describe_record -- [ ] describe_tag_option -- [ ] disassociate_principal_from_portfolio -- [ ] disassociate_product_from_portfolio -- [ ] disassociate_tag_option_from_resource -- [ ] execute_provisioned_product_plan -- [ ] list_accepted_portfolio_shares -- [ ] list_constraints_for_portfolio -- [ ] list_launch_paths -- [ ] list_portfolio_access -- [ ] list_portfolios -- [ ] list_portfolios_for_product -- [ ] list_principals_for_portfolio -- [ ] list_provisioned_product_plans -- [ ] list_provisioning_artifacts -- [ ] list_record_history -- [ ] list_resources_for_tag_option -- [ ] list_tag_options -- [ ] provision_product -- [ ] reject_portfolio_share -- [ ] scan_provisioned_products -- [ ] search_products -- [ ] search_products_as_admin -- [ ] search_provisioned_products -- [ ] terminate_provisioned_product -- [ ] update_constraint -- [ ] update_portfolio -- [ ] update_product -- [ ] update_provisioned_product -- [ ] update_provisioning_artifact -- [ ] update_tag_option - -## servicediscovery - 0% implemented -- [ ] create_private_dns_namespace -- [ ] create_public_dns_namespace -- [ ] create_service -- [ ] delete_namespace -- [ ] delete_service -- [ ] deregister_instance -- [ ] get_instance -- [ ] get_instances_health_status -- [ ] get_namespace -- [ ] get_operation -- [ ] get_service -- [ ] list_instances -- [ ] list_namespaces -- [ ] list_operations -- [ ] list_services -- [ ] register_instance -- [ ] update_instance_custom_health_status -- [ ] update_service - -## ses - 11% implemented -- [ ] clone_receipt_rule_set -- [ ] create_configuration_set -- [ ] create_configuration_set_event_destination -- [ ] create_configuration_set_tracking_options -- [ ] create_custom_verification_email_template -- [ ] create_receipt_filter -- [ ] create_receipt_rule -- [ ] create_receipt_rule_set -- [ ] create_template -- [ ] delete_configuration_set -- [ ] delete_configuration_set_event_destination -- [ ] 
delete_configuration_set_tracking_options -- [ ] delete_custom_verification_email_template -- [X] delete_identity -- [ ] delete_identity_policy -- [ ] delete_receipt_filter -- [ ] delete_receipt_rule -- [ ] delete_receipt_rule_set -- [ ] delete_template -- [ ] delete_verified_email_address -- [ ] describe_active_receipt_rule_set -- [ ] describe_configuration_set -- [ ] describe_receipt_rule -- [ ] describe_receipt_rule_set -- [ ] get_account_sending_enabled -- [ ] get_custom_verification_email_template -- [ ] get_identity_dkim_attributes -- [ ] get_identity_mail_from_domain_attributes -- [ ] get_identity_notification_attributes -- [ ] get_identity_policies -- [ ] get_identity_verification_attributes -- [X] get_send_quota -- [ ] get_send_statistics -- [ ] get_template -- [ ] list_configuration_sets -- [ ] list_custom_verification_email_templates -- [X] list_identities -- [ ] list_identity_policies -- [ ] list_receipt_filters -- [ ] list_receipt_rule_sets -- [ ] list_templates -- [X] list_verified_email_addresses -- [ ] put_identity_policy -- [ ] reorder_receipt_rule_set -- [ ] send_bounce -- [ ] send_bulk_templated_email -- [ ] send_custom_verification_email -- [X] send_email -- [X] send_raw_email -- [ ] send_templated_email -- [ ] set_active_receipt_rule_set -- [ ] set_identity_dkim_enabled -- [ ] set_identity_feedback_forwarding_enabled -- [ ] set_identity_headers_in_notifications_enabled -- [ ] set_identity_mail_from_domain -- [ ] set_identity_notification_topic -- [ ] set_receipt_rule_position -- [ ] test_render_template -- [ ] update_account_sending_enabled -- [ ] update_configuration_set_event_destination -- [ ] update_configuration_set_reputation_metrics_enabled -- [ ] update_configuration_set_sending_enabled -- [ ] update_configuration_set_tracking_options -- [ ] update_custom_verification_email_template -- [ ] update_receipt_rule -- [ ] update_template -- [ ] verify_domain_dkim -- [ ] verify_domain_identity -- [X] verify_email_address -- [X] verify_email_identity - -## shield - 0% implemented -- [ ] create_protection -- [ ] create_subscription -- [ ] delete_protection -- [ ] delete_subscription -- [ ] describe_attack -- [ ] describe_protection -- [ ] describe_subscription -- [ ] get_subscription_state -- [ ] list_attacks -- [ ] list_protections - -## sms - 0% implemented -- [ ] create_replication_job -- [ ] delete_replication_job -- [ ] delete_server_catalog -- [ ] disassociate_connector -- [ ] get_connectors -- [ ] get_replication_jobs -- [ ] get_replication_runs -- [ ] get_servers -- [ ] import_server_catalog -- [ ] start_on_demand_replication_run -- [ ] update_replication_job - -## snowball - 0% implemented -- [ ] cancel_cluster -- [ ] cancel_job -- [ ] create_address -- [ ] create_cluster -- [ ] create_job -- [ ] describe_address -- [ ] describe_addresses -- [ ] describe_cluster -- [ ] describe_job -- [ ] get_job_manifest -- [ ] get_job_unlock_code -- [ ] get_snowball_usage -- [ ] list_cluster_jobs -- [ ] list_clusters -- [ ] list_jobs -- [ ] update_cluster -- [ ] update_job - -## sns - 53% implemented -- [ ] add_permission -- [ ] check_if_phone_number_is_opted_out -- [ ] confirm_subscription -- [X] create_platform_application -- [X] create_platform_endpoint -- [X] create_topic -- [X] delete_endpoint -- [X] delete_platform_application -- [X] delete_topic -- [ ] get_endpoint_attributes -- [ ] get_platform_application_attributes -- [ ] get_sms_attributes -- [X] get_subscription_attributes -- [ ] get_topic_attributes -- [X] list_endpoints_by_platform_application -- [ ] 
list_phone_numbers_opted_out -- [X] list_platform_applications -- [X] list_subscriptions -- [ ] list_subscriptions_by_topic -- [X] list_topics -- [ ] opt_in_phone_number -- [X] publish -- [ ] remove_permission -- [X] set_endpoint_attributes -- [ ] set_platform_application_attributes -- [ ] set_sms_attributes -- [X] set_subscription_attributes -- [ ] set_topic_attributes -- [X] subscribe -- [X] unsubscribe - -## sqs - 65% implemented -- [X] add_permission -- [X] change_message_visibility -- [ ] change_message_visibility_batch -- [X] create_queue -- [X] delete_message -- [ ] delete_message_batch -- [X] delete_queue -- [ ] get_queue_attributes -- [ ] get_queue_url -- [X] list_dead_letter_source_queues -- [ ] list_queue_tags -- [X] list_queues -- [X] purge_queue -- [ ] receive_message -- [X] remove_permission -- [X] send_message -- [ ] send_message_batch -- [X] set_queue_attributes -- [X] tag_queue -- [X] untag_queue - -## ssm - 11% implemented -- [X] add_tags_to_resource -- [ ] cancel_command -- [ ] create_activation -- [ ] create_association -- [ ] create_association_batch -- [ ] create_document -- [ ] create_maintenance_window -- [ ] create_patch_baseline -- [ ] create_resource_data_sync -- [ ] delete_activation -- [ ] delete_association -- [ ] delete_document -- [ ] delete_maintenance_window -- [X] delete_parameter -- [X] delete_parameters -- [ ] delete_patch_baseline -- [ ] delete_resource_data_sync -- [ ] deregister_managed_instance -- [ ] deregister_patch_baseline_for_patch_group -- [ ] deregister_target_from_maintenance_window -- [ ] deregister_task_from_maintenance_window -- [ ] describe_activations -- [ ] describe_association -- [ ] describe_automation_executions -- [ ] describe_automation_step_executions -- [ ] describe_available_patches -- [ ] describe_document -- [ ] describe_document_permission -- [ ] describe_effective_instance_associations -- [ ] describe_effective_patches_for_patch_baseline -- [ ] describe_instance_associations_status -- [ ] describe_instance_information -- [ ] describe_instance_patch_states -- [ ] describe_instance_patch_states_for_patch_group -- [ ] describe_instance_patches -- [ ] describe_maintenance_window_execution_task_invocations -- [ ] describe_maintenance_window_execution_tasks -- [ ] describe_maintenance_window_executions -- [ ] describe_maintenance_window_targets -- [ ] describe_maintenance_window_tasks -- [ ] describe_maintenance_windows -- [ ] describe_parameters -- [ ] describe_patch_baselines -- [ ] describe_patch_group_state -- [ ] describe_patch_groups -- [ ] get_automation_execution -- [ ] get_command_invocation -- [ ] get_default_patch_baseline -- [ ] get_deployable_patch_snapshot_for_instance -- [ ] get_document -- [ ] get_inventory -- [ ] get_inventory_schema -- [ ] get_maintenance_window -- [ ] get_maintenance_window_execution -- [ ] get_maintenance_window_execution_task -- [ ] get_maintenance_window_execution_task_invocation -- [ ] get_maintenance_window_task -- [X] get_parameter -- [ ] get_parameter_history -- [X] get_parameters -- [X] get_parameters_by_path -- [ ] get_patch_baseline -- [ ] get_patch_baseline_for_patch_group -- [ ] list_association_versions -- [ ] list_associations -- [ ] list_command_invocations -- [X] list_commands -- [ ] list_compliance_items -- [ ] list_compliance_summaries -- [ ] list_document_versions -- [ ] list_documents -- [ ] list_inventory_entries -- [ ] list_resource_compliance_summaries -- [ ] list_resource_data_sync -- [X] list_tags_for_resource -- [ ] modify_document_permission -- [ ] 
put_compliance_items
-- [ ] put_inventory
-- [X] put_parameter
-- [ ] register_default_patch_baseline
-- [ ] register_patch_baseline_for_patch_group
-- [ ] register_target_with_maintenance_window
-- [ ] register_task_with_maintenance_window
-- [X] remove_tags_from_resource
-- [ ] send_automation_signal
-- [X] send_command
-- [ ] start_automation_execution
-- [ ] stop_automation_execution
-- [ ] update_association
-- [ ] update_association_status
-- [ ] update_document
-- [ ] update_document_default_version
-- [ ] update_maintenance_window
-- [ ] update_maintenance_window_target
-- [ ] update_maintenance_window_task
-- [ ] update_managed_instance_role
-- [ ] update_patch_baseline
-
-## stepfunctions - 0% implemented
-- [ ] create_activity
-- [ ] create_state_machine
-- [ ] delete_activity
-- [ ] delete_state_machine
-- [ ] describe_activity
-- [ ] describe_execution
-- [ ] describe_state_machine
-- [ ] describe_state_machine_for_execution
-- [ ] get_activity_task
-- [ ] get_execution_history
-- [ ] list_activities
-- [ ] list_executions
-- [ ] list_state_machines
-- [ ] send_task_failure
-- [ ] send_task_heartbeat
-- [ ] send_task_success
-- [ ] start_execution
-- [ ] stop_execution
-- [ ] update_state_machine
-
-## storagegateway - 0% implemented
-- [ ] activate_gateway
-- [ ] add_cache
-- [ ] add_tags_to_resource
-- [ ] add_upload_buffer
-- [ ] add_working_storage
-- [ ] cancel_archival
-- [ ] cancel_retrieval
-- [ ] create_cached_iscsi_volume
-- [ ] create_nfs_file_share
-- [ ] create_snapshot
-- [ ] create_snapshot_from_volume_recovery_point
-- [ ] create_stored_iscsi_volume
-- [ ] create_tape_with_barcode
-- [ ] create_tapes
-- [ ] delete_bandwidth_rate_limit
-- [ ] delete_chap_credentials
-- [ ] delete_file_share
-- [ ] delete_gateway
-- [ ] delete_snapshot_schedule
-- [ ] delete_tape
-- [ ] delete_tape_archive
-- [ ] delete_volume
-- [ ] describe_bandwidth_rate_limit
-- [ ] describe_cache
-- [ ] describe_cached_iscsi_volumes
-- [ ] describe_chap_credentials
-- [ ] describe_gateway_information
-- [ ] describe_maintenance_start_time
-- [ ] describe_nfs_file_shares
-- [ ] describe_snapshot_schedule
-- [ ] describe_stored_iscsi_volumes
-- [ ] describe_tape_archives
-- [ ] describe_tape_recovery_points
-- [ ] describe_tapes
-- [ ] describe_upload_buffer
-- [ ] describe_vtl_devices
-- [ ] describe_working_storage
-- [ ] disable_gateway
-- [ ] list_file_shares
-- [ ] list_gateways
-- [ ] list_local_disks
-- [ ] list_tags_for_resource
-- [ ] list_tapes
-- [ ] list_volume_initiators
-- [ ] list_volume_recovery_points
-- [ ] list_volumes
-- [ ] notify_when_uploaded
-- [ ] refresh_cache
-- [ ] remove_tags_from_resource
-- [ ] reset_cache
-- [ ] retrieve_tape_archive
-- [ ] retrieve_tape_recovery_point
-- [ ] set_local_console_password
-- [ ] shutdown_gateway
-- [ ] start_gateway
-- [ ] update_bandwidth_rate_limit
-- [ ] update_chap_credentials
-- [ ] update_gateway_information
-- [ ] update_gateway_software_now
-- [ ] update_maintenance_start_time
-- [ ] update_nfs_file_share
-- [ ] update_snapshot_schedule
-- [ ] update_vtl_device_type
-
-## sts - 42% implemented
-- [X] assume_role
-- [ ] assume_role_with_saml
-- [ ] assume_role_with_web_identity
-- [ ] decode_authorization_message
-- [ ] get_caller_identity
-- [X] get_federation_token
-- [X] get_session_token
-
-## support - 0% implemented
-- [ ] add_attachments_to_set
-- [ ] add_communication_to_case
-- [ ] create_case
-- [ ] describe_attachment
-- [ ] describe_cases
-- [ ] describe_communications
-- [ ] describe_services
-- [ ] describe_severity_levels
-- [ ] describe_trusted_advisor_check_refresh_statuses
-- [ ] describe_trusted_advisor_check_result
-- [ ] describe_trusted_advisor_check_summaries
-- [ ] describe_trusted_advisor_checks
-- [ ] refresh_trusted_advisor_check
-- [ ] resolve_case
-
-## swf - 58% implemented
-- [ ] count_closed_workflow_executions
-- [ ] count_open_workflow_executions
-- [X] count_pending_activity_tasks
-- [X] count_pending_decision_tasks
-- [ ] deprecate_activity_type
-- [X] deprecate_domain
-- [ ] deprecate_workflow_type
-- [ ] describe_activity_type
-- [X] describe_domain
-- [X] describe_workflow_execution
-- [ ] describe_workflow_type
-- [ ] get_workflow_execution_history
-- [ ] list_activity_types
-- [X] list_closed_workflow_executions
-- [X] list_domains
-- [X] list_open_workflow_executions
-- [ ] list_workflow_types
-- [X] poll_for_activity_task
-- [X] poll_for_decision_task
-- [X] record_activity_task_heartbeat
-- [ ] register_activity_type
-- [X] register_domain
-- [ ] register_workflow_type
-- [ ] request_cancel_workflow_execution
-- [ ] respond_activity_task_canceled
-- [X] respond_activity_task_completed
-- [X] respond_activity_task_failed
-- [X] respond_decision_task_completed
-- [X] signal_workflow_execution
-- [X] start_workflow_execution
-- [X] terminate_workflow_execution
-
-## transcribe - 0% implemented
-- [ ] create_vocabulary
-- [ ] delete_vocabulary
-- [ ] get_transcription_job
-- [ ] get_vocabulary
-- [ ] list_transcription_jobs
-- [ ] list_vocabularies
-- [ ] start_transcription_job
-- [ ] update_vocabulary
-
-## translate - 0% implemented
-- [ ] translate_text
-
-## waf - 0% implemented
-- [ ] create_byte_match_set
-- [ ] create_geo_match_set
-- [ ] create_ip_set
-- [ ] create_rate_based_rule
-- [ ] create_regex_match_set
-- [ ] create_regex_pattern_set
-- [ ] create_rule
-- [ ] create_rule_group
-- [ ] create_size_constraint_set
-- [ ] create_sql_injection_match_set
-- [ ] create_web_acl
-- [ ] create_xss_match_set
-- [ ] delete_byte_match_set
-- [ ] delete_geo_match_set
-- [ ] delete_ip_set
-- [ ] delete_permission_policy
-- [ ] delete_rate_based_rule
-- [ ] delete_regex_match_set
-- [ ] delete_regex_pattern_set
-- [ ] delete_rule
-- [ ] delete_rule_group
-- [ ] delete_size_constraint_set
-- [ ] delete_sql_injection_match_set
-- [ ] delete_web_acl
-- [ ] delete_xss_match_set
-- [ ] get_byte_match_set
-- [ ] get_change_token
-- [ ] get_change_token_status
-- [ ] get_geo_match_set
-- [ ] get_ip_set
-- [ ] get_permission_policy
-- [ ] get_rate_based_rule
-- [ ] get_rate_based_rule_managed_keys
-- [ ] get_regex_match_set
-- [ ] get_regex_pattern_set
-- [ ] get_rule
-- [ ] get_rule_group
-- [ ] get_sampled_requests
-- [ ] get_size_constraint_set
-- [ ] get_sql_injection_match_set
-- [ ] get_web_acl
-- [ ] get_xss_match_set
-- [ ] list_activated_rules_in_rule_group
-- [ ] list_byte_match_sets
-- [ ] list_geo_match_sets
-- [ ] list_ip_sets
-- [ ] list_rate_based_rules
-- [ ] list_regex_match_sets
-- [ ] list_regex_pattern_sets
-- [ ] list_rule_groups
-- [ ] list_rules
-- [ ] list_size_constraint_sets
-- [ ] list_sql_injection_match_sets
-- [ ] list_subscribed_rule_groups
-- [ ] list_web_acls
-- [ ] list_xss_match_sets
-- [ ] put_permission_policy
-- [ ] update_byte_match_set
-- [ ] update_geo_match_set
-- [ ] update_ip_set
-- [ ] update_rate_based_rule
-- [ ] update_regex_match_set
-- [ ] update_regex_pattern_set
-- [ ] update_rule
-- [ ] update_rule_group
-- [ ] update_size_constraint_set
-- [ ] update_sql_injection_match_set
-- [ ] update_web_acl
-- [ ] update_xss_match_set
-
-## waf-regional - 0% implemented
-- [ ] associate_web_acl
-- [ ] create_byte_match_set
-- [ ] create_geo_match_set
-- [ ] create_ip_set
-- [ ] create_rate_based_rule
-- [ ] create_regex_match_set
-- [ ] create_regex_pattern_set
-- [ ] create_rule
-- [ ] create_rule_group
-- [ ] create_size_constraint_set
-- [ ] create_sql_injection_match_set
-- [ ] create_web_acl
-- [ ] create_xss_match_set
-- [ ] delete_byte_match_set
-- [ ] delete_geo_match_set
-- [ ] delete_ip_set
-- [ ] delete_permission_policy
-- [ ] delete_rate_based_rule
-- [ ] delete_regex_match_set
-- [ ] delete_regex_pattern_set
-- [ ] delete_rule
-- [ ] delete_rule_group
-- [ ] delete_size_constraint_set
-- [ ] delete_sql_injection_match_set
-- [ ] delete_web_acl
-- [ ] delete_xss_match_set
-- [ ] disassociate_web_acl
-- [ ] get_byte_match_set
-- [ ] get_change_token
-- [ ] get_change_token_status
-- [ ] get_geo_match_set
-- [ ] get_ip_set
-- [ ] get_permission_policy
-- [ ] get_rate_based_rule
-- [ ] get_rate_based_rule_managed_keys
-- [ ] get_regex_match_set
-- [ ] get_regex_pattern_set
-- [ ] get_rule
-- [ ] get_rule_group
-- [ ] get_sampled_requests
-- [ ] get_size_constraint_set
-- [ ] get_sql_injection_match_set
-- [ ] get_web_acl
-- [ ] get_web_acl_for_resource
-- [ ] get_xss_match_set
-- [ ] list_activated_rules_in_rule_group
-- [ ] list_byte_match_sets
-- [ ] list_geo_match_sets
-- [ ] list_ip_sets
-- [ ] list_rate_based_rules
-- [ ] list_regex_match_sets
-- [ ] list_regex_pattern_sets
-- [ ] list_resources_for_web_acl
-- [ ] list_rule_groups
-- [ ] list_rules
-- [ ] list_size_constraint_sets
-- [ ] list_sql_injection_match_sets
-- [ ] list_subscribed_rule_groups
-- [ ] list_web_acls
-- [ ] list_xss_match_sets
-- [ ] put_permission_policy
-- [ ] update_byte_match_set
-- [ ] update_geo_match_set
-- [ ] update_ip_set
-- [ ] update_rate_based_rule
-- [ ] update_regex_match_set
-- [ ] update_regex_pattern_set
-- [ ] update_rule
-- [ ] update_rule_group
-- [ ] update_size_constraint_set
-- [ ] update_sql_injection_match_set
-- [ ] update_web_acl
-- [ ] update_xss_match_set
-
-## workdocs - 0% implemented
-- [ ] abort_document_version_upload
-- [ ] activate_user
-- [ ] add_resource_permissions
-- [ ] create_comment
-- [ ] create_custom_metadata
-- [ ] create_folder
-- [ ] create_labels
-- [ ] create_notification_subscription
-- [ ] create_user
-- [ ] deactivate_user
-- [ ] delete_comment
-- [ ] delete_custom_metadata
-- [ ] delete_document
-- [ ] delete_folder
-- [ ] delete_folder_contents
-- [ ] delete_labels
-- [ ] delete_notification_subscription
-- [ ] delete_user
-- [ ] describe_activities
-- [ ] describe_comments
-- [ ] describe_document_versions
-- [ ] describe_folder_contents
-- [ ] describe_groups
-- [ ] describe_notification_subscriptions
-- [ ] describe_resource_permissions
-- [ ] describe_root_folders
-- [ ] describe_users
-- [ ] get_current_user
-- [ ] get_document
-- [ ] get_document_path
-- [ ] get_document_version
-- [ ] get_folder
-- [ ] get_folder_path
-- [ ] initiate_document_version_upload
-- [ ] remove_all_resource_permissions
-- [ ] remove_resource_permission
-- [ ] update_document
-- [ ] update_document_version
-- [ ] update_folder
-- [ ] update_user
-
-## workmail - 0% implemented
-- [ ] associate_delegate_to_resource
-- [ ] associate_member_to_group
-- [ ] create_alias
-- [ ] create_group
-- [ ] create_resource
-- [ ] create_user
-- [ ] delete_alias
-- [ ] delete_group
-- [ ] delete_mailbox_permissions
-- [ ] delete_resource
-- [ ] delete_user
-- [ ] deregister_from_work_mail
-- [ ] describe_group
-- [ ] describe_organization
-- [ ] describe_resource
-- [ ] describe_user
-- [ ] disassociate_delegate_from_resource
-- [ ] disassociate_member_from_group
-- [ ] list_aliases
-- [ ] list_group_members
-- [ ] list_groups
-- [ ] list_mailbox_permissions
-- [ ] list_organizations
-- [ ] list_resource_delegates
-- [ ] list_resources
-- [ ] list_users
-- [ ] put_mailbox_permissions
-- [ ] register_to_work_mail
-- [ ] reset_password
-- [ ] update_primary_email_address
-- [ ] update_resource
-
-## workspaces - 0% implemented
-- [ ] create_tags
-- [ ] create_workspaces
-- [ ] delete_tags
-- [ ] describe_tags
-- [ ] describe_workspace_bundles
-- [ ] describe_workspace_directories
-- [ ] describe_workspaces
-- [ ] describe_workspaces_connection_status
-- [ ] modify_workspace_properties
-- [ ] reboot_workspaces
-- [ ] rebuild_workspaces
-- [ ] start_workspaces
-- [ ] stop_workspaces
-- [ ] terminate_workspaces
-
-## xray - 0% implemented
-- [ ] batch_get_traces
-- [ ] get_service_graph
-- [ ] get_trace_graph
-- [ ] get_trace_summaries
-- [ ] put_telemetry_records
-- [ ] put_trace_segments
+
+## acm - 41% implemented
+- [X] add_tags_to_certificate
+- [X] delete_certificate
+- [ ] describe_certificate
+- [ ] export_certificate
+- [X] get_certificate
+- [ ] import_certificate
+- [ ] list_certificates
+- [ ] list_tags_for_certificate
+- [X] remove_tags_from_certificate
+- [X] request_certificate
+- [ ] resend_validation_email
+- [ ] update_certificate_options
+
+## acm-pca - 0% implemented
+- [ ] create_certificate_authority
+- [ ] create_certificate_authority_audit_report
+- [ ] delete_certificate_authority
+- [ ] describe_certificate_authority
+- [ ] describe_certificate_authority_audit_report
+- [ ] get_certificate
+- [ ] get_certificate_authority_certificate
+- [ ] get_certificate_authority_csr
+- [ ] import_certificate_authority_certificate
+- [ ] issue_certificate
+- [ ] list_certificate_authorities
+- [ ] list_tags
+- [ ] restore_certificate_authority
+- [ ] revoke_certificate
+- [ ] tag_certificate_authority
+- [ ] untag_certificate_authority
+- [ ] update_certificate_authority
+
+## alexaforbusiness - 0% implemented
+- [ ] associate_contact_with_address_book
+- [ ] associate_device_with_room
+- [ ] associate_skill_group_with_room
+- [ ] create_address_book
+- [ ] create_contact
+- [ ] create_profile
+- [ ] create_room
+- [ ] create_skill_group
+- [ ] create_user
+- [ ] delete_address_book
+- [ ] delete_contact
+- [ ] delete_profile
+- [ ] delete_room
+- [ ] delete_room_skill_parameter
+- [ ] delete_skill_group
+- [ ] delete_user
+- [ ] disassociate_contact_from_address_book
+- [ ] disassociate_device_from_room
+- [ ] disassociate_skill_group_from_room
+- [ ] get_address_book
+- [ ] get_contact
+- [ ] get_device
+- [ ] get_profile
+- [ ] get_room
+- [ ] get_room_skill_parameter
+- [ ] get_skill_group
+- [ ] list_device_events
+- [ ] list_skills
+- [ ] list_tags
+- [ ] put_room_skill_parameter
+- [ ] resolve_room
+- [ ] revoke_invitation
+- [ ] search_address_books
+- [ ] search_contacts
+- [ ] search_devices
+- [ ] search_profiles
+- [ ] search_rooms
+- [ ] search_skill_groups
+- [ ] search_users
+- [ ] send_invitation
+- [ ] start_device_sync
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_address_book
+- [ ] update_contact
+- [ ] update_device
+- [ ] update_profile
+- [ ] update_room
+- [ ] update_skill_group
+
+## apigateway - 24% implemented
+- [ ] create_api_key
+- [ ] create_authorizer
+- [ ] create_base_path_mapping
+- [X] create_deployment
+- [ ] create_documentation_part
+- [ ] create_documentation_version
+- [ ] create_domain_name
+- [ ] create_model
+- [ ] create_request_validator
+- [X] create_resource
+- [X] create_rest_api
+- [X] create_stage
+- [X] create_usage_plan
+- [X] create_usage_plan_key
+- [ ] create_vpc_link
+- [ ] delete_api_key
+- [ ] delete_authorizer
+- [ ] delete_base_path_mapping
+- [ ] delete_client_certificate
+- [X] delete_deployment
+- [ ] delete_documentation_part
+- [ ] delete_documentation_version
+- [ ] delete_domain_name
+- [ ] delete_gateway_response
+- [X] delete_integration
+- [X] delete_integration_response
+- [ ] delete_method
+- [X] delete_method_response
+- [ ] delete_model
+- [ ] delete_request_validator
+- [X] delete_resource
+- [X] delete_rest_api
+- [ ] delete_stage
+- [X] delete_usage_plan
+- [X] delete_usage_plan_key
+- [ ] delete_vpc_link
+- [ ] flush_stage_authorizers_cache
+- [ ] flush_stage_cache
+- [ ] generate_client_certificate
+- [ ] get_account
+- [ ] get_api_key
+- [ ] get_api_keys
+- [ ] get_authorizer
+- [ ] get_authorizers
+- [ ] get_base_path_mapping
+- [ ] get_base_path_mappings
+- [ ] get_client_certificate
+- [ ] get_client_certificates
+- [X] get_deployment
+- [X] get_deployments
+- [ ] get_documentation_part
+- [ ] get_documentation_parts
+- [ ] get_documentation_version
+- [ ] get_documentation_versions
+- [ ] get_domain_name
+- [ ] get_domain_names
+- [ ] get_export
+- [ ] get_gateway_response
+- [ ] get_gateway_responses
+- [X] get_integration
+- [X] get_integration_response
+- [X] get_method
+- [X] get_method_response
+- [ ] get_model
+- [ ] get_model_template
+- [ ] get_models
+- [ ] get_request_validator
+- [ ] get_request_validators
+- [X] get_resource
+- [ ] get_resources
+- [X] get_rest_api
+- [ ] get_rest_apis
+- [ ] get_sdk
+- [ ] get_sdk_type
+- [ ] get_sdk_types
+- [X] get_stage
+- [X] get_stages
+- [ ] get_tags
+- [ ] get_usage
+- [X] get_usage_plan
+- [X] get_usage_plan_key
+- [X] get_usage_plan_keys
+- [X] get_usage_plans
+- [ ] get_vpc_link
+- [ ] get_vpc_links
+- [ ] import_api_keys
+- [ ] import_documentation_parts
+- [ ] import_rest_api
+- [ ] put_gateway_response
+- [ ] put_integration
+- [ ] put_integration_response
+- [ ] put_method
+- [ ] put_method_response
+- [ ] put_rest_api
+- [ ] tag_resource
+- [ ] test_invoke_authorizer
+- [ ] test_invoke_method
+- [ ] untag_resource
+- [ ] update_account
+- [ ] update_api_key
+- [ ] update_authorizer
+- [ ] update_base_path_mapping
+- [ ] update_client_certificate
+- [ ] update_deployment
+- [ ] update_documentation_part
+- [ ] update_documentation_version
+- [ ] update_domain_name
+- [ ] update_gateway_response
+- [ ] update_integration
+- [ ] update_integration_response
+- [ ] update_method
+- [ ] update_method_response
+- [ ] update_model
+- [ ] update_request_validator
+- [ ] update_resource
+- [ ] update_rest_api
+- [X] update_stage
+- [ ] update_usage
+- [ ] update_usage_plan
+- [ ] update_vpc_link
+
+## application-autoscaling - 0% implemented
+- [ ] delete_scaling_policy
+- [ ] delete_scheduled_action
+- [ ] deregister_scalable_target
+- [ ] describe_scalable_targets
+- [ ] describe_scaling_activities
+- [ ] describe_scaling_policies
+- [ ] describe_scheduled_actions
+- [ ] put_scaling_policy
+- [ ] put_scheduled_action
+- [ ] register_scalable_target
+
+## appstream - 0% implemented
+- [ ] associate_fleet
+- [ ] copy_image
+- [ ] create_directory_config
+- [ ] create_fleet
+- [ ] create_image_builder
+- [ ] create_image_builder_streaming_url
+- [ ] create_stack
+- [ ] create_streaming_url
+- [ ] delete_directory_config
+- [ ] delete_fleet
+- [ ] delete_image
+- [ ] delete_image_builder
+- [ ] delete_image_permissions
+- [ ] delete_stack
+- [ ] describe_directory_configs
+- [ ] describe_fleets
+- [ ] describe_image_builders
+- [ ] describe_image_permissions
+- [ ] describe_images
+- [ ] describe_sessions
+- [ ] describe_stacks
+- [ ] disassociate_fleet
+- [ ] expire_session
+- [ ] list_associated_fleets
+- [ ] list_associated_stacks
+- [ ] list_tags_for_resource
+- [ ] start_fleet
+- [ ] start_image_builder
+- [ ] stop_fleet
+- [ ] stop_image_builder
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_directory_config
+- [ ] update_fleet
+- [ ] update_image_permissions
+- [ ] update_stack
+
+## appsync - 0% implemented
+- [ ] create_api_key
+- [ ] create_data_source
+- [ ] create_graphql_api
+- [ ] create_resolver
+- [ ] create_type
+- [ ] delete_api_key
+- [ ] delete_data_source
+- [ ] delete_graphql_api
+- [ ] delete_resolver
+- [ ] delete_type
+- [ ] get_data_source
+- [ ] get_graphql_api
+- [ ] get_introspection_schema
+- [ ] get_resolver
+- [ ] get_schema_creation_status
+- [ ] get_type
+- [ ] list_api_keys
+- [ ] list_data_sources
+- [ ] list_graphql_apis
+- [ ] list_resolvers
+- [ ] list_types
+- [ ] start_schema_creation
+- [ ] update_api_key
+- [ ] update_data_source
+- [ ] update_graphql_api
+- [ ] update_resolver
+- [ ] update_type
+
+## athena - 0% implemented
+- [ ] batch_get_named_query
+- [ ] batch_get_query_execution
+- [ ] create_named_query
+- [ ] delete_named_query
+- [ ] get_named_query
+- [ ] get_query_execution
+- [ ] get_query_results
+- [ ] list_named_queries
+- [ ] list_query_executions
+- [ ] start_query_execution
+- [ ] stop_query_execution
+
+## autoscaling - 44% implemented
+- [X] attach_instances
+- [X] attach_load_balancer_target_groups
+- [X] attach_load_balancers
+- [ ] complete_lifecycle_action
+- [X] create_auto_scaling_group
+- [X] create_launch_configuration
+- [X] create_or_update_tags
+- [X] delete_auto_scaling_group
+- [X] delete_launch_configuration
+- [ ] delete_lifecycle_hook
+- [ ] delete_notification_configuration
+- [X] delete_policy
+- [ ] delete_scheduled_action
+- [ ] delete_tags
+- [ ] describe_account_limits
+- [ ] describe_adjustment_types
+- [X] describe_auto_scaling_groups
+- [X] describe_auto_scaling_instances
+- [ ] describe_auto_scaling_notification_types
+- [X] describe_launch_configurations
+- [ ] describe_lifecycle_hook_types
+- [ ] describe_lifecycle_hooks
+- [X] describe_load_balancer_target_groups
+- [X] describe_load_balancers
+- [ ] describe_metric_collection_types
+- [ ] describe_notification_configurations
+- [X] describe_policies
+- [ ] describe_scaling_activities
+- [ ] describe_scaling_process_types
+- [ ] describe_scheduled_actions
+- [ ] describe_tags
+- [ ] describe_termination_policy_types
+- [X] detach_instances
+- [X] detach_load_balancer_target_groups
+- [X] detach_load_balancers
+- [ ] disable_metrics_collection
+- [ ] enable_metrics_collection
+- [ ] enter_standby
+- [X] execute_policy
+- [ ] exit_standby
+- [ ] put_lifecycle_hook
+- [ ] put_notification_configuration
+- [ ] put_scaling_policy
+- [ ] put_scheduled_update_group_action
+- [ ] record_lifecycle_action_heartbeat
+- [ ] resume_processes
+- [X] set_desired_capacity
+- [X] set_instance_health
+- [ ] set_instance_protection
+- [X] suspend_processes
+- [ ] terminate_instance_in_auto_scaling_group
+- [X] update_auto_scaling_group
+
+## autoscaling-plans - 0% implemented
+- [ ] create_scaling_plan
+- [ ] delete_scaling_plan
+- [ ] describe_scaling_plan_resources
+- [ ] describe_scaling_plans
+- [ ] update_scaling_plan
+
+## batch - 93% implemented
+- [ ] cancel_job
+- [X] create_compute_environment
+- [X] create_job_queue
+- [X] delete_compute_environment
+- [X] delete_job_queue
+- [X] deregister_job_definition
+- [X] describe_compute_environments
+- [X] describe_job_definitions
+- [X] describe_job_queues
+- [X] describe_jobs
+- [X] list_jobs
+- [X] register_job_definition
+- [X] submit_job
+- [X] terminate_job
+- [X] update_compute_environment
+- [X] update_job_queue
+
+## budgets - 0% implemented
+- [ ] create_budget
+- [ ] create_notification
+- [ ] create_subscriber
+- [ ] delete_budget
+- [ ] delete_notification
+- [ ] delete_subscriber
+- [ ] describe_budget
+- [ ] describe_budgets
+- [ ] describe_notifications_for_budget
+- [ ] describe_subscribers_for_notification
+- [ ] update_budget
+- [ ] update_notification
+- [ ] update_subscriber
+
+## ce - 0% implemented
+- [ ] get_cost_and_usage
+- [ ] get_dimension_values
+- [ ] get_reservation_coverage
+- [ ] get_reservation_purchase_recommendation
+- [ ] get_reservation_utilization
+- [ ] get_tags
+
+## cloud9 - 0% implemented
+- [ ] create_environment_ec2
+- [ ] create_environment_membership
+- [ ] delete_environment
+- [ ] delete_environment_membership
+- [ ] describe_environment_memberships
+- [ ] describe_environment_status
+- [ ] describe_environments
+- [ ] list_environments
+- [ ] update_environment
+- [ ] update_environment_membership
+
+## clouddirectory - 0% implemented
+- [ ] add_facet_to_object
+- [ ] apply_schema
+- [ ] attach_object
+- [ ] attach_policy
+- [ ] attach_to_index
+- [ ] attach_typed_link
+- [ ] batch_read
+- [ ] batch_write
+- [ ] create_directory
+- [ ] create_facet
+- [ ] create_index
+- [ ] create_object
+- [ ] create_schema
+- [ ] create_typed_link_facet
+- [ ] delete_directory
+- [ ] delete_facet
+- [ ] delete_object
+- [ ] delete_schema
+- [ ] delete_typed_link_facet
+- [ ] detach_from_index
+- [ ] detach_object
+- [ ] detach_policy
+- [ ] detach_typed_link
+- [ ] disable_directory
+- [ ] enable_directory
+- [ ] get_applied_schema_version
+- [ ] get_directory
+- [ ] get_facet
+- [ ] get_link_attributes
+- [ ] get_object_attributes
+- [ ] get_object_information
+- [ ] get_schema_as_json
+- [ ] get_typed_link_facet_information
+- [ ] list_applied_schema_arns
+- [ ] list_attached_indices
+- [ ] list_development_schema_arns
+- [ ] list_directories
+- [ ] list_facet_attributes
+- [ ] list_facet_names
+- [ ] list_incoming_typed_links
+- [ ] list_index
+- [ ] list_managed_schema_arns
+- [ ] list_object_attributes
+- [ ] list_object_children
+- [ ] list_object_parent_paths
+- [ ] list_object_parents
+- [ ] list_object_policies
+- [ ] list_outgoing_typed_links
+- [ ] list_policy_attachments
+- [ ] list_published_schema_arns
+- [ ] list_tags_for_resource
+- [ ] list_typed_link_facet_attributes
+- [ ] list_typed_link_facet_names
+- [ ] lookup_policy
+- [ ] publish_schema
+- [ ] put_schema_from_json
+- [ ] remove_facet_from_object
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_facet
+- [ ] update_link_attributes
+- [ ] update_object_attributes
+- [ ] update_schema
+- [ ] update_typed_link_facet
+- [ ] upgrade_applied_schema
+- [ ] upgrade_published_schema
+
+## cloudformation - 21% implemented
+- [ ] cancel_update_stack
+- [ ] continue_update_rollback
+- [X] create_change_set
+- [X] create_stack
+- [ ] create_stack_instances
+- [ ] create_stack_set
+- [ ] delete_change_set
+- [X] delete_stack
+- [ ] delete_stack_instances
+- [ ] delete_stack_set
+- [ ] describe_account_limits
+- [ ] describe_change_set
+- [ ] describe_stack_events
+- [ ] describe_stack_instance
+- [ ] describe_stack_resource
+- [ ] describe_stack_resources
+- [ ] describe_stack_set
+- [ ] describe_stack_set_operation
+- [X] describe_stacks
+- [ ] estimate_template_cost
+- [X] execute_change_set
+- [ ] get_stack_policy
+- [ ] get_template
+- [ ] get_template_summary
+- [ ] list_change_sets
+- [X] list_exports
+- [ ] list_imports
+- [ ] list_stack_instances
+- [X] list_stack_resources
+- [ ] list_stack_set_operation_results
+- [ ] list_stack_set_operations
+- [ ] list_stack_sets
+- [X] list_stacks
+- [ ] set_stack_policy
+- [ ] signal_resource
+- [ ] stop_stack_set_operation
+- [X] update_stack
+- [ ] update_stack_instances
+- [ ] update_stack_set
+- [ ] update_termination_protection
+- [ ] validate_template
+
+## cloudfront - 0% implemented
+- [ ] create_cloud_front_origin_access_identity
+- [ ] create_distribution
+- [ ] create_distribution_with_tags
+- [ ] create_field_level_encryption_config
+- [ ] create_field_level_encryption_profile
+- [ ] create_invalidation
+- [ ] create_public_key
+- [ ] create_streaming_distribution
+- [ ] create_streaming_distribution_with_tags
+- [ ] delete_cloud_front_origin_access_identity
+- [ ] delete_distribution
+- [ ] delete_field_level_encryption_config
+- [ ] delete_field_level_encryption_profile
+- [ ] delete_public_key
+- [ ] delete_streaming_distribution
+- [ ] get_cloud_front_origin_access_identity
+- [ ] get_cloud_front_origin_access_identity_config
+- [ ] get_distribution
+- [ ] get_distribution_config
+- [ ] get_field_level_encryption
+- [ ] get_field_level_encryption_config
+- [ ] get_field_level_encryption_profile
+- [ ] get_field_level_encryption_profile_config
+- [ ] get_invalidation
+- [ ] get_public_key
+- [ ] get_public_key_config
+- [ ] get_streaming_distribution
+- [ ] get_streaming_distribution_config
+- [ ] list_cloud_front_origin_access_identities
+- [ ] list_distributions
+- [ ] list_distributions_by_web_acl_id
+- [ ] list_field_level_encryption_configs
+- [ ] list_field_level_encryption_profiles
+- [ ] list_invalidations
+- [ ] list_public_keys
+- [ ] list_streaming_distributions
+- [ ] list_tags_for_resource
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_cloud_front_origin_access_identity
+- [ ] update_distribution
+- [ ] update_field_level_encryption_config
+- [ ] update_field_level_encryption_profile
+- [ ] update_public_key
+- [ ] update_streaming_distribution
+
+## cloudhsm - 0% implemented
+- [ ] add_tags_to_resource
+- [ ] create_hapg
+- [ ] create_hsm
+- [ ] create_luna_client
+- [ ] delete_hapg
+- [ ] delete_hsm
+- [ ] delete_luna_client
+- [ ] describe_hapg
+- [ ] describe_hsm
+- [ ] describe_luna_client
+- [ ] get_config
+- [ ] list_available_zones
+- [ ] list_hapgs
+- [ ] list_hsms
+- [ ] list_luna_clients
+- [ ] list_tags_for_resource
+- [ ] modify_hapg
+- [ ] modify_hsm
+- [ ] modify_luna_client
+- [ ] remove_tags_from_resource
+
+## cloudhsmv2 - 0% implemented
+- [ ] copy_backup_to_region
+- [ ] create_cluster
+- [ ] create_hsm
+- [ ] delete_cluster
+- [ ] delete_hsm
+- [ ] describe_backups
+- [ ] describe_clusters
+- [ ] initialize_cluster
+- [ ] list_tags
+- [ ] tag_resource
+- [ ] untag_resource
+
+## cloudsearch - 0% implemented
+- [ ] build_suggesters
+- [ ] create_domain
+- [ ] define_analysis_scheme
+- [ ] define_expression
+- [ ] define_index_field
+- [ ] define_suggester
+- [ ] delete_analysis_scheme
+- [ ] delete_domain
+- [ ] delete_expression
+- [ ] delete_index_field
+- [ ] delete_suggester
+- [ ] describe_analysis_schemes
+- [ ] describe_availability_options
+- [ ] describe_domains
+- [ ] describe_expressions
+- [ ] describe_index_fields
+- [ ] describe_scaling_parameters
+- [ ] describe_service_access_policies
+- [ ] describe_suggesters
+- [ ] index_documents
+- [ ] list_domain_names
+- [ ] update_availability_options
+- [ ] update_scaling_parameters
+- [ ] update_service_access_policies
+
+## cloudsearchdomain - 0% implemented
+- [ ] search
+- [ ] suggest
+- [ ] upload_documents
+
+## cloudtrail - 0% implemented
+- [ ] add_tags
+- [ ] create_trail
+- [ ] delete_trail
+- [ ] describe_trails
+- [ ] get_event_selectors
+- [ ] get_trail_status
+- [ ] list_public_keys
+- [ ] list_tags
+- [ ] lookup_events
+- [ ] put_event_selectors
+- [ ] remove_tags
+- [ ] start_logging
+- [ ] stop_logging
+- [ ] update_trail
+
+## cloudwatch - 56% implemented
+- [X] delete_alarms
+- [X] delete_dashboards
+- [ ] describe_alarm_history
+- [ ] describe_alarms
+- [ ] describe_alarms_for_metric
+- [ ] disable_alarm_actions
+- [ ] enable_alarm_actions
+- [X] get_dashboard
+- [ ] get_metric_data
+- [X] get_metric_statistics
+- [X] list_dashboards
+- [ ] list_metrics
+- [X] put_dashboard
+- [X] put_metric_alarm
+- [X] put_metric_data
+- [X] set_alarm_state
+
+## codebuild - 0% implemented
+- [ ] batch_delete_builds
+- [ ] batch_get_builds
+- [ ] batch_get_projects
+- [ ] create_project
+- [ ] create_webhook
+- [ ] delete_project
+- [ ] delete_webhook
+- [ ] invalidate_project_cache
+- [ ] list_builds
+- [ ] list_builds_for_project
+- [ ] list_curated_environment_images
+- [ ] list_projects
+- [ ] start_build
+- [ ] stop_build
+- [ ] update_project
+- [ ] update_webhook
+
+## codecommit - 0% implemented
+- [ ] batch_get_repositories
+- [ ] create_branch
+- [ ] create_pull_request
+- [ ] create_repository
+- [ ] delete_branch
+- [ ] delete_comment_content
+- [ ] delete_repository
+- [ ] describe_pull_request_events
+- [ ] get_blob
+- [ ] get_branch
+- [ ] get_comment
+- [ ] get_comments_for_compared_commit
+- [ ] get_comments_for_pull_request
+- [ ] get_commit
+- [ ] get_differences
+- [ ] get_merge_conflicts
+- [ ] get_pull_request
+- [ ] get_repository
+- [ ] get_repository_triggers
+- [ ] list_branches
+- [ ] list_pull_requests
+- [ ] list_repositories
+- [ ] merge_pull_request_by_fast_forward
+- [ ] post_comment_for_compared_commit
+- [ ] post_comment_for_pull_request
+- [ ] post_comment_reply
+- [ ] put_file
+- [ ] put_repository_triggers
+- [ ] test_repository_triggers
+- [ ] update_comment
+- [ ] update_default_branch
+- [ ] update_pull_request_description
+- [ ] update_pull_request_status
+- [ ] update_pull_request_title
+- [ ] update_repository_description
+- [ ] update_repository_name
+
+## codedeploy - 0% implemented
+- [ ] add_tags_to_on_premises_instances
+- [ ] batch_get_application_revisions
+- [ ] batch_get_applications
+- [ ] batch_get_deployment_groups
+- [ ] batch_get_deployment_instances
+- [ ] batch_get_deployments
+- [ ] batch_get_on_premises_instances
+- [ ] continue_deployment
+- [ ] create_application
+- [ ] create_deployment
+- [ ] create_deployment_config
+- [ ] create_deployment_group
+- [ ] delete_application
+- [ ] delete_deployment_config
+- [ ] delete_deployment_group
+- [ ] delete_git_hub_account_token
+- [ ] deregister_on_premises_instance
+- [ ] get_application
+- [ ] get_application_revision
+- [ ] get_deployment
+- [ ] get_deployment_config
+- [ ] get_deployment_group
+- [ ] get_deployment_instance
+- [ ] get_on_premises_instance
+- [ ] list_application_revisions
+- [ ] list_applications
+- [ ] list_deployment_configs
+- [ ] list_deployment_groups
+- [ ] list_deployment_instances
+- [ ] list_deployments
+- [ ] list_git_hub_account_token_names
+- [ ] list_on_premises_instances
+- [ ] put_lifecycle_event_hook_execution_status
+- [ ] register_application_revision
+- [ ] register_on_premises_instance
+- [ ] remove_tags_from_on_premises_instances
+- [ ] skip_wait_time_for_instance_termination
+- [ ] stop_deployment
+- [ ] update_application
+- [ ] update_deployment_group
+
+## codepipeline - 0% implemented
+- [ ] acknowledge_job
+- [ ] acknowledge_third_party_job
+- [ ] create_custom_action_type
+- [ ] create_pipeline
+- [ ] delete_custom_action_type
+- [ ] delete_pipeline
+- [ ] delete_webhook
+- [ ] deregister_webhook_with_third_party
+- [ ] disable_stage_transition
+- [ ] enable_stage_transition
+- [ ] get_job_details
+- [ ] get_pipeline
+- [ ] get_pipeline_execution
+- [ ] get_pipeline_state
+- [ ] get_third_party_job_details
+- [ ] list_action_types
+- [ ] list_pipeline_executions
+- [ ] list_pipelines
+- [ ] list_webhooks
+- [ ] poll_for_jobs
+- [ ] poll_for_third_party_jobs
+- [ ] put_action_revision
+- [ ] put_approval_result
+- [ ] put_job_failure_result
+- [ ] put_job_success_result
+- [ ] put_third_party_job_failure_result
+- [ ] put_third_party_job_success_result
+- [ ] put_webhook
+- [ ] register_webhook_with_third_party
+- [ ] retry_stage_execution
+- [ ] start_pipeline_execution
+- [ ] update_pipeline
+
+## codestar - 0% implemented
+- [ ] associate_team_member
+- [ ] create_project
+- [ ] create_user_profile
+- [ ] delete_project
+- [ ] delete_user_profile
+- [ ] describe_project
+- [ ] describe_user_profile
+- [ ] disassociate_team_member
+- [ ] list_projects
+- [ ] list_resources
+- [ ] list_tags_for_project
+- [ ] list_team_members
+- [ ] list_user_profiles
+- [ ] tag_project
+- [ ] untag_project
+- [ ] update_project
+- [ ] update_team_member
+- [ ] update_user_profile
+
+## cognito-identity - 22% implemented
+- [X] create_identity_pool
+- [ ] delete_identities
+- [ ] delete_identity_pool
+- [ ] describe_identity
+- [ ] describe_identity_pool
+- [X] get_credentials_for_identity
+- [X] get_id
+- [ ] get_identity_pool_roles
+- [ ] get_open_id_token
+- [X] get_open_id_token_for_developer_identity
+- [ ] list_identities
+- [ ] list_identity_pools
+- [ ] lookup_developer_identity
+- [ ] merge_developer_identities
+- [ ] set_identity_pool_roles
+- [ ] unlink_developer_identity
+- [ ] unlink_identity
+- [ ] update_identity_pool
+
+## cognito-idp - 25% implemented
+- [ ] add_custom_attributes
+- [ ] admin_add_user_to_group
+- [ ] admin_confirm_sign_up
+- [X] admin_create_user
+- [X] admin_delete_user
+- [ ] admin_delete_user_attributes
+- [ ] admin_disable_provider_for_user
+- [ ] admin_disable_user
+- [ ] admin_enable_user
+- [ ] admin_forget_device
+- [ ] admin_get_device
+- [X] admin_get_user
+- [X] admin_initiate_auth
+- [ ] admin_link_provider_for_user
+- [ ] admin_list_devices
+- [ ] admin_list_groups_for_user
+- [ ] admin_list_user_auth_events
+- [ ] admin_remove_user_from_group
+- [ ] admin_reset_user_password
+- [ ] admin_respond_to_auth_challenge
+- [ ] admin_set_user_mfa_preference
+- [ ] admin_set_user_settings
+- [ ] admin_update_auth_event_feedback
+- [ ] admin_update_device_status
+- [ ] admin_update_user_attributes
+- [ ] admin_user_global_sign_out
+- [ ] associate_software_token
+- [X] change_password
+- [ ] confirm_device
+- [X] confirm_forgot_password
+- [ ] confirm_sign_up
+- [ ] create_group
+- [X] create_identity_provider
+- [ ] create_resource_server
+- [ ] create_user_import_job
+- [X] create_user_pool
+- [X] create_user_pool_client
+- [X] create_user_pool_domain
+- [ ] delete_group
+- [X] delete_identity_provider
+- [ ] delete_resource_server
+- [ ] delete_user
+- [ ] delete_user_attributes
+- [X] delete_user_pool
+- [X] delete_user_pool_client
+- [X] delete_user_pool_domain
+- [X] describe_identity_provider
+- [ ] describe_resource_server
+- [ ] describe_risk_configuration
+- [ ] describe_user_import_job
+- [X] describe_user_pool
+- [X] describe_user_pool_client
+- [X] describe_user_pool_domain
+- [ ] forget_device
+- [ ] forgot_password
+- [ ] get_csv_header
+- [ ] get_device
+- [ ] get_group
+- [ ] get_identity_provider_by_identifier
+- [ ] get_signing_certificate
+- [ ] get_ui_customization
+- [ ] get_user
+- [ ] get_user_attribute_verification_code
+- [ ] get_user_pool_mfa_config
+- [ ] global_sign_out
+- [ ] initiate_auth
+- [ ] list_devices
+- [ ] list_groups
+- [X] list_identity_providers
+- [ ] list_resource_servers
+- [ ] list_user_import_jobs
+- [X] list_user_pool_clients
+- [X] list_user_pools
+- [X] list_users
+- [ ] list_users_in_group
+- [ ] resend_confirmation_code
+- [X] respond_to_auth_challenge
+- [ ] set_risk_configuration
+- [ ] set_ui_customization
+- [ ] set_user_mfa_preference
+- [ ] set_user_pool_mfa_config
+- [ ] set_user_settings
+- [ ] sign_up
+- [ ] start_user_import_job
+- [ ] stop_user_import_job
+- [ ] update_auth_event_feedback
+- [ ] update_device_status
+- [ ] update_group
+- [ ] update_identity_provider
+- [ ] update_resource_server
+- [ ] update_user_attributes
+- [ ] update_user_pool
+- [X] update_user_pool_client
+- [ ] verify_software_token
+- [ ] verify_user_attribute
+
+## cognito-sync - 0% implemented
+- [ ] bulk_publish
+- [ ] delete_dataset
+- [ ] describe_dataset
+- [ ] describe_identity_pool_usage
+- [ ] describe_identity_usage
+- [ ] get_bulk_publish_details
+- [ ] get_cognito_events
+- [ ] get_identity_pool_configuration
+- [ ] list_datasets
+- [ ] list_identity_pool_usage
+- [ ] list_records
+- [ ] register_device
+- [ ] set_cognito_events
+- [ ] set_identity_pool_configuration
+- [ ] subscribe_to_dataset
+- [ ] unsubscribe_from_dataset
+- [ ] update_records
+
+## comprehend - 0% implemented
+- [ ] batch_detect_dominant_language
+- [ ] batch_detect_entities
+- [ ] batch_detect_key_phrases
+- [ ] batch_detect_sentiment
+- [ ] batch_detect_syntax
+- [ ] describe_dominant_language_detection_job
+- [ ] describe_entities_detection_job
+- [ ] describe_key_phrases_detection_job
+- [ ] describe_sentiment_detection_job
+- [ ] describe_topics_detection_job
+- [ ] detect_dominant_language
+- [ ] detect_entities
+- [ ] detect_key_phrases
+- [ ] detect_sentiment
+- [ ] detect_syntax
+- [ ] list_dominant_language_detection_jobs
+- [ ] list_entities_detection_jobs
+- [ ] list_key_phrases_detection_jobs
+- [ ] list_sentiment_detection_jobs
+- [ ] list_topics_detection_jobs
+- [ ] start_dominant_language_detection_job
+- [ ] start_entities_detection_job
+- [ ] start_key_phrases_detection_job
+- [ ] start_sentiment_detection_job
+- [ ] start_topics_detection_job
+- [ ] stop_dominant_language_detection_job
+- [ ] stop_entities_detection_job
+- [ ] stop_key_phrases_detection_job
+- [ ] stop_sentiment_detection_job
+
+## config - 0% implemented
+- [ ] batch_get_resource_config
+- [ ] delete_aggregation_authorization
+- [ ] delete_config_rule
+- [ ] delete_configuration_aggregator
+- [ ] delete_configuration_recorder
+- [ ] delete_delivery_channel
+- [ ] delete_evaluation_results
+- [ ] delete_pending_aggregation_request
+- [ ] delete_retention_configuration
+- [ ] deliver_config_snapshot
+- [ ] describe_aggregate_compliance_by_config_rules
+- [ ] describe_aggregation_authorizations
+- [ ] describe_compliance_by_config_rule
+- [ ] describe_compliance_by_resource
+- [ ] describe_config_rule_evaluation_status
+- [ ] describe_config_rules
+- [ ] describe_configuration_aggregator_sources_status
+- [ ] describe_configuration_aggregators
+- [ ] describe_configuration_recorder_status
+- [ ] describe_configuration_recorders
+- [ ] describe_delivery_channel_status
+- [ ] describe_delivery_channels
+- [ ] describe_pending_aggregation_requests
+- [ ] describe_retention_configurations
+- [ ] get_aggregate_compliance_details_by_config_rule
+- [ ] get_aggregate_config_rule_compliance_summary
+- [ ] get_compliance_details_by_config_rule
+- [ ] get_compliance_details_by_resource
+- [ ] get_compliance_summary_by_config_rule
+- [ ] get_compliance_summary_by_resource_type
+- [ ] get_discovered_resource_counts
+- [ ] get_resource_config_history
+- [ ] list_discovered_resources
+- [ ] put_aggregation_authorization
+- [ ] put_config_rule
+- [ ] put_configuration_aggregator
+- [ ] put_configuration_recorder
+- [ ] put_delivery_channel
+- [ ] put_evaluations
+- [ ] put_retention_configuration
+- [ ] start_config_rules_evaluation
+- [ ] start_configuration_recorder
+- [ ] stop_configuration_recorder
+
+## connect - 0% implemented
+- [ ] create_user
+- [ ] delete_user
+- [ ] describe_user
+- [ ] describe_user_hierarchy_group
+- [ ] describe_user_hierarchy_structure
+- [ ] get_federation_token
+- [ ] list_routing_profiles
+- [ ] list_security_profiles
+- [ ] list_user_hierarchy_groups
+- [ ] list_users
+- [ ] start_outbound_voice_contact
+- [ ] stop_contact
+- [ ] update_user_hierarchy
+- [ ] update_user_identity_info
+- [ ] update_user_phone_config
+- [ ] update_user_routing_profile
+- [ ] update_user_security_profiles
+
+## cur - 0% implemented
+- [ ] delete_report_definition
+- [ ] describe_report_definitions
+- [ ] put_report_definition
+
+## datapipeline - 42% implemented
+- [X] activate_pipeline
+- [ ] add_tags
+- [X] create_pipeline
+- [ ] deactivate_pipeline
+- [X] delete_pipeline
+- [X] describe_objects
+- [X] describe_pipelines
+- [ ] evaluate_expression
+- [X] get_pipeline_definition
+- [X] list_pipelines
+- [ ] poll_for_task
+- [X] put_pipeline_definition
+- [ ] query_objects
+- [ ] remove_tags
+- [ ] report_task_progress
+- [ ] report_task_runner_heartbeat
+- [ ] set_status
+- [ ] set_task_status
+- [ ] validate_pipeline_definition
+
+## dax - 0% implemented
+- [ ] create_cluster
+- [ ] create_parameter_group
+- [ ] create_subnet_group
+- [ ] decrease_replication_factor
+- [ ] delete_cluster
+- [ ] delete_parameter_group
+- [ ] delete_subnet_group
+- [ ] describe_clusters
+- [ ] describe_default_parameters
+- [ ] describe_events
+- [ ] describe_parameter_groups
+- [ ] describe_parameters
+- [ ] describe_subnet_groups
+- [ ] increase_replication_factor
+- [ ] list_tags
+- [ ] reboot_node
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_cluster
+- [ ] update_parameter_group
+- [ ] update_subnet_group
+
+## devicefarm - 0% implemented
+- [ ] create_device_pool
+- [ ] create_instance_profile
+- [ ] create_network_profile
+- [ ] create_project
+- [ ] create_remote_access_session
+- [ ] create_upload
+- [ ] create_vpce_configuration
+- [ ] delete_device_pool
+- [ ] delete_instance_profile
+- [ ] delete_network_profile
+- [ ] delete_project
+- [ ] delete_remote_access_session
+- [ ] delete_run
+- [ ] delete_upload
+- [ ] delete_vpce_configuration
+- [ ] get_account_settings
+- [ ] get_device
+- [ ] get_device_instance
+- [ ] get_device_pool
+- [ ] get_device_pool_compatibility
+- [ ] get_instance_profile
+- [ ] get_job
+- [ ] get_network_profile
+- [ ] get_offering_status
+- [ ] get_project
+- [ ] get_remote_access_session
+- [ ] get_run
+- [ ] get_suite
+- [ ] get_test
+- [ ] get_upload
+- [ ] get_vpce_configuration
+- [ ] install_to_remote_access_session
+- [ ] list_artifacts
+- [ ] list_device_instances
+- [ ] list_device_pools
+- [ ] list_devices
+- [ ] list_instance_profiles
+- [ ] list_jobs
+- [ ] list_network_profiles
+- [ ] list_offering_promotions
+- [ ] list_offering_transactions
+- [ ] list_offerings
+- [ ] list_projects
+- [ ] list_remote_access_sessions
+- [ ] list_runs
+- [ ] list_samples
+- [ ] list_suites
+- [ ] list_tests
+- [ ] list_unique_problems
+- [ ] list_uploads
+- [ ] list_vpce_configurations
+- [ ] purchase_offering
+- [ ] renew_offering
+- [ ] schedule_run
+- [ ] stop_remote_access_session
+- [ ] stop_run
+- [ ] update_device_instance
+- [ ] update_device_pool
+- [ ] update_instance_profile
+- [ ] update_network_profile
+- [ ] update_project
+- [ ] update_vpce_configuration
+
+## directconnect - 0% implemented
+- [ ] allocate_connection_on_interconnect
+- [ ] allocate_hosted_connection
+- [ ] allocate_private_virtual_interface
+- [ ] allocate_public_virtual_interface
+- [ ] associate_connection_with_lag
+- [ ] associate_hosted_connection
+- [ ] associate_virtual_interface
+- [ ] confirm_connection
+- [ ] confirm_private_virtual_interface
+- [ ] confirm_public_virtual_interface
+- [ ] create_bgp_peer
+- [ ] create_connection
+- [ ] create_direct_connect_gateway
+- [ ] create_direct_connect_gateway_association
+- [ ] create_interconnect
+- [ ] create_lag
+- [ ] create_private_virtual_interface
+- [ ] create_public_virtual_interface
+- [ ] delete_bgp_peer
+- [ ] delete_connection
+- [ ] delete_direct_connect_gateway
+- [ ] delete_direct_connect_gateway_association
+- [ ] delete_interconnect
+- [ ] delete_lag
+- [ ] delete_virtual_interface
+- [ ] describe_connection_loa
+- [ ] describe_connections
+- [ ] describe_connections_on_interconnect
+- [ ] describe_direct_connect_gateway_associations
+- [ ] describe_direct_connect_gateway_attachments
+- [ ] describe_direct_connect_gateways
+- [ ] describe_hosted_connections
+- [ ] describe_interconnect_loa
+- [ ] describe_interconnects
+- [ ] describe_lags
+- [ ] describe_loa
+- [ ] describe_locations
+- [ ] describe_tags
+- [ ] describe_virtual_gateways
+- [ ] describe_virtual_interfaces
+- [ ] disassociate_connection_from_lag
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_lag
+
+## discovery - 0% implemented
+- [ ] associate_configuration_items_to_application
+- [ ] create_application
+- [ ] create_tags
+- [ ] delete_applications
+- [ ] delete_tags
+- [ ] describe_agents
+- [ ] describe_configurations
+- [ ] describe_export_configurations
+- [ ] describe_export_tasks
+- [ ] describe_tags
+- [ ] disassociate_configuration_items_from_application
+- [ ] export_configurations
+- [ ] get_discovery_summary
+- [ ] list_configurations
+- [ ] list_server_neighbors
+- [ ] start_data_collection_by_agent_ids
+- [ ] start_export_task
+- [ ] stop_data_collection_by_agent_ids
+- [ ] update_application
+
+## dlm - 0% implemented
+- [ ] create_lifecycle_policy
+- [ ] delete_lifecycle_policy
+- [ ] get_lifecycle_policies
+- [ ] get_lifecycle_policy
+- [ ] update_lifecycle_policy
+
+## dms - 0% implemented
+- [ ] add_tags_to_resource
+- [ ] create_endpoint
+- [ ] create_event_subscription
+- [ ] create_replication_instance
+- [ ] create_replication_subnet_group
+- [ ] create_replication_task
+- [ ] delete_certificate
+- [ ] delete_endpoint
+- [ ] delete_event_subscription
+- [ ] delete_replication_instance
+- [ ] delete_replication_subnet_group
+- [ ] delete_replication_task
+- [ ] describe_account_attributes
+- [ ] describe_certificates
+- [ ] describe_connections
+- [ ] describe_endpoint_types
+- [ ] describe_endpoints
+- [ ] describe_event_categories
+- [ ] describe_event_subscriptions
+- [ ] describe_events
+- [ ] describe_orderable_replication_instances
+- [ ] describe_refresh_schemas_status
+- [ ] describe_replication_instance_task_logs
+- [ ] describe_replication_instances
+- [ ] describe_replication_subnet_groups
+- [ ] describe_replication_task_assessment_results
+- [ ] describe_replication_tasks
+- [ ] describe_schemas
+- [ ] describe_table_statistics
+- [ ] import_certificate
+- [ ] list_tags_for_resource
+- [ ] modify_endpoint
+- [ ] modify_event_subscription
+- [ ] modify_replication_instance
+- [ ] modify_replication_subnet_group
+- [ ] modify_replication_task
+- [ ] reboot_replication_instance
+- [ ] refresh_schemas
+- [ ] reload_tables
+- [ ] remove_tags_from_resource
+- [ ] start_replication_task
+- [ ] start_replication_task_assessment
+- [ ] stop_replication_task
+- [ ] test_connection
+
+## ds - 0% implemented
+- [ ] add_ip_routes
+- [ ] add_tags_to_resource
+- [ ] cancel_schema_extension
+- [ ] connect_directory
+- [ ] create_alias
+- [ ] create_computer
+- [ ] create_conditional_forwarder
+- [ ] create_directory
+- [ ] create_microsoft_ad
+- [ ] create_snapshot
+- [ ] create_trust
+- [ ] delete_conditional_forwarder
+- [ ] delete_directory
+- [ ] delete_snapshot
+- [ ] delete_trust
+- [ ] deregister_event_topic
+- [ ] describe_conditional_forwarders
+- [ ] describe_directories
+- [ ] describe_domain_controllers
+- [ ] describe_event_topics
+- [ ] describe_snapshots
+- [ ] describe_trusts
+- [ ] disable_radius
+- [ ] disable_sso
+- [ ] enable_radius
+- [ ] enable_sso
+- [ ] get_directory_limits
+- [ ] get_snapshot_limits
+- [ ] list_ip_routes
+- [ ] list_schema_extensions
+- [ ] list_tags_for_resource
+- [ ] register_event_topic
+- [ ] remove_ip_routes
+- [ ] remove_tags_from_resource
+- [ ] reset_user_password
+- [ ] restore_from_snapshot
+- [ ] start_schema_extension
+- [ ] update_conditional_forwarder
+- [ ] update_number_of_domain_controllers
+- [ ] update_radius
+- [ ] verify_trust
+
+## dynamodb - 21% implemented
+- [ ] batch_get_item
+- [ ] batch_write_item
+- [ ] create_backup
+- [ ] create_global_table
+- [X] create_table
+- [ ] delete_backup
+- [X] delete_item
+- [X] delete_table
+- [ ] describe_backup
+- [ ] describe_continuous_backups
+- [ ] describe_global_table
+- [ ] describe_global_table_settings
+- [ ] describe_limits
+- [ ] describe_table
+- [ ] describe_time_to_live
+- [X] get_item
+- [ ] list_backups
+- [ ] list_global_tables
+- [ ] list_tables
+- [ ] list_tags_of_resource
+- [X] put_item
+- [X] query
+- [ ] restore_table_from_backup
+- [ ] restore_table_to_point_in_time
+- [X] scan
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_continuous_backups
+- [ ] update_global_table
+- [ ] update_global_table_settings
+- [ ] update_item
+- [ ] update_table
+- [ ] update_time_to_live
+
+## dynamodbstreams - 0% implemented
+- [ ] describe_stream
+- [ ] get_records
+- [ ] get_shard_iterator
+- [ ] list_streams
+
+## ec2 - 36% implemented
+- [ ] accept_reserved_instances_exchange_quote
+- [ ] accept_vpc_endpoint_connections
+- [X] accept_vpc_peering_connection
+- [X] allocate_address
+- [ ] allocate_hosts
+- [ ] assign_ipv6_addresses
+- [ ] assign_private_ip_addresses
+- [X] associate_address
+- [X] associate_dhcp_options
+- [ ] associate_iam_instance_profile
+- [X] associate_route_table
+- [ ] associate_subnet_cidr_block
+- [X] associate_vpc_cidr_block
+- [ ] attach_classic_link_vpc
+- [X] attach_internet_gateway
+- [X] attach_network_interface
+- [X] attach_volume
+- [X] attach_vpn_gateway
+- [X] authorize_security_group_egress
+- [X] authorize_security_group_ingress
+- [ ] bundle_instance
+- [ ] cancel_bundle_task
+- [ ] cancel_conversion_task
+- [ ] cancel_export_task
+- [ ] cancel_import_task
+- [ ] cancel_reserved_instances_listing
+- [X] cancel_spot_fleet_requests
+- [X] cancel_spot_instance_requests
+- [ ] confirm_product_instance
+- [ ] copy_fpga_image
+- [X] copy_image
+- [X] copy_snapshot
+- [X] create_customer_gateway
+- [ ] create_default_subnet
+- [ ] create_default_vpc
+- [X] create_dhcp_options
+- [ ] create_egress_only_internet_gateway
+- [ ] create_fleet
+- [ ] create_flow_logs
+- [ ] create_fpga_image
+- [X] create_image
+- [ ] create_instance_export_task
+- [X] create_internet_gateway
+- [X] create_key_pair
+- [ ] create_launch_template
+- [ ] create_launch_template_version
+- [X] create_nat_gateway
+- [X] create_network_acl
+- [X] create_network_acl_entry
+- [X] create_network_interface
+- [ ] create_network_interface_permission
+- [ ] create_placement_group
+- [ ] create_reserved_instances_listing
+- [X] create_route
+- [X] create_route_table
+- [X] create_security_group
+- [X] create_snapshot
+- [ ] create_spot_datafeed_subscription
+- [X] create_subnet
+- [X] create_tags
+- [X] create_volume
+- [X] create_vpc
+- [ ] create_vpc_endpoint
+- [ ] create_vpc_endpoint_connection_notification
+- [ ] create_vpc_endpoint_service_configuration
+- [X] create_vpc_peering_connection
+- [X] create_vpn_connection
+- [ ] create_vpn_connection_route
+- [X] create_vpn_gateway
+- [X] delete_customer_gateway
+- [ ] delete_dhcp_options
+- [ ] delete_egress_only_internet_gateway
+- [ ] delete_fleets
+- [ ] delete_flow_logs
+- [ ] delete_fpga_image
+- [X] delete_internet_gateway
+- [X] delete_key_pair
+- [ ] delete_launch_template
+- [ ] delete_launch_template_versions
+- [X] delete_nat_gateway
+- [X] delete_network_acl
+- [X] delete_network_acl_entry
+- [X] delete_network_interface
+- [ ] delete_network_interface_permission
+- [ ] delete_placement_group
+- [X] delete_route
+- [X] delete_route_table
+- [X] delete_security_group
+- [X] delete_snapshot
+- [ ] delete_spot_datafeed_subscription
+- [X] delete_subnet
+- [X] delete_tags
+- [X] delete_volume
+- [X] delete_vpc
+- [ ] delete_vpc_endpoint_connection_notifications
+- [ ] delete_vpc_endpoint_service_configurations
+- [ ] delete_vpc_endpoints
+- [X] delete_vpc_peering_connection
+- [X] delete_vpn_connection
+- [ ] delete_vpn_connection_route
+- [X] delete_vpn_gateway
+- [X] deregister_image
+- [ ] describe_account_attributes
+- [X] describe_addresses
+- [ ] describe_aggregate_id_format
+- [X] describe_availability_zones
+- [ ] describe_bundle_tasks
+- [ ] describe_classic_link_instances
+- [ ] describe_conversion_tasks
+- [ ] describe_customer_gateways
+- [X] describe_dhcp_options
+- [ ] describe_egress_only_internet_gateways
+- [ ] describe_elastic_gpus
+- [ ] describe_export_tasks
+- [ ] describe_fleet_history
+- [ ] describe_fleet_instances
+- [ ] describe_fleets
+- [ ] describe_flow_logs
+- [ ] describe_fpga_image_attribute
+- [ ] describe_fpga_images
+- [ ] describe_host_reservation_offerings
+- [ ] describe_host_reservations
+- [ ] describe_hosts
+- [ ] describe_iam_instance_profile_associations
+- [ ] describe_id_format
+- [ ] describe_identity_id_format
+- [ ] describe_image_attribute
+- [X] describe_images
+- [ ] describe_import_image_tasks
+- [ ] describe_import_snapshot_tasks
+- [X] describe_instance_attribute
+- [ ] describe_instance_credit_specifications
+- [ ] describe_instance_status
+- [ ] describe_instances
+- [X] describe_internet_gateways
+- [X] describe_key_pairs
+- [ ] describe_launch_template_versions
+- [ ] describe_launch_templates
+- [ ] describe_moving_addresses
+- [ ] describe_nat_gateways
+- [ ] describe_network_acls
+- [ ] describe_network_interface_attribute
+- [ ] describe_network_interface_permissions
+- [X] describe_network_interfaces
+- [ ] describe_placement_groups
+- [ ] describe_prefix_lists
+- [ ] describe_principal_id_format
+- [X] describe_regions
+- [ ] describe_reserved_instances
+- [ ] describe_reserved_instances_listings
+- [ ] describe_reserved_instances_modifications
+- [ ] describe_reserved_instances_offerings
+- [ ] describe_route_tables
+- [ ] describe_scheduled_instance_availability
+- [ ] describe_scheduled_instances
+- [ ] describe_security_group_references
+- [X] describe_security_groups
+- [ ] describe_snapshot_attribute
+- [X] describe_snapshots
+- [ ] describe_spot_datafeed_subscription
+- [X] describe_spot_fleet_instances
+- [ ] describe_spot_fleet_request_history
+- [X] describe_spot_fleet_requests
+- [X] describe_spot_instance_requests
+- [ ] describe_spot_price_history
+- [ ] describe_stale_security_groups
+- [ ] describe_subnets
+- [X] describe_tags
+- [ ] describe_volume_attribute
+- [ ] describe_volume_status
+- [X] describe_volumes
+- [ ] describe_volumes_modifications
+- [X] describe_vpc_attribute
+- [ ] describe_vpc_classic_link
+- [ ] describe_vpc_classic_link_dns_support
+- [ ] describe_vpc_endpoint_connection_notifications
+- [ ] describe_vpc_endpoint_connections
+- [ ] describe_vpc_endpoint_service_configurations
+- [ ] describe_vpc_endpoint_service_permissions
+- [ ] describe_vpc_endpoint_services
+- [ ] describe_vpc_endpoints
+- [ ] describe_vpc_peering_connections
+- [ ] describe_vpcs
+- [X] describe_vpn_connections
+- [ ] describe_vpn_gateways
+- [ ] detach_classic_link_vpc
+- [X] detach_internet_gateway
+- [X] detach_network_interface
+- [X] detach_volume
+- [X] detach_vpn_gateway
+- [ ] disable_vgw_route_propagation
+- [ ] disable_vpc_classic_link
+- [ ] disable_vpc_classic_link_dns_support
+- [X] disassociate_address
+- [ ] disassociate_iam_instance_profile
+- [X] disassociate_route_table
+- [ ] disassociate_subnet_cidr_block
+- [X] disassociate_vpc_cidr_block
+- [ ] enable_vgw_route_propagation
+- [ ] enable_volume_io
+- [ ] enable_vpc_classic_link
+- [ ] enable_vpc_classic_link_dns_support
+- [ ] get_console_output
+- [ ] get_console_screenshot
+- [ ] get_host_reservation_purchase_preview
+- [ ] get_launch_template_data
+- [ ] get_password_data
+- [ ] get_reserved_instances_exchange_quote
+- [ ] import_image
+- [ ] import_instance
+- [X] import_key_pair
+- [ ] import_snapshot
+- [ ] import_volume
+- [ ] modify_fleet
+- [ ] modify_fpga_image_attribute
+- [ ] modify_hosts
+- [ ] modify_id_format
+- [ ] modify_identity_id_format
+- [ ] modify_image_attribute
+- [X] modify_instance_attribute
+- [ ] modify_instance_credit_specification
+- [ ] modify_instance_placement
+- [ ] modify_launch_template
+- [X] modify_network_interface_attribute
+- [ ] modify_reserved_instances
+- [ ] modify_snapshot_attribute
+- [X] modify_spot_fleet_request
+- [X] modify_subnet_attribute
+- [ ] modify_volume
+- [ ] modify_volume_attribute
+- [X] modify_vpc_attribute
+- [ ] modify_vpc_endpoint
+- [ ] modify_vpc_endpoint_connection_notification
+- [ ] modify_vpc_endpoint_service_configuration
+- [ ] modify_vpc_endpoint_service_permissions
+- [ ] modify_vpc_peering_connection_options
+- [ ] modify_vpc_tenancy
+- [ ] monitor_instances
+- [ ] move_address_to_vpc
+- [ ] purchase_host_reservation
+- [ ] purchase_reserved_instances_offering
+- [ ] purchase_scheduled_instances
+- [X] reboot_instances
+- [ ] register_image
+- [ ] reject_vpc_endpoint_connections
+- [X] reject_vpc_peering_connection
+- [X] release_address
+- [ ] release_hosts
+- [ ] replace_iam_instance_profile_association
+- [X] replace_network_acl_association
+- [X] replace_network_acl_entry
+- [X] replace_route
+- [X] replace_route_table_association
+- [ ] report_instance_status
+- [X] request_spot_fleet
+- [X] request_spot_instances
+- [ ] reset_fpga_image_attribute
+- [ ] reset_image_attribute
+- [ ] reset_instance_attribute
+- [ ] reset_network_interface_attribute
+- [ ] reset_snapshot_attribute
+- [ ] restore_address_to_classic
+- [X] revoke_security_group_egress
+- [X] revoke_security_group_ingress
+- [ ] run_instances
+- [ ] run_scheduled_instances
+- [X] start_instances
+- [X] stop_instances
+- [X] terminate_instances
+- [ ] unassign_ipv6_addresses
+- [ ] unassign_private_ip_addresses
+- [ ] unmonitor_instances
+- [ ] update_security_group_rule_descriptions_egress
+- [ ] update_security_group_rule_descriptions_ingress
+
+## ecr - 31% implemented
+- [ ] batch_check_layer_availability
+- [ ] batch_delete_image
+- [X] batch_get_image
+- [ ] complete_layer_upload
+- [X] create_repository
+- [ ] delete_lifecycle_policy
+- [X] delete_repository
+- [ ] delete_repository_policy
+- [X] describe_images
+- [X] describe_repositories
+- [ ] get_authorization_token
+- [ ] get_download_url_for_layer
+- [ ] get_lifecycle_policy
+- [ ] get_lifecycle_policy_preview
+- [ ] get_repository_policy
+- [ ] initiate_layer_upload
+- [X] list_images
+- [X] put_image
+- [ ] put_lifecycle_policy
+- [ ] set_repository_policy
+- [ ] start_lifecycle_policy_preview
+- [ ] upload_layer_part
+
+## ecs - 87% implemented
+- [X] create_cluster
+- [X] create_service
+- [X] delete_attributes
+- [X] delete_cluster
+- [X] delete_service
+- [X] deregister_container_instance
+- [X] deregister_task_definition
+- [X] describe_clusters
+- [X] describe_container_instances
+- [X] describe_services
+- [X] describe_task_definition
+- [X] describe_tasks
+- [ ] discover_poll_endpoint
+- [X] list_attributes
+- [X] list_clusters
+- [X] list_container_instances
+- [X] list_services
+- [X] list_task_definition_families
+- [X] list_task_definitions
+- [X] list_tasks
+- [X] put_attributes
+- [X] register_container_instance
+- [X] register_task_definition
+- [X] run_task
+- [X] start_task
+- [X] stop_task
+- [ ] submit_container_state_change
+- [ ] submit_task_state_change
+- [ ] update_container_agent
+- [X] update_container_instances_state
+- [X] update_service
+
+## efs - 0% implemented
+- [ ] create_file_system
+- [ ] create_mount_target
+- [ ] create_tags
+- [ ] delete_file_system
+- [ ] delete_mount_target
+- [ ] delete_tags
+- [ ] describe_file_systems
+- [ ] describe_mount_target_security_groups
+- [ ] describe_mount_targets
+- [ ] describe_tags
+- [ ] modify_mount_target_security_groups
+- [ ] update_file_system
+
+## eks - 0% implemented
+- [ ] create_cluster
+- [ ] delete_cluster
+- [ ] describe_cluster
+- [ ] list_clusters
+
+## elasticache - 0% implemented
+- [ ] add_tags_to_resource
+- [ ] authorize_cache_security_group_ingress
+- [ ] copy_snapshot
+- [ ] create_cache_cluster
+- [ ] create_cache_parameter_group
+- [ ] create_cache_security_group
+- [ ] create_cache_subnet_group
+- [ ] create_replication_group
+- [ ] create_snapshot
+- [ ] delete_cache_cluster
+- [ ] delete_cache_parameter_group
+- [ ] delete_cache_security_group
+- [ ] delete_cache_subnet_group
+- [ ] delete_replication_group
+- [ ] delete_snapshot
+- [ ] describe_cache_clusters
+- [ ] describe_cache_engine_versions
+- [ ] describe_cache_parameter_groups
+- [ ] describe_cache_parameters
+- [ ] describe_cache_security_groups
+- [ ] describe_cache_subnet_groups
+- [ ] describe_engine_default_parameters
+- [ ] describe_events
+- [ ] describe_replication_groups
+- [ ] describe_reserved_cache_nodes
+- [ ] describe_reserved_cache_nodes_offerings
+- [ ] describe_snapshots
+- [ ] list_allowed_node_type_modifications
+- [ ] list_tags_for_resource
+- [ ] modify_cache_cluster
+- [ ] modify_cache_parameter_group
+- [ ] modify_cache_subnet_group
+- [ ] modify_replication_group
+- [ ] modify_replication_group_shard_configuration
+- [ ] purchase_reserved_cache_nodes_offering
+- [ ] reboot_cache_cluster
+- [ ] remove_tags_from_resource
+- [ ] reset_cache_parameter_group
+- [ ] revoke_cache_security_group_ingress
+- [ ] test_failover
+
+## elasticbeanstalk - 0% implemented
+- [ ] abort_environment_update
+- [ ] apply_environment_managed_action
+- [ ] check_dns_availability
+- [ ] compose_environments
+- [ ] create_application
+- [ ] create_application_version
+- [ ] create_configuration_template
+- [ ] create_environment
+- [ ] create_platform_version
+- [ ] create_storage_location
+- [ ] delete_application
+- [ ] delete_application_version
+- [ ] delete_configuration_template
+- [ ] delete_environment_configuration
+- [ ] delete_platform_version
+- [ ] describe_account_attributes
+- [ ] describe_application_versions
+- [ ] describe_applications
+- [ ] describe_configuration_options
+- [ ] describe_configuration_settings
+- [ ] describe_environment_health
+- [ ] describe_environment_managed_action_history
+- [ ] describe_environment_managed_actions
+- [ ] describe_environment_resources
+- [ ] describe_environments
+- [ ] describe_events
+- [ ] describe_instances_health
+- [ ] describe_platform_version
+- [ ] list_available_solution_stacks
+- [ ] list_platform_versions
+- [ ] list_tags_for_resource
+- [ ] rebuild_environment
+- [ ] request_environment_info
+- [ ] restart_app_server
+- [ ] retrieve_environment_info
+- [ ] swap_environment_cnames
+- [ ] terminate_environment
+- [ ] update_application
+- [ ] update_application_resource_lifecycle
+- [ ] update_application_version
+- [ ] update_configuration_template
+- [ ] update_environment
+- [ ] update_tags_for_resource
+- [ ] validate_configuration_settings
+
+## elastictranscoder - 0% implemented
+- [ ] cancel_job
+- [ ] create_job
+- [ ] create_pipeline
+- [ ] create_preset
+- [ ] delete_pipeline
+- [ ] delete_preset
+- [ ] list_jobs_by_pipeline
+- [ ] list_jobs_by_status
+- [ ] list_pipelines
+- [ ] list_presets
+- [ ] read_job
+- [ ] read_pipeline
+- [ ] read_preset
+- [ ] test_role
+- [ ] update_pipeline
+- [ ] update_pipeline_notifications
+- [ ] update_pipeline_status
+
+## elb - 34% implemented
+- [ ] add_tags
+- [X] apply_security_groups_to_load_balancer
+- [ ] attach_load_balancer_to_subnets
+- [X] configure_health_check
+- [X] create_app_cookie_stickiness_policy
+- [X] create_lb_cookie_stickiness_policy
+- [X] create_load_balancer
+- [X] create_load_balancer_listeners
+- [ ] create_load_balancer_policy
+- [X] delete_load_balancer
+- [X] delete_load_balancer_listeners
+- [ ] delete_load_balancer_policy
+- [ ] deregister_instances_from_load_balancer
+- [ ] describe_account_limits
+- [ ] describe_instance_health
+- [ ] describe_load_balancer_attributes
+- [ ] describe_load_balancer_policies
+- [ ] describe_load_balancer_policy_types
+- [X] describe_load_balancers
+- [ ] describe_tags
+- [ ] detach_load_balancer_from_subnets
+- [ ] disable_availability_zones_for_load_balancer
+- [ ] enable_availability_zones_for_load_balancer
+- [ ] modify_load_balancer_attributes
+- [ ] register_instances_with_load_balancer
+- [ ] remove_tags
+- [ ] set_load_balancer_listener_ssl_certificate
+- [ ] set_load_balancer_policies_for_backend_server
+- [X] set_load_balancer_policies_of_listener
+
+## elbv2 - 70% implemented
+- [ ] add_listener_certificates
+- [ ] add_tags
+- [X] create_listener
+- [X] create_load_balancer
+- [X] create_rule
+- [X] create_target_group
+- [X] delete_listener
+- [X] delete_load_balancer
+- [X] delete_rule
+- [X] delete_target_group
+- [X] deregister_targets
+- [ ] describe_account_limits
+- [ ] describe_listener_certificates
+- [X] describe_listeners
+- [X] describe_load_balancer_attributes
+- [X] describe_load_balancers
+- [X] describe_rules
+- [ ] describe_ssl_policies
+- [ ] describe_tags
+- [ ] describe_target_group_attributes
+- [X] describe_target_groups
+- [X] describe_target_health
+- [X] modify_listener
+- [X] modify_load_balancer_attributes
+- [X] modify_rule
+- [X] modify_target_group
+- [ ] modify_target_group_attributes
+- [X] register_targets
+- [ ] remove_listener_certificates
+- [ ] remove_tags
+- [X] set_ip_address_type
+- [X] set_rule_priorities
+- [X] set_security_groups
+- [X] set_subnets
+
+## emr - 55% implemented
+- [ ] add_instance_fleet
+- [X] add_instance_groups
+- [X] add_job_flow_steps
+- [X] add_tags
+- [ ] cancel_steps
+- [ ] create_security_configuration
+- [ ] delete_security_configuration
+- [ ] describe_cluster
+- [X] describe_job_flows
+- [ ] describe_security_configuration
+- [X] describe_step
+- [X] list_bootstrap_actions
+- [X] list_clusters
+- [ ] list_instance_fleets
+- [X] list_instance_groups
+- [ ] list_instances
+- [ ] list_security_configurations
+- [X] list_steps
+- [ ] modify_instance_fleet
+- [X] modify_instance_groups
+- [ ] put_auto_scaling_policy
+- [ ] remove_auto_scaling_policy
+- [X] remove_tags
+- [X] run_job_flow
+- [X] set_termination_protection
+- [X] set_visible_to_all_users
+- [X] terminate_job_flows
+
+## es - 0% implemented
+- [ ] add_tags
+- [ ] create_elasticsearch_domain
+- [ ] delete_elasticsearch_domain
+- [ ] delete_elasticsearch_service_role
+- [ ] describe_elasticsearch_domain
+- [ ] describe_elasticsearch_domain_config
+- [ ] describe_elasticsearch_domains
+- [ ] describe_elasticsearch_instance_type_limits
+- [ ] describe_reserved_elasticsearch_instance_offerings
+- [ ] describe_reserved_elasticsearch_instances
+- [ ] list_domain_names
+- [ ] list_elasticsearch_instance_types
+- [ ] list_elasticsearch_versions
+- [ ] list_tags
+- [ ] purchase_reserved_elasticsearch_instance_offering
+- [ ] remove_tags
+- [ ] update_elasticsearch_domain_config
+
+## events - 100% implemented
+- [X] delete_rule
+- [X] describe_event_bus
+- [X] describe_rule
+- [X] disable_rule
+- [X] enable_rule
+- [X] list_rule_names_by_target
+- [X] list_rules
+- [X] list_targets_by_rule
+- [X] put_events
+- [X] put_permission
+- [X] put_rule
+- [X] put_targets
+- [X] remove_permission
+- [X] remove_targets
+- [X] test_event_pattern
+
+## firehose - 0% implemented
+- [ ] create_delivery_stream
+- [ ] delete_delivery_stream
+- [ ] describe_delivery_stream
+- [ ] list_delivery_streams
+- [ ] list_tags_for_delivery_stream
+- [ ] put_record
+- [ ] put_record_batch
+- [ ] tag_delivery_stream
+- [ ] untag_delivery_stream
+- [ ] update_destination
+
+## fms - 0% implemented
+- [ ] associate_admin_account
+- [ ] delete_notification_channel
+- [ ] delete_policy
+- [ ] disassociate_admin_account
+- [ ] get_admin_account
+- [ ] get_compliance_detail
+- [ ] get_notification_channel
+- [ ] get_policy
+- [ ] list_compliance_status
+- [ ] list_policies
+- [ ] put_notification_channel
+- [ ] put_policy
+
+## gamelift - 0% implemented
+- [ ] accept_match
+- [ ] create_alias
+- [ ] create_build
+- [ ] create_fleet
+- [ ] create_game_session
+- [ ] create_game_session_queue
+- [ ] create_matchmaking_configuration
+- [ ] create_matchmaking_rule_set
+- [ ] create_player_session
+- [ ] create_player_sessions
+- [ ] create_vpc_peering_authorization
+- [ ] create_vpc_peering_connection
+- [ ] delete_alias
+- [ ] delete_build
+- [ ] delete_fleet
+- [ ] delete_game_session_queue
+- [ ] delete_matchmaking_configuration
+- [ ] delete_scaling_policy
+- [ ] delete_vpc_peering_authorization
+- [ ] delete_vpc_peering_connection
+- [ ] describe_alias
+- [ ] describe_build
+- [ ] describe_ec2_instance_limits
+- [ ] describe_fleet_attributes
+- [ ] describe_fleet_capacity
+- [ ] describe_fleet_events
+- [ ] describe_fleet_port_settings
+- [ ] describe_fleet_utilization
+- [ ] describe_game_session_details
+- [ ] describe_game_session_placement
+- [ ] describe_game_session_queues
+- [ ] describe_game_sessions
+- [ ] describe_instances
+- [ ] describe_matchmaking
+- [ ] describe_matchmaking_configurations
+- [ ] describe_matchmaking_rule_sets
+- [ ] describe_player_sessions
+- [ ] describe_runtime_configuration
+- [ ] describe_scaling_policies
+- [ ] describe_vpc_peering_authorizations
+- [ ] describe_vpc_peering_connections
+- [ ] get_game_session_log_url
+- [ ] get_instance_access
+- [ ] list_aliases
+- [ ] list_builds
+- [ ] list_fleets
+- [ ] put_scaling_policy
+- [ ] request_upload_credentials
+- [ ] resolve_alias
+- [ ] search_game_sessions
+- [ ] start_fleet_actions
+- [ ] start_game_session_placement
+- [ ] start_match_backfill
+- [ ] start_matchmaking
+- [ ] stop_fleet_actions
+- [ ] stop_game_session_placement
+- [ ] stop_matchmaking
+- [ ] update_alias
+- [ ] update_build
+- [ ] update_fleet_attributes
+- [ ] update_fleet_capacity
+- [ ] update_fleet_port_settings
+- [ ] update_game_session
+- [ ] update_game_session_queue
+- [ ] update_matchmaking_configuration
+- [ ] update_runtime_configuration
+- [ ] validate_matchmaking_rule_set
+
+## glacier - 12% implemented
+- [ ] abort_multipart_upload
+- [ ] abort_vault_lock
+- [ ] add_tags_to_vault
+- [ ] complete_multipart_upload
+- [ ] complete_vault_lock
+- [X] create_vault
+- [ ] delete_archive
+- [X] delete_vault
+- [ ] delete_vault_access_policy
+- [ ] delete_vault_notifications
+- [ ] describe_job
+- [ ] describe_vault
+- [ ] get_data_retrieval_policy
+- [ ] get_job_output
+- [ ] get_vault_access_policy
+- [ ] get_vault_lock
+- [ ] get_vault_notifications
+- [X] initiate_job
+- [ ] initiate_multipart_upload
+- [ ] initiate_vault_lock
+- [X] list_jobs
+- [ ] list_multipart_uploads
+- [ ] list_parts
+- [ ] list_provisioned_capacity
+- [ ] list_tags_for_vault
+- [ ] list_vaults
+- [ ] purchase_provisioned_capacity
+- [ ] remove_tags_from_vault
+- [ ] set_data_retrieval_policy
+- [ ] set_vault_access_policy
+- [ ] set_vault_notifications
+- [ ] upload_archive
+- [ ] upload_multipart_part
+
+## glue - 6% implemented
+- [ ] batch_create_partition
+- [ ] batch_delete_connection
+- [ ] batch_delete_partition
+- [ ] batch_delete_table
+- [ ] batch_delete_table_version
+- [ ] batch_get_partition
+- [ ] batch_stop_job_run
+- [ ] create_classifier
+- [ ] create_connection
+- [ ] create_crawler
+- [X] create_database
+- [ ] create_dev_endpoint
+- [ ] create_job
+- [ ] create_partition
+- [ ] create_script
+- [X] create_table
+- [ ] create_trigger
+- [ ] create_user_defined_function
+- [ ] delete_classifier
+- [ ] delete_connection
+- [ ] delete_crawler
+- [ ] delete_database
+- [ ] delete_dev_endpoint
+- [ ] delete_job
+- [ ] delete_partition
+- [ ] delete_table
+- [ ] delete_table_version
+- [ ] delete_trigger
+- [ ] delete_user_defined_function
+- [ ] get_catalog_import_status
+- [ ] get_classifier
+- [ ] get_classifiers
+- [ ] get_connection
+- [ ] get_connections
+- [ ] get_crawler
+- [ ] get_crawler_metrics
+- [ ] get_crawlers
+- [X] get_database
+- [ ] get_databases
+- [ ] get_dataflow_graph
+- [ ] get_dev_endpoint
+- [ ] get_dev_endpoints
+- [ ] get_job
+- [ ] get_job_run
+- [ ] get_job_runs
+- [ ] get_jobs
+- [ ] get_mapping
+- [ ] get_partition
+- [ ] get_partitions
+- [ ] get_plan
+- [X] get_table
+- [ ] get_table_version
+- [ ] get_table_versions
+- [X] get_tables
+- [ ] get_trigger
+- [ ] get_triggers
+- [ ] get_user_defined_function
+- [ ] get_user_defined_functions
+- [ ] import_catalog_to_glue
+- [ ] reset_job_bookmark
+- [ ] start_crawler
+- [ ] start_crawler_schedule
+- [ ] start_job_run
+- [ ] start_trigger
+- [ ] stop_crawler
+- [ ] stop_crawler_schedule
+- [ ] stop_trigger
+- [ ] update_classifier
+- [ ] update_connection
+- [ ] update_crawler
+- [ ] update_crawler_schedule
+- [ ] update_database
+- [ ] update_dev_endpoint
+- [ ] update_job
+- [ ] update_partition
+- [ ] update_table
+- [ ] update_trigger
+- [ ] update_user_defined_function
+
+## greengrass - 0% implemented
+- [ ] associate_role_to_group
+- [ ] associate_service_role_to_account
+- [ ] create_core_definition
+- [ ] create_core_definition_version
+- [ ] create_deployment
+- [ ] create_device_definition
+- [ ] create_device_definition_version
+- [ ] create_function_definition
+- [ ] create_function_definition_version
+- [ ] create_group
+- [ ] create_group_certificate_authority
+- [ ] create_group_version
+- [ ] create_logger_definition
+- [ ] create_logger_definition_version
+- [ ] create_resource_definition
+- [ ] create_resource_definition_version
+- [ ] create_software_update_job
+- [ ] create_subscription_definition
+- [ ] create_subscription_definition_version
+- [ ] delete_core_definition
+- [ ] delete_device_definition
+- [ ] delete_function_definition
+- [ ] delete_group
+- [ ] delete_logger_definition
+- [ ] delete_resource_definition
+- [ ] delete_subscription_definition
+- [ ] disassociate_role_from_group
+- [ ] disassociate_service_role_from_account
+- [ ] get_associated_role
+- [ ] get_connectivity_info
+- [ ] get_core_definition
+- [ ] get_core_definition_version
+- [ ]
get_deployment_status +- [ ] get_device_definition +- [ ] get_device_definition_version +- [ ] get_function_definition +- [ ] get_function_definition_version +- [ ] get_group +- [ ] get_group_certificate_authority +- [ ] get_group_certificate_configuration +- [ ] get_group_version +- [ ] get_logger_definition +- [ ] get_logger_definition_version +- [ ] get_resource_definition +- [ ] get_resource_definition_version +- [ ] get_service_role_for_account +- [ ] get_subscription_definition +- [ ] get_subscription_definition_version +- [ ] list_core_definition_versions +- [ ] list_core_definitions +- [ ] list_deployments +- [ ] list_device_definition_versions +- [ ] list_device_definitions +- [ ] list_function_definition_versions +- [ ] list_function_definitions +- [ ] list_group_certificate_authorities +- [ ] list_group_versions +- [ ] list_groups +- [ ] list_logger_definition_versions +- [ ] list_logger_definitions +- [ ] list_resource_definition_versions +- [ ] list_resource_definitions +- [ ] list_subscription_definition_versions +- [ ] list_subscription_definitions +- [ ] reset_deployments +- [ ] update_connectivity_info +- [ ] update_core_definition +- [ ] update_device_definition +- [ ] update_function_definition +- [ ] update_group +- [ ] update_group_certificate_configuration +- [ ] update_logger_definition +- [ ] update_resource_definition +- [ ] update_subscription_definition + +## guardduty - 0% implemented +- [ ] accept_invitation +- [ ] archive_findings +- [ ] create_detector +- [ ] create_filter +- [ ] create_ip_set +- [ ] create_members +- [ ] create_sample_findings +- [ ] create_threat_intel_set +- [ ] decline_invitations +- [ ] delete_detector +- [ ] delete_filter +- [ ] delete_invitations +- [ ] delete_ip_set +- [ ] delete_members +- [ ] delete_threat_intel_set +- [ ] disassociate_from_master_account +- [ ] disassociate_members +- [ ] get_detector +- [ ] get_filter +- [ ] get_findings +- [ ] get_findings_statistics +- [ ] get_invitations_count +- [ ] get_ip_set +- [ ] get_master_account +- [ ] get_members +- [ ] get_threat_intel_set +- [ ] invite_members +- [ ] list_detectors +- [ ] list_filters +- [ ] list_findings +- [ ] list_invitations +- [ ] list_ip_sets +- [ ] list_members +- [ ] list_threat_intel_sets +- [ ] start_monitoring_members +- [ ] stop_monitoring_members +- [ ] unarchive_findings +- [ ] update_detector +- [ ] update_filter +- [ ] update_findings_feedback +- [ ] update_ip_set +- [ ] update_threat_intel_set + +## health - 0% implemented +- [ ] describe_affected_entities +- [ ] describe_entity_aggregates +- [ ] describe_event_aggregates +- [ ] describe_event_details +- [ ] describe_event_types +- [ ] describe_events + +## iam - 47% implemented +- [ ] add_client_id_to_open_id_connect_provider +- [X] add_role_to_instance_profile +- [X] add_user_to_group +- [X] attach_group_policy +- [X] attach_role_policy +- [X] attach_user_policy +- [ ] change_password +- [X] create_access_key +- [X] create_account_alias +- [X] create_group +- [X] create_instance_profile +- [X] create_login_profile +- [ ] create_open_id_connect_provider +- [X] create_policy +- [X] create_policy_version +- [X] create_role +- [ ] create_saml_provider +- [ ] create_service_linked_role +- [ ] create_service_specific_credential +- [X] create_user +- [ ] create_virtual_mfa_device +- [X] deactivate_mfa_device +- [X] delete_access_key +- [X] delete_account_alias +- [ ] delete_account_password_policy +- [ ] delete_group +- [ ] delete_group_policy +- [ ] delete_instance_profile +- [X] delete_login_profile +- 
[ ] delete_open_id_connect_provider +- [ ] delete_policy +- [X] delete_policy_version +- [X] delete_role +- [ ] delete_role_permissions_boundary +- [X] delete_role_policy +- [ ] delete_saml_provider +- [X] delete_server_certificate +- [ ] delete_service_linked_role +- [ ] delete_service_specific_credential +- [ ] delete_signing_certificate +- [ ] delete_ssh_public_key +- [X] delete_user +- [ ] delete_user_permissions_boundary +- [X] delete_user_policy +- [ ] delete_virtual_mfa_device +- [X] detach_group_policy +- [X] detach_role_policy +- [X] detach_user_policy +- [X] enable_mfa_device +- [ ] generate_credential_report +- [ ] get_access_key_last_used +- [X] get_account_authorization_details +- [ ] get_account_password_policy +- [ ] get_account_summary +- [ ] get_context_keys_for_custom_policy +- [ ] get_context_keys_for_principal_policy +- [X] get_credential_report +- [X] get_group +- [X] get_group_policy +- [X] get_instance_profile +- [X] get_login_profile +- [ ] get_open_id_connect_provider +- [X] get_policy +- [X] get_policy_version +- [X] get_role +- [X] get_role_policy +- [ ] get_saml_provider +- [X] get_server_certificate +- [ ] get_service_linked_role_deletion_status +- [ ] get_ssh_public_key +- [X] get_user +- [X] get_user_policy +- [ ] list_access_keys +- [X] list_account_aliases +- [X] list_attached_group_policies +- [X] list_attached_role_policies +- [X] list_attached_user_policies +- [ ] list_entities_for_policy +- [X] list_group_policies +- [X] list_groups +- [ ] list_groups_for_user +- [ ] list_instance_profiles +- [ ] list_instance_profiles_for_role +- [X] list_mfa_devices +- [ ] list_open_id_connect_providers +- [X] list_policies +- [X] list_policy_versions +- [X] list_role_policies +- [ ] list_roles +- [ ] list_saml_providers +- [ ] list_server_certificates +- [ ] list_service_specific_credentials +- [ ] list_signing_certificates +- [ ] list_ssh_public_keys +- [X] list_user_policies +- [X] list_users +- [ ] list_virtual_mfa_devices +- [X] put_group_policy +- [ ] put_role_permissions_boundary +- [X] put_role_policy +- [ ] put_user_permissions_boundary +- [X] put_user_policy +- [ ] remove_client_id_from_open_id_connect_provider +- [X] remove_role_from_instance_profile +- [X] remove_user_from_group +- [ ] reset_service_specific_credential +- [ ] resync_mfa_device +- [ ] set_default_policy_version +- [ ] simulate_custom_policy +- [ ] simulate_principal_policy +- [X] update_access_key +- [ ] update_account_password_policy +- [ ] update_assume_role_policy +- [ ] update_group +- [X] update_login_profile +- [ ] update_open_id_connect_provider_thumbprint +- [ ] update_role +- [ ] update_role_description +- [ ] update_saml_provider +- [ ] update_server_certificate +- [ ] update_service_specific_credential +- [ ] update_signing_certificate +- [ ] update_ssh_public_key +- [ ] update_user +- [ ] upload_server_certificate +- [ ] upload_signing_certificate +- [ ] upload_ssh_public_key + +## importexport - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] get_shipping_label +- [ ] get_status +- [ ] list_jobs +- [ ] update_job + +## inspector - 0% implemented +- [ ] add_attributes_to_findings +- [ ] create_assessment_target +- [ ] create_assessment_template +- [ ] create_exclusions_preview +- [ ] create_resource_group +- [ ] delete_assessment_run +- [ ] delete_assessment_target +- [ ] delete_assessment_template +- [ ] describe_assessment_runs +- [ ] describe_assessment_targets +- [ ] describe_assessment_templates +- [ ] describe_cross_account_access_role +- [ ] describe_exclusions 
+- [ ] describe_findings +- [ ] describe_resource_groups +- [ ] describe_rules_packages +- [ ] get_assessment_report +- [ ] get_exclusions_preview +- [ ] get_telemetry_metadata +- [ ] list_assessment_run_agents +- [ ] list_assessment_runs +- [ ] list_assessment_targets +- [ ] list_assessment_templates +- [ ] list_event_subscriptions +- [ ] list_exclusions +- [ ] list_findings +- [ ] list_rules_packages +- [ ] list_tags_for_resource +- [ ] preview_agents +- [ ] register_cross_account_access_role +- [ ] remove_attributes_from_findings +- [ ] set_tags_for_resource +- [ ] start_assessment_run +- [ ] stop_assessment_run +- [ ] subscribe_to_event +- [ ] unsubscribe_from_event +- [ ] update_assessment_target + +## iot - 25% implemented +- [ ] accept_certificate_transfer +- [X] add_thing_to_thing_group +- [ ] associate_targets_with_job +- [ ] attach_policy +- [X] attach_principal_policy +- [ ] attach_security_profile +- [X] attach_thing_principal +- [ ] cancel_audit_task +- [ ] cancel_certificate_transfer +- [ ] cancel_job +- [ ] cancel_job_execution +- [ ] clear_default_authorizer +- [ ] create_authorizer +- [ ] create_certificate_from_csr +- [X] create_job +- [X] create_keys_and_certificate +- [ ] create_ota_update +- [X] create_policy +- [ ] create_policy_version +- [ ] create_role_alias +- [ ] create_scheduled_audit +- [ ] create_security_profile +- [ ] create_stream +- [X] create_thing +- [X] create_thing_group +- [X] create_thing_type +- [ ] create_topic_rule +- [ ] delete_account_audit_configuration +- [ ] delete_authorizer +- [ ] delete_ca_certificate +- [X] delete_certificate +- [ ] delete_job +- [ ] delete_job_execution +- [ ] delete_ota_update +- [X] delete_policy +- [ ] delete_policy_version +- [ ] delete_registration_code +- [ ] delete_role_alias +- [ ] delete_scheduled_audit +- [ ] delete_security_profile +- [ ] delete_stream +- [X] delete_thing +- [X] delete_thing_group +- [X] delete_thing_type +- [ ] delete_topic_rule +- [ ] delete_v2_logging_level +- [ ] deprecate_thing_type +- [ ] describe_account_audit_configuration +- [ ] describe_audit_task +- [ ] describe_authorizer +- [ ] describe_ca_certificate +- [X] describe_certificate +- [ ] describe_default_authorizer +- [ ] describe_endpoint +- [ ] describe_event_configurations +- [ ] describe_index +- [X] describe_job +- [ ] describe_job_execution +- [ ] describe_role_alias +- [ ] describe_scheduled_audit +- [ ] describe_security_profile +- [ ] describe_stream +- [X] describe_thing +- [X] describe_thing_group +- [ ] describe_thing_registration_task +- [X] describe_thing_type +- [ ] detach_policy +- [X] detach_principal_policy +- [ ] detach_security_profile +- [X] detach_thing_principal +- [ ] disable_topic_rule +- [ ] enable_topic_rule +- [ ] get_effective_policies +- [ ] get_indexing_configuration +- [X] get_job_document +- [ ] get_logging_options +- [ ] get_ota_update +- [X] get_policy +- [ ] get_policy_version +- [ ] get_registration_code +- [ ] get_topic_rule +- [ ] get_v2_logging_options +- [ ] list_active_violations +- [ ] list_attached_policies +- [ ] list_audit_findings +- [ ] list_audit_tasks +- [ ] list_authorizers +- [ ] list_ca_certificates +- [X] list_certificates +- [ ] list_certificates_by_ca +- [ ] list_indices +- [ ] list_job_executions_for_job +- [ ] list_job_executions_for_thing +- [ ] list_jobs +- [ ] list_ota_updates +- [ ] list_outgoing_certificates +- [X] list_policies +- [X] list_policy_principals +- [ ] list_policy_versions +- [X] list_principal_policies +- [X] list_principal_things +- [ ] list_role_aliases 
+- [ ] list_scheduled_audits +- [ ] list_security_profiles +- [ ] list_security_profiles_for_target +- [ ] list_streams +- [ ] list_targets_for_policy +- [ ] list_targets_for_security_profile +- [X] list_thing_groups +- [X] list_thing_groups_for_thing +- [X] list_thing_principals +- [ ] list_thing_registration_task_reports +- [ ] list_thing_registration_tasks +- [X] list_thing_types +- [X] list_things +- [X] list_things_in_thing_group +- [ ] list_topic_rules +- [ ] list_v2_logging_levels +- [ ] list_violation_events +- [ ] register_ca_certificate +- [ ] register_certificate +- [ ] register_thing +- [ ] reject_certificate_transfer +- [X] remove_thing_from_thing_group +- [ ] replace_topic_rule +- [ ] search_index +- [ ] set_default_authorizer +- [ ] set_default_policy_version +- [ ] set_logging_options +- [ ] set_v2_logging_level +- [ ] set_v2_logging_options +- [ ] start_on_demand_audit_task +- [ ] start_thing_registration_task +- [ ] stop_thing_registration_task +- [ ] test_authorization +- [ ] test_invoke_authorizer +- [ ] transfer_certificate +- [ ] update_account_audit_configuration +- [ ] update_authorizer +- [ ] update_ca_certificate +- [X] update_certificate +- [ ] update_event_configurations +- [ ] update_indexing_configuration +- [ ] update_role_alias +- [ ] update_scheduled_audit +- [ ] update_security_profile +- [ ] update_stream +- [X] update_thing +- [X] update_thing_group +- [X] update_thing_groups_for_thing +- [ ] validate_security_profile_behaviors + +## iot-data - 100% implemented +- [X] delete_thing_shadow +- [X] get_thing_shadow +- [X] publish +- [X] update_thing_shadow + +## iot-jobs-data - 0% implemented +- [ ] describe_job_execution +- [ ] get_pending_job_executions +- [ ] start_next_pending_job_execution +- [ ] update_job_execution + +## iot1click-devices - 0% implemented +- [ ] claim_devices_by_claim_code +- [ ] describe_device +- [ ] finalize_device_claim +- [ ] get_device_methods +- [ ] initiate_device_claim +- [ ] invoke_device_method +- [ ] list_device_events +- [ ] list_devices +- [ ] unclaim_device +- [ ] update_device_state + +## iot1click-projects - 0% implemented +- [ ] associate_device_with_placement +- [ ] create_placement +- [ ] create_project +- [ ] delete_placement +- [ ] delete_project +- [ ] describe_placement +- [ ] describe_project +- [ ] disassociate_device_from_placement +- [ ] get_devices_in_placement +- [ ] list_placements +- [ ] list_projects +- [ ] update_placement +- [ ] update_project + +## iotanalytics - 0% implemented +- [ ] batch_put_message +- [ ] cancel_pipeline_reprocessing +- [ ] create_channel +- [ ] create_dataset +- [ ] create_dataset_content +- [ ] create_datastore +- [ ] create_pipeline +- [ ] delete_channel +- [ ] delete_dataset +- [ ] delete_dataset_content +- [ ] delete_datastore +- [ ] delete_pipeline +- [ ] describe_channel +- [ ] describe_dataset +- [ ] describe_datastore +- [ ] describe_logging_options +- [ ] describe_pipeline +- [ ] get_dataset_content +- [ ] list_channels +- [ ] list_datasets +- [ ] list_datastores +- [ ] list_pipelines +- [ ] list_tags_for_resource +- [ ] put_logging_options +- [ ] run_pipeline_activity +- [ ] sample_channel_data +- [ ] start_pipeline_reprocessing +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_channel +- [ ] update_dataset +- [ ] update_datastore +- [ ] update_pipeline + +## kinesis - 46% implemented +- [X] add_tags_to_stream +- [X] create_stream +- [ ] decrease_stream_retention_period +- [X] delete_stream +- [ ] deregister_stream_consumer +- [ ] describe_limits +- [X] 
describe_stream +- [ ] describe_stream_consumer +- [ ] describe_stream_summary +- [ ] disable_enhanced_monitoring +- [ ] enable_enhanced_monitoring +- [X] get_records +- [X] get_shard_iterator +- [ ] increase_stream_retention_period +- [ ] list_shards +- [ ] list_stream_consumers +- [X] list_streams +- [X] list_tags_for_stream +- [X] merge_shards +- [X] put_record +- [X] put_records +- [ ] register_stream_consumer +- [X] remove_tags_from_stream +- [X] split_shard +- [ ] start_stream_encryption +- [ ] stop_stream_encryption +- [ ] subscribe_to_shard +- [ ] update_shard_count + +## kinesis-video-archived-media - 0% implemented +- [ ] get_hls_streaming_session_url +- [ ] get_media_for_fragment_list +- [ ] list_fragments + +## kinesis-video-media - 0% implemented +- [ ] get_media + +## kinesisanalytics - 0% implemented +- [ ] add_application_cloud_watch_logging_option +- [ ] add_application_input +- [ ] add_application_input_processing_configuration +- [ ] add_application_output +- [ ] add_application_reference_data_source +- [ ] create_application +- [ ] delete_application +- [ ] delete_application_cloud_watch_logging_option +- [ ] delete_application_input_processing_configuration +- [ ] delete_application_output +- [ ] delete_application_reference_data_source +- [ ] describe_application +- [ ] discover_input_schema +- [ ] list_applications +- [ ] start_application +- [ ] stop_application +- [ ] update_application + +## kinesisvideo - 0% implemented +- [ ] create_stream +- [ ] delete_stream +- [ ] describe_stream +- [ ] get_data_endpoint +- [ ] list_streams +- [ ] list_tags_for_stream +- [ ] tag_stream +- [ ] untag_stream +- [ ] update_data_retention +- [ ] update_stream + +## kms - 25% implemented +- [ ] cancel_key_deletion +- [ ] create_alias +- [ ] create_grant +- [X] create_key +- [ ] decrypt +- [X] delete_alias +- [ ] delete_imported_key_material +- [X] describe_key +- [ ] disable_key +- [X] disable_key_rotation +- [ ] enable_key +- [X] enable_key_rotation +- [ ] encrypt +- [ ] generate_data_key +- [ ] generate_data_key_without_plaintext +- [ ] generate_random +- [X] get_key_policy +- [X] get_key_rotation_status +- [ ] get_parameters_for_import +- [ ] import_key_material +- [ ] list_aliases +- [ ] list_grants +- [ ] list_key_policies +- [X] list_keys +- [ ] list_resource_tags +- [ ] list_retirable_grants +- [X] put_key_policy +- [ ] re_encrypt +- [ ] retire_grant +- [ ] revoke_grant +- [ ] schedule_key_deletion +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_alias +- [ ] update_key_description + +## lambda - 0% implemented +- [ ] add_permission +- [ ] create_alias +- [ ] create_event_source_mapping +- [ ] create_function +- [ ] delete_alias +- [ ] delete_event_source_mapping +- [ ] delete_function +- [ ] delete_function_concurrency +- [ ] get_account_settings +- [ ] get_alias +- [ ] get_event_source_mapping +- [ ] get_function +- [ ] get_function_configuration +- [ ] get_policy +- [ ] invoke +- [ ] invoke_async +- [ ] list_aliases +- [ ] list_event_source_mappings +- [ ] list_functions +- [ ] list_tags +- [ ] list_versions_by_function +- [ ] publish_version +- [ ] put_function_concurrency +- [ ] remove_permission +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_alias +- [ ] update_event_source_mapping +- [ ] update_function_code +- [ ] update_function_configuration + +## lex-models - 0% implemented +- [ ] create_bot_version +- [ ] create_intent_version +- [ ] create_slot_type_version +- [ ] delete_bot +- [ ] delete_bot_alias +- [ ] delete_bot_channel_association +- [ ] 
delete_bot_version +- [ ] delete_intent +- [ ] delete_intent_version +- [ ] delete_slot_type +- [ ] delete_slot_type_version +- [ ] delete_utterances +- [ ] get_bot +- [ ] get_bot_alias +- [ ] get_bot_aliases +- [ ] get_bot_channel_association +- [ ] get_bot_channel_associations +- [ ] get_bot_versions +- [ ] get_bots +- [ ] get_builtin_intent +- [ ] get_builtin_intents +- [ ] get_builtin_slot_types +- [ ] get_export +- [ ] get_import +- [ ] get_intent +- [ ] get_intent_versions +- [ ] get_intents +- [ ] get_slot_type +- [ ] get_slot_type_versions +- [ ] get_slot_types +- [ ] get_utterances_view +- [ ] put_bot +- [ ] put_bot_alias +- [ ] put_intent +- [ ] put_slot_type +- [ ] start_import + +## lex-runtime - 0% implemented +- [ ] post_content +- [ ] post_text + +## lightsail - 0% implemented +- [ ] allocate_static_ip +- [ ] attach_disk +- [ ] attach_instances_to_load_balancer +- [ ] attach_load_balancer_tls_certificate +- [ ] attach_static_ip +- [ ] close_instance_public_ports +- [ ] create_disk +- [ ] create_disk_from_snapshot +- [ ] create_disk_snapshot +- [ ] create_domain +- [ ] create_domain_entry +- [ ] create_instance_snapshot +- [ ] create_instances +- [ ] create_instances_from_snapshot +- [ ] create_key_pair +- [ ] create_load_balancer +- [ ] create_load_balancer_tls_certificate +- [ ] delete_disk +- [ ] delete_disk_snapshot +- [ ] delete_domain +- [ ] delete_domain_entry +- [ ] delete_instance +- [ ] delete_instance_snapshot +- [ ] delete_key_pair +- [ ] delete_load_balancer +- [ ] delete_load_balancer_tls_certificate +- [ ] detach_disk +- [ ] detach_instances_from_load_balancer +- [ ] detach_static_ip +- [ ] download_default_key_pair +- [ ] get_active_names +- [ ] get_blueprints +- [ ] get_bundles +- [ ] get_disk +- [ ] get_disk_snapshot +- [ ] get_disk_snapshots +- [ ] get_disks +- [ ] get_domain +- [ ] get_domains +- [ ] get_instance +- [ ] get_instance_access_details +- [ ] get_instance_metric_data +- [ ] get_instance_port_states +- [ ] get_instance_snapshot +- [ ] get_instance_snapshots +- [ ] get_instance_state +- [ ] get_instances +- [ ] get_key_pair +- [ ] get_key_pairs +- [ ] get_load_balancer +- [ ] get_load_balancer_metric_data +- [ ] get_load_balancer_tls_certificates +- [ ] get_load_balancers +- [ ] get_operation +- [ ] get_operations +- [ ] get_operations_for_resource +- [ ] get_regions +- [ ] get_static_ip +- [ ] get_static_ips +- [ ] import_key_pair +- [ ] is_vpc_peered +- [ ] open_instance_public_ports +- [ ] peer_vpc +- [ ] put_instance_public_ports +- [ ] reboot_instance +- [ ] release_static_ip +- [ ] start_instance +- [ ] stop_instance +- [ ] unpeer_vpc +- [ ] update_domain_entry +- [ ] update_load_balancer_attribute + +## logs - 27% implemented +- [ ] associate_kms_key +- [ ] cancel_export_task +- [ ] create_export_task +- [X] create_log_group +- [X] create_log_stream +- [ ] delete_destination +- [X] delete_log_group +- [X] delete_log_stream +- [ ] delete_metric_filter +- [ ] delete_resource_policy +- [ ] delete_retention_policy +- [ ] delete_subscription_filter +- [ ] describe_destinations +- [ ] describe_export_tasks +- [X] describe_log_groups +- [X] describe_log_streams +- [ ] describe_metric_filters +- [ ] describe_resource_policies +- [ ] describe_subscription_filters +- [ ] disassociate_kms_key +- [X] filter_log_events +- [X] get_log_events +- [ ] list_tags_log_group +- [ ] put_destination +- [ ] put_destination_policy +- [X] put_log_events +- [ ] put_metric_filter +- [ ] put_resource_policy +- [ ] put_retention_policy +- [ ] put_subscription_filter +- 
[ ] tag_log_group +- [ ] test_metric_filter +- [ ] untag_log_group + +## machinelearning - 0% implemented +- [ ] add_tags +- [ ] create_batch_prediction +- [ ] create_data_source_from_rds +- [ ] create_data_source_from_redshift +- [ ] create_data_source_from_s3 +- [ ] create_evaluation +- [ ] create_ml_model +- [ ] create_realtime_endpoint +- [ ] delete_batch_prediction +- [ ] delete_data_source +- [ ] delete_evaluation +- [ ] delete_ml_model +- [ ] delete_realtime_endpoint +- [ ] delete_tags +- [ ] describe_batch_predictions +- [ ] describe_data_sources +- [ ] describe_evaluations +- [ ] describe_ml_models +- [ ] describe_tags +- [ ] get_batch_prediction +- [ ] get_data_source +- [ ] get_evaluation +- [ ] get_ml_model +- [ ] predict +- [ ] update_batch_prediction +- [ ] update_data_source +- [ ] update_evaluation +- [ ] update_ml_model + +## macie - 0% implemented +- [ ] associate_member_account +- [ ] associate_s3_resources +- [ ] disassociate_member_account +- [ ] disassociate_s3_resources +- [ ] list_member_accounts +- [ ] list_s3_resources +- [ ] update_s3_resources + +## marketplace-entitlement - 0% implemented +- [ ] get_entitlements + +## marketplacecommerceanalytics - 0% implemented +- [ ] generate_data_set +- [ ] start_support_data_export + +## mediaconvert - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] create_job_template +- [ ] create_preset +- [ ] create_queue +- [ ] delete_job_template +- [ ] delete_preset +- [ ] delete_queue +- [ ] describe_endpoints +- [ ] get_job +- [ ] get_job_template +- [ ] get_preset +- [ ] get_queue +- [ ] list_job_templates +- [ ] list_jobs +- [ ] list_presets +- [ ] list_queues +- [ ] list_tags_for_resource +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_job_template +- [ ] update_preset +- [ ] update_queue + +## medialive - 0% implemented +- [ ] create_channel +- [ ] create_input +- [ ] create_input_security_group +- [ ] delete_channel +- [ ] delete_input +- [ ] delete_input_security_group +- [ ] delete_reservation +- [ ] describe_channel +- [ ] describe_input +- [ ] describe_input_security_group +- [ ] describe_offering +- [ ] describe_reservation +- [ ] list_channels +- [ ] list_input_security_groups +- [ ] list_inputs +- [ ] list_offerings +- [ ] list_reservations +- [ ] purchase_offering +- [ ] start_channel +- [ ] stop_channel +- [ ] update_channel +- [ ] update_input +- [ ] update_input_security_group + +## mediapackage - 0% implemented +- [ ] create_channel +- [ ] create_origin_endpoint +- [ ] delete_channel +- [ ] delete_origin_endpoint +- [ ] describe_channel +- [ ] describe_origin_endpoint +- [ ] list_channels +- [ ] list_origin_endpoints +- [ ] rotate_channel_credentials +- [ ] update_channel +- [ ] update_origin_endpoint + +## mediastore - 0% implemented +- [ ] create_container +- [ ] delete_container +- [ ] delete_container_policy +- [ ] delete_cors_policy +- [ ] describe_container +- [ ] get_container_policy +- [ ] get_cors_policy +- [ ] list_containers +- [ ] put_container_policy +- [ ] put_cors_policy + +## mediastore-data - 0% implemented +- [ ] delete_object +- [ ] describe_object +- [ ] get_object +- [ ] list_items +- [ ] put_object + +## mediatailor - 0% implemented +- [ ] delete_playback_configuration +- [ ] get_playback_configuration +- [ ] list_playback_configurations +- [ ] put_playback_configuration + +## meteringmarketplace - 0% implemented +- [ ] batch_meter_usage +- [ ] meter_usage +- [ ] resolve_customer + +## mgh - 0% implemented +- [ ] associate_created_artifact +- [ ] 
associate_discovered_resource +- [ ] create_progress_update_stream +- [ ] delete_progress_update_stream +- [ ] describe_application_state +- [ ] describe_migration_task +- [ ] disassociate_created_artifact +- [ ] disassociate_discovered_resource +- [ ] import_migration_task +- [ ] list_created_artifacts +- [ ] list_discovered_resources +- [ ] list_migration_tasks +- [ ] list_progress_update_streams +- [ ] notify_application_state +- [ ] notify_migration_task_state +- [ ] put_resource_attributes + +## mobile - 0% implemented +- [ ] create_project +- [ ] delete_project +- [ ] describe_bundle +- [ ] describe_project +- [ ] export_bundle +- [ ] export_project +- [ ] list_bundles +- [ ] list_projects +- [ ] update_project + +## mq - 0% implemented +- [ ] create_broker +- [ ] create_configuration +- [ ] create_user +- [ ] delete_broker +- [ ] delete_user +- [ ] describe_broker +- [ ] describe_configuration +- [ ] describe_configuration_revision +- [ ] describe_user +- [ ] list_brokers +- [ ] list_configuration_revisions +- [ ] list_configurations +- [ ] list_users +- [ ] reboot_broker +- [ ] update_broker +- [ ] update_configuration +- [ ] update_user + +## mturk - 0% implemented +- [ ] accept_qualification_request +- [ ] approve_assignment +- [ ] associate_qualification_with_worker +- [ ] create_additional_assignments_for_hit +- [ ] create_hit +- [ ] create_hit_type +- [ ] create_hit_with_hit_type +- [ ] create_qualification_type +- [ ] create_worker_block +- [ ] delete_hit +- [ ] delete_qualification_type +- [ ] delete_worker_block +- [ ] disassociate_qualification_from_worker +- [ ] get_account_balance +- [ ] get_assignment +- [ ] get_file_upload_url +- [ ] get_hit +- [ ] get_qualification_score +- [ ] get_qualification_type +- [ ] list_assignments_for_hit +- [ ] list_bonus_payments +- [ ] list_hits +- [ ] list_hits_for_qualification_type +- [ ] list_qualification_requests +- [ ] list_qualification_types +- [ ] list_review_policy_results_for_hit +- [ ] list_reviewable_hits +- [ ] list_worker_blocks +- [ ] list_workers_with_qualification_type +- [ ] notify_workers +- [ ] reject_assignment +- [ ] reject_qualification_request +- [ ] send_bonus +- [ ] send_test_event_notification +- [ ] update_expiration_for_hit +- [ ] update_hit_review_status +- [ ] update_hit_type_of_hit +- [ ] update_notification_settings +- [ ] update_qualification_type + +## neptune - 0% implemented +- [ ] add_role_to_db_cluster +- [ ] add_source_identifier_to_subscription +- [ ] add_tags_to_resource +- [ ] apply_pending_maintenance_action +- [ ] copy_db_cluster_parameter_group +- [ ] copy_db_cluster_snapshot +- [ ] copy_db_parameter_group +- [ ] create_db_cluster +- [ ] create_db_cluster_parameter_group +- [ ] create_db_cluster_snapshot +- [ ] create_db_instance +- [ ] create_db_parameter_group +- [ ] create_db_subnet_group +- [ ] create_event_subscription +- [ ] delete_db_cluster +- [ ] delete_db_cluster_parameter_group +- [ ] delete_db_cluster_snapshot +- [ ] delete_db_instance +- [ ] delete_db_parameter_group +- [ ] delete_db_subnet_group +- [ ] delete_event_subscription +- [ ] describe_db_cluster_parameter_groups +- [ ] describe_db_cluster_parameters +- [ ] describe_db_cluster_snapshot_attributes +- [ ] describe_db_cluster_snapshots +- [ ] describe_db_clusters +- [ ] describe_db_engine_versions +- [ ] describe_db_instances +- [ ] describe_db_parameter_groups +- [ ] describe_db_parameters +- [ ] describe_db_subnet_groups +- [ ] describe_engine_default_cluster_parameters +- [ ] describe_engine_default_parameters +- [ ] 
describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_orderable_db_instance_options +- [ ] describe_pending_maintenance_actions +- [ ] describe_valid_db_instance_modifications +- [ ] failover_db_cluster +- [ ] list_tags_for_resource +- [ ] modify_db_cluster +- [ ] modify_db_cluster_parameter_group +- [ ] modify_db_cluster_snapshot_attribute +- [ ] modify_db_instance +- [ ] modify_db_parameter_group +- [ ] modify_db_subnet_group +- [ ] modify_event_subscription +- [ ] promote_read_replica_db_cluster +- [ ] reboot_db_instance +- [ ] remove_role_from_db_cluster +- [ ] remove_source_identifier_from_subscription +- [ ] remove_tags_from_resource +- [ ] reset_db_cluster_parameter_group +- [ ] reset_db_parameter_group +- [ ] restore_db_cluster_from_snapshot +- [ ] restore_db_cluster_to_point_in_time + +## opsworks - 12% implemented +- [ ] assign_instance +- [ ] assign_volume +- [ ] associate_elastic_ip +- [ ] attach_elastic_load_balancer +- [ ] clone_stack +- [X] create_app +- [ ] create_deployment +- [X] create_instance +- [X] create_layer +- [X] create_stack +- [ ] create_user_profile +- [ ] delete_app +- [ ] delete_instance +- [ ] delete_layer +- [ ] delete_stack +- [ ] delete_user_profile +- [ ] deregister_ecs_cluster +- [ ] deregister_elastic_ip +- [ ] deregister_instance +- [ ] deregister_rds_db_instance +- [ ] deregister_volume +- [ ] describe_agent_versions +- [X] describe_apps +- [ ] describe_commands +- [ ] describe_deployments +- [ ] describe_ecs_clusters +- [ ] describe_elastic_ips +- [ ] describe_elastic_load_balancers +- [X] describe_instances +- [X] describe_layers +- [ ] describe_load_based_auto_scaling +- [ ] describe_my_user_profile +- [ ] describe_operating_systems +- [ ] describe_permissions +- [ ] describe_raid_arrays +- [ ] describe_rds_db_instances +- [ ] describe_service_errors +- [ ] describe_stack_provisioning_parameters +- [ ] describe_stack_summary +- [X] describe_stacks +- [ ] describe_time_based_auto_scaling +- [ ] describe_user_profiles +- [ ] describe_volumes +- [ ] detach_elastic_load_balancer +- [ ] disassociate_elastic_ip +- [ ] get_hostname_suggestion +- [ ] grant_access +- [ ] list_tags +- [ ] reboot_instance +- [ ] register_ecs_cluster +- [ ] register_elastic_ip +- [ ] register_instance +- [ ] register_rds_db_instance +- [ ] register_volume +- [ ] set_load_based_auto_scaling +- [ ] set_permission +- [ ] set_time_based_auto_scaling +- [X] start_instance +- [ ] start_stack +- [ ] stop_instance +- [ ] stop_stack +- [ ] tag_resource +- [ ] unassign_instance +- [ ] unassign_volume +- [ ] untag_resource +- [ ] update_app +- [ ] update_elastic_ip +- [ ] update_instance +- [ ] update_layer +- [ ] update_my_user_profile +- [ ] update_rds_db_instance +- [ ] update_stack +- [ ] update_user_profile +- [ ] update_volume + +## opsworkscm - 0% implemented +- [ ] associate_node +- [ ] create_backup +- [ ] create_server +- [ ] delete_backup +- [ ] delete_server +- [ ] describe_account_attributes +- [ ] describe_backups +- [ ] describe_events +- [ ] describe_node_association_status +- [ ] describe_servers +- [ ] disassociate_node +- [ ] restore_server +- [ ] start_maintenance +- [ ] update_server +- [ ] update_server_engine_attributes + +## organizations - 0% implemented +- [ ] accept_handshake +- [ ] attach_policy +- [ ] cancel_handshake +- [ ] create_account +- [ ] create_organization +- [ ] create_organizational_unit +- [ ] create_policy +- [ ] decline_handshake +- [ ] delete_organization +- [ ] delete_organizational_unit +- [ ] 
delete_policy +- [ ] describe_account +- [ ] describe_create_account_status +- [ ] describe_handshake +- [ ] describe_organization +- [ ] describe_organizational_unit +- [ ] describe_policy +- [ ] detach_policy +- [ ] disable_aws_service_access +- [ ] disable_policy_type +- [ ] enable_all_features +- [ ] enable_aws_service_access +- [ ] enable_policy_type +- [ ] invite_account_to_organization +- [ ] leave_organization +- [ ] list_accounts +- [ ] list_accounts_for_parent +- [ ] list_aws_service_access_for_organization +- [ ] list_children +- [ ] list_create_account_status +- [ ] list_handshakes_for_account +- [ ] list_handshakes_for_organization +- [ ] list_organizational_units_for_parent +- [ ] list_parents +- [ ] list_policies +- [ ] list_policies_for_target +- [ ] list_roots +- [ ] list_targets_for_policy +- [ ] move_account +- [ ] remove_account_from_organization +- [ ] update_organizational_unit +- [ ] update_policy + +## pi - 0% implemented +- [ ] describe_dimension_keys +- [ ] get_resource_metrics + +## pinpoint - 0% implemented +- [ ] create_app +- [ ] create_campaign +- [ ] create_export_job +- [ ] create_import_job +- [ ] create_segment +- [ ] delete_adm_channel +- [ ] delete_apns_channel +- [ ] delete_apns_sandbox_channel +- [ ] delete_apns_voip_channel +- [ ] delete_apns_voip_sandbox_channel +- [ ] delete_app +- [ ] delete_baidu_channel +- [ ] delete_campaign +- [ ] delete_email_channel +- [ ] delete_endpoint +- [ ] delete_event_stream +- [ ] delete_gcm_channel +- [ ] delete_segment +- [ ] delete_sms_channel +- [ ] delete_user_endpoints +- [ ] get_adm_channel +- [ ] get_apns_channel +- [ ] get_apns_sandbox_channel +- [ ] get_apns_voip_channel +- [ ] get_apns_voip_sandbox_channel +- [ ] get_app +- [ ] get_application_settings +- [ ] get_apps +- [ ] get_baidu_channel +- [ ] get_campaign +- [ ] get_campaign_activities +- [ ] get_campaign_version +- [ ] get_campaign_versions +- [ ] get_campaigns +- [ ] get_channels +- [ ] get_email_channel +- [ ] get_endpoint +- [ ] get_event_stream +- [ ] get_export_job +- [ ] get_export_jobs +- [ ] get_gcm_channel +- [ ] get_import_job +- [ ] get_import_jobs +- [ ] get_segment +- [ ] get_segment_export_jobs +- [ ] get_segment_import_jobs +- [ ] get_segment_version +- [ ] get_segment_versions +- [ ] get_segments +- [ ] get_sms_channel +- [ ] get_user_endpoints +- [ ] phone_number_validate +- [ ] put_event_stream +- [ ] put_events +- [ ] remove_attributes +- [ ] send_messages +- [ ] send_users_messages +- [ ] update_adm_channel +- [ ] update_apns_channel +- [ ] update_apns_sandbox_channel +- [ ] update_apns_voip_channel +- [ ] update_apns_voip_sandbox_channel +- [ ] update_application_settings +- [ ] update_baidu_channel +- [ ] update_campaign +- [ ] update_email_channel +- [ ] update_endpoint +- [ ] update_endpoints_batch +- [ ] update_gcm_channel +- [ ] update_segment +- [ ] update_sms_channel + +## polly - 55% implemented +- [X] delete_lexicon +- [X] describe_voices +- [X] get_lexicon +- [ ] get_speech_synthesis_task +- [X] list_lexicons +- [ ] list_speech_synthesis_tasks +- [X] put_lexicon +- [ ] start_speech_synthesis_task +- [ ] synthesize_speech + +## pricing - 0% implemented +- [ ] describe_services +- [ ] get_attribute_values +- [ ] get_products + +## rds - 0% implemented +- [ ] add_role_to_db_cluster +- [ ] add_source_identifier_to_subscription +- [ ] add_tags_to_resource +- [ ] apply_pending_maintenance_action +- [ ] authorize_db_security_group_ingress +- [ ] backtrack_db_cluster +- [ ] copy_db_cluster_parameter_group +- [ ] 
copy_db_cluster_snapshot +- [ ] copy_db_parameter_group +- [ ] copy_db_snapshot +- [ ] copy_option_group +- [ ] create_db_cluster +- [ ] create_db_cluster_parameter_group +- [ ] create_db_cluster_snapshot +- [ ] create_db_instance +- [ ] create_db_instance_read_replica +- [ ] create_db_parameter_group +- [ ] create_db_security_group +- [ ] create_db_snapshot +- [ ] create_db_subnet_group +- [ ] create_event_subscription +- [ ] create_option_group +- [ ] delete_db_cluster +- [ ] delete_db_cluster_parameter_group +- [ ] delete_db_cluster_snapshot +- [ ] delete_db_instance +- [ ] delete_db_parameter_group +- [ ] delete_db_security_group +- [ ] delete_db_snapshot +- [ ] delete_db_subnet_group +- [ ] delete_event_subscription +- [ ] delete_option_group +- [ ] describe_account_attributes +- [ ] describe_certificates +- [ ] describe_db_cluster_backtracks +- [ ] describe_db_cluster_parameter_groups +- [ ] describe_db_cluster_parameters +- [ ] describe_db_cluster_snapshot_attributes +- [ ] describe_db_cluster_snapshots +- [ ] describe_db_clusters +- [ ] describe_db_engine_versions +- [ ] describe_db_instances +- [ ] describe_db_log_files +- [ ] describe_db_parameter_groups +- [ ] describe_db_parameters +- [ ] describe_db_security_groups +- [ ] describe_db_snapshot_attributes +- [ ] describe_db_snapshots +- [ ] describe_db_subnet_groups +- [ ] describe_engine_default_cluster_parameters +- [ ] describe_engine_default_parameters +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_option_group_options +- [ ] describe_option_groups +- [ ] describe_orderable_db_instance_options +- [ ] describe_pending_maintenance_actions +- [ ] describe_reserved_db_instances +- [ ] describe_reserved_db_instances_offerings +- [ ] describe_source_regions +- [ ] describe_valid_db_instance_modifications +- [ ] download_db_log_file_portion +- [ ] failover_db_cluster +- [ ] list_tags_for_resource +- [ ] modify_current_db_cluster_capacity +- [ ] modify_db_cluster +- [ ] modify_db_cluster_parameter_group +- [ ] modify_db_cluster_snapshot_attribute +- [ ] modify_db_instance +- [ ] modify_db_parameter_group +- [ ] modify_db_snapshot +- [ ] modify_db_snapshot_attribute +- [ ] modify_db_subnet_group +- [ ] modify_event_subscription +- [ ] modify_option_group +- [ ] promote_read_replica +- [ ] promote_read_replica_db_cluster +- [ ] purchase_reserved_db_instances_offering +- [ ] reboot_db_instance +- [ ] remove_role_from_db_cluster +- [ ] remove_source_identifier_from_subscription +- [ ] remove_tags_from_resource +- [ ] reset_db_cluster_parameter_group +- [ ] reset_db_parameter_group +- [ ] restore_db_cluster_from_s3 +- [ ] restore_db_cluster_from_snapshot +- [ ] restore_db_cluster_to_point_in_time +- [ ] restore_db_instance_from_db_snapshot +- [ ] restore_db_instance_from_s3 +- [ ] restore_db_instance_to_point_in_time +- [ ] revoke_db_security_group_ingress +- [ ] start_db_instance +- [ ] stop_db_instance + +## redshift - 38% implemented +- [ ] accept_reserved_node_exchange +- [ ] authorize_cluster_security_group_ingress +- [ ] authorize_snapshot_access +- [ ] copy_cluster_snapshot +- [X] create_cluster +- [X] create_cluster_parameter_group +- [X] create_cluster_security_group +- [X] create_cluster_snapshot +- [X] create_cluster_subnet_group +- [ ] create_event_subscription +- [ ] create_hsm_client_certificate +- [ ] create_hsm_configuration +- [X] create_snapshot_copy_grant +- [X] create_tags +- [X] delete_cluster +- [X] delete_cluster_parameter_group +- [X] 
delete_cluster_security_group +- [X] delete_cluster_snapshot +- [X] delete_cluster_subnet_group +- [ ] delete_event_subscription +- [ ] delete_hsm_client_certificate +- [ ] delete_hsm_configuration +- [X] delete_snapshot_copy_grant +- [X] delete_tags +- [ ] describe_cluster_db_revisions +- [X] describe_cluster_parameter_groups +- [ ] describe_cluster_parameters +- [X] describe_cluster_security_groups +- [X] describe_cluster_snapshots +- [X] describe_cluster_subnet_groups +- [ ] describe_cluster_tracks +- [ ] describe_cluster_versions +- [X] describe_clusters +- [ ] describe_default_cluster_parameters +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_hsm_client_certificates +- [ ] describe_hsm_configurations +- [ ] describe_logging_status +- [ ] describe_orderable_cluster_options +- [ ] describe_reserved_node_offerings +- [ ] describe_reserved_nodes +- [ ] describe_resize +- [X] describe_snapshot_copy_grants +- [ ] describe_table_restore_status +- [X] describe_tags +- [ ] disable_logging +- [X] disable_snapshot_copy +- [ ] enable_logging +- [X] enable_snapshot_copy +- [ ] get_cluster_credentials +- [ ] get_reserved_node_exchange_offerings +- [X] modify_cluster +- [ ] modify_cluster_db_revision +- [ ] modify_cluster_iam_roles +- [ ] modify_cluster_parameter_group +- [ ] modify_cluster_subnet_group +- [ ] modify_event_subscription +- [X] modify_snapshot_copy_retention_period +- [ ] purchase_reserved_node_offering +- [ ] reboot_cluster +- [ ] reset_cluster_parameter_group +- [X] restore_from_cluster_snapshot +- [ ] restore_table_from_cluster_snapshot +- [ ] revoke_cluster_security_group_ingress +- [ ] revoke_snapshot_access +- [ ] rotate_encryption_key + +## rekognition - 0% implemented +- [ ] compare_faces +- [ ] create_collection +- [ ] create_stream_processor +- [ ] delete_collection +- [ ] delete_faces +- [ ] delete_stream_processor +- [ ] describe_stream_processor +- [ ] detect_faces +- [ ] detect_labels +- [ ] detect_moderation_labels +- [ ] detect_text +- [ ] get_celebrity_info +- [ ] get_celebrity_recognition +- [ ] get_content_moderation +- [ ] get_face_detection +- [ ] get_face_search +- [ ] get_label_detection +- [ ] get_person_tracking +- [ ] index_faces +- [ ] list_collections +- [ ] list_faces +- [ ] list_stream_processors +- [ ] recognize_celebrities +- [ ] search_faces +- [ ] search_faces_by_image +- [ ] start_celebrity_recognition +- [ ] start_content_moderation +- [ ] start_face_detection +- [ ] start_face_search +- [ ] start_label_detection +- [ ] start_person_tracking +- [ ] start_stream_processor +- [ ] stop_stream_processor + +## resource-groups - 0% implemented +- [ ] create_group +- [ ] delete_group +- [ ] get_group +- [ ] get_group_query +- [ ] get_tags +- [ ] list_group_resources +- [ ] list_groups +- [ ] search_resources +- [ ] tag +- [ ] untag +- [ ] update_group +- [ ] update_group_query + +## resourcegroupstaggingapi - 60% implemented +- [X] get_resources +- [X] get_tag_keys +- [X] get_tag_values +- [ ] tag_resources +- [ ] untag_resources + +## route53 - 12% implemented +- [ ] associate_vpc_with_hosted_zone +- [ ] change_resource_record_sets +- [X] change_tags_for_resource +- [X] create_health_check +- [X] create_hosted_zone +- [ ] create_query_logging_config +- [ ] create_reusable_delegation_set +- [ ] create_traffic_policy +- [ ] create_traffic_policy_instance +- [ ] create_traffic_policy_version +- [ ] create_vpc_association_authorization +- [X] delete_health_check +- [X] delete_hosted_zone +- [ ] 
delete_query_logging_config +- [ ] delete_reusable_delegation_set +- [ ] delete_traffic_policy +- [ ] delete_traffic_policy_instance +- [ ] delete_vpc_association_authorization +- [ ] disassociate_vpc_from_hosted_zone +- [ ] get_account_limit +- [ ] get_change +- [ ] get_checker_ip_ranges +- [ ] get_geo_location +- [ ] get_health_check +- [ ] get_health_check_count +- [ ] get_health_check_last_failure_reason +- [ ] get_health_check_status +- [X] get_hosted_zone +- [ ] get_hosted_zone_count +- [ ] get_hosted_zone_limit +- [ ] get_query_logging_config +- [ ] get_reusable_delegation_set +- [ ] get_reusable_delegation_set_limit +- [ ] get_traffic_policy +- [ ] get_traffic_policy_instance +- [ ] get_traffic_policy_instance_count +- [ ] list_geo_locations +- [ ] list_health_checks +- [ ] list_hosted_zones +- [ ] list_hosted_zones_by_name +- [ ] list_query_logging_configs +- [ ] list_resource_record_sets +- [ ] list_reusable_delegation_sets +- [X] list_tags_for_resource +- [ ] list_tags_for_resources +- [ ] list_traffic_policies +- [ ] list_traffic_policy_instances +- [ ] list_traffic_policy_instances_by_hosted_zone +- [ ] list_traffic_policy_instances_by_policy +- [ ] list_traffic_policy_versions +- [ ] list_vpc_association_authorizations +- [ ] test_dns_answer +- [ ] update_health_check +- [ ] update_hosted_zone_comment +- [ ] update_traffic_policy_comment +- [ ] update_traffic_policy_instance + +## route53domains - 0% implemented +- [ ] check_domain_availability +- [ ] check_domain_transferability +- [ ] delete_tags_for_domain +- [ ] disable_domain_auto_renew +- [ ] disable_domain_transfer_lock +- [ ] enable_domain_auto_renew +- [ ] enable_domain_transfer_lock +- [ ] get_contact_reachability_status +- [ ] get_domain_detail +- [ ] get_domain_suggestions +- [ ] get_operation_detail +- [ ] list_domains +- [ ] list_operations +- [ ] list_tags_for_domain +- [ ] register_domain +- [ ] renew_domain +- [ ] resend_contact_reachability_email +- [ ] retrieve_domain_auth_code +- [ ] transfer_domain +- [ ] update_domain_contact +- [ ] update_domain_contact_privacy +- [ ] update_domain_nameservers +- [ ] update_tags_for_domain +- [ ] view_billing + +## s3 - 15% implemented +- [ ] abort_multipart_upload +- [ ] complete_multipart_upload +- [ ] copy_object +- [X] create_bucket +- [ ] create_multipart_upload +- [X] delete_bucket +- [ ] delete_bucket_analytics_configuration +- [X] delete_bucket_cors +- [ ] delete_bucket_encryption +- [ ] delete_bucket_inventory_configuration +- [ ] delete_bucket_lifecycle +- [ ] delete_bucket_metrics_configuration +- [X] delete_bucket_policy +- [ ] delete_bucket_replication +- [X] delete_bucket_tagging +- [ ] delete_bucket_website +- [ ] delete_object +- [ ] delete_object_tagging +- [ ] delete_objects +- [ ] get_bucket_accelerate_configuration +- [X] get_bucket_acl +- [ ] get_bucket_analytics_configuration +- [ ] get_bucket_cors +- [ ] get_bucket_encryption +- [ ] get_bucket_inventory_configuration +- [ ] get_bucket_lifecycle +- [ ] get_bucket_lifecycle_configuration +- [ ] get_bucket_location +- [ ] get_bucket_logging +- [ ] get_bucket_metrics_configuration +- [ ] get_bucket_notification +- [ ] get_bucket_notification_configuration +- [X] get_bucket_policy +- [ ] get_bucket_replication +- [ ] get_bucket_request_payment +- [ ] get_bucket_tagging +- [X] get_bucket_versioning +- [ ] get_bucket_website +- [ ] get_object +- [ ] get_object_acl +- [ ] get_object_tagging +- [ ] get_object_torrent +- [ ] head_bucket +- [ ] head_object +- [ ] list_bucket_analytics_configurations +- [ ] 
list_bucket_inventory_configurations +- [ ] list_bucket_metrics_configurations +- [ ] list_buckets +- [ ] list_multipart_uploads +- [ ] list_object_versions +- [ ] list_objects +- [ ] list_objects_v2 +- [ ] list_parts +- [ ] put_bucket_accelerate_configuration +- [ ] put_bucket_acl +- [ ] put_bucket_analytics_configuration +- [X] put_bucket_cors +- [ ] put_bucket_encryption +- [ ] put_bucket_inventory_configuration +- [ ] put_bucket_lifecycle +- [ ] put_bucket_lifecycle_configuration +- [X] put_bucket_logging +- [ ] put_bucket_metrics_configuration +- [ ] put_bucket_notification +- [X] put_bucket_notification_configuration +- [ ] put_bucket_policy +- [ ] put_bucket_replication +- [ ] put_bucket_request_payment +- [X] put_bucket_tagging +- [ ] put_bucket_versioning +- [ ] put_bucket_website +- [ ] put_object +- [ ] put_object_acl +- [ ] put_object_tagging +- [ ] restore_object +- [ ] select_object_content +- [ ] upload_part +- [ ] upload_part_copy + +## sagemaker - 0% implemented +- [ ] add_tags +- [ ] create_endpoint +- [ ] create_endpoint_config +- [ ] create_hyper_parameter_tuning_job +- [ ] create_model +- [ ] create_notebook_instance +- [ ] create_notebook_instance_lifecycle_config +- [ ] create_presigned_notebook_instance_url +- [ ] create_training_job +- [ ] create_transform_job +- [ ] delete_endpoint +- [ ] delete_endpoint_config +- [ ] delete_model +- [ ] delete_notebook_instance +- [ ] delete_notebook_instance_lifecycle_config +- [ ] delete_tags +- [ ] describe_endpoint +- [ ] describe_endpoint_config +- [ ] describe_hyper_parameter_tuning_job +- [ ] describe_model +- [ ] describe_notebook_instance +- [ ] describe_notebook_instance_lifecycle_config +- [ ] describe_training_job +- [ ] describe_transform_job +- [ ] list_endpoint_configs +- [ ] list_endpoints +- [ ] list_hyper_parameter_tuning_jobs +- [ ] list_models +- [ ] list_notebook_instance_lifecycle_configs +- [ ] list_notebook_instances +- [ ] list_tags +- [ ] list_training_jobs +- [ ] list_training_jobs_for_hyper_parameter_tuning_job +- [ ] list_transform_jobs +- [ ] start_notebook_instance +- [ ] stop_hyper_parameter_tuning_job +- [ ] stop_notebook_instance +- [ ] stop_training_job +- [ ] stop_transform_job +- [ ] update_endpoint +- [ ] update_endpoint_weights_and_capacities +- [ ] update_notebook_instance +- [ ] update_notebook_instance_lifecycle_config + +## sagemaker-runtime - 0% implemented +- [ ] invoke_endpoint + +## sdb - 0% implemented +- [ ] batch_delete_attributes +- [ ] batch_put_attributes +- [ ] create_domain +- [ ] delete_attributes +- [ ] delete_domain +- [ ] domain_metadata +- [ ] get_attributes +- [ ] list_domains +- [ ] put_attributes +- [ ] select + +## secretsmanager - 22% implemented +- [ ] cancel_rotate_secret +- [X] create_secret +- [ ] delete_resource_policy +- [ ] delete_secret +- [X] describe_secret +- [X] get_random_password +- [ ] get_resource_policy +- [X] get_secret_value +- [ ] list_secret_version_ids +- [ ] list_secrets +- [ ] put_resource_policy +- [ ] put_secret_value +- [ ] restore_secret +- [ ] rotate_secret +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_secret +- [ ] update_secret_version_stage + +## serverlessrepo - 0% implemented +- [ ] create_application +- [ ] create_application_version +- [ ] create_cloud_formation_change_set +- [ ] delete_application +- [ ] get_application +- [ ] get_application_policy +- [ ] list_application_versions +- [ ] list_applications +- [ ] put_application_policy +- [ ] update_application + +## servicecatalog - 0% implemented +- [ ] 
accept_portfolio_share +- [ ] associate_principal_with_portfolio +- [ ] associate_product_with_portfolio +- [ ] associate_tag_option_with_resource +- [ ] copy_product +- [ ] create_constraint +- [ ] create_portfolio +- [ ] create_portfolio_share +- [ ] create_product +- [ ] create_provisioned_product_plan +- [ ] create_provisioning_artifact +- [ ] create_tag_option +- [ ] delete_constraint +- [ ] delete_portfolio +- [ ] delete_portfolio_share +- [ ] delete_product +- [ ] delete_provisioned_product_plan +- [ ] delete_provisioning_artifact +- [ ] delete_tag_option +- [ ] describe_constraint +- [ ] describe_copy_product_status +- [ ] describe_portfolio +- [ ] describe_product +- [ ] describe_product_as_admin +- [ ] describe_product_view +- [ ] describe_provisioned_product +- [ ] describe_provisioned_product_plan +- [ ] describe_provisioning_artifact +- [ ] describe_provisioning_parameters +- [ ] describe_record +- [ ] describe_tag_option +- [ ] disassociate_principal_from_portfolio +- [ ] disassociate_product_from_portfolio +- [ ] disassociate_tag_option_from_resource +- [ ] execute_provisioned_product_plan +- [ ] list_accepted_portfolio_shares +- [ ] list_constraints_for_portfolio +- [ ] list_launch_paths +- [ ] list_portfolio_access +- [ ] list_portfolios +- [ ] list_portfolios_for_product +- [ ] list_principals_for_portfolio +- [ ] list_provisioned_product_plans +- [ ] list_provisioning_artifacts +- [ ] list_record_history +- [ ] list_resources_for_tag_option +- [ ] list_tag_options +- [ ] provision_product +- [ ] reject_portfolio_share +- [ ] scan_provisioned_products +- [ ] search_products +- [ ] search_products_as_admin +- [ ] search_provisioned_products +- [ ] terminate_provisioned_product +- [ ] update_constraint +- [ ] update_portfolio +- [ ] update_product +- [ ] update_provisioned_product +- [ ] update_provisioning_artifact +- [ ] update_tag_option + +## servicediscovery - 0% implemented +- [ ] create_private_dns_namespace +- [ ] create_public_dns_namespace +- [ ] create_service +- [ ] delete_namespace +- [ ] delete_service +- [ ] deregister_instance +- [ ] get_instance +- [ ] get_instances_health_status +- [ ] get_namespace +- [ ] get_operation +- [ ] get_service +- [ ] list_instances +- [ ] list_namespaces +- [ ] list_operations +- [ ] list_services +- [ ] register_instance +- [ ] update_instance_custom_health_status +- [ ] update_service + +## ses - 11% implemented +- [ ] clone_receipt_rule_set +- [ ] create_configuration_set +- [ ] create_configuration_set_event_destination +- [ ] create_configuration_set_tracking_options +- [ ] create_custom_verification_email_template +- [ ] create_receipt_filter +- [ ] create_receipt_rule +- [ ] create_receipt_rule_set +- [ ] create_template +- [ ] delete_configuration_set +- [ ] delete_configuration_set_event_destination +- [ ] delete_configuration_set_tracking_options +- [ ] delete_custom_verification_email_template +- [X] delete_identity +- [ ] delete_identity_policy +- [ ] delete_receipt_filter +- [ ] delete_receipt_rule +- [ ] delete_receipt_rule_set +- [ ] delete_template +- [ ] delete_verified_email_address +- [ ] describe_active_receipt_rule_set +- [ ] describe_configuration_set +- [ ] describe_receipt_rule +- [ ] describe_receipt_rule_set +- [ ] get_account_sending_enabled +- [ ] get_custom_verification_email_template +- [ ] get_identity_dkim_attributes +- [ ] get_identity_mail_from_domain_attributes +- [ ] get_identity_notification_attributes +- [ ] get_identity_policies +- [ ] get_identity_verification_attributes +- [X] 
get_send_quota +- [ ] get_send_statistics +- [ ] get_template +- [ ] list_configuration_sets +- [ ] list_custom_verification_email_templates +- [X] list_identities +- [ ] list_identity_policies +- [ ] list_receipt_filters +- [ ] list_receipt_rule_sets +- [ ] list_templates +- [X] list_verified_email_addresses +- [ ] put_identity_policy +- [ ] reorder_receipt_rule_set +- [ ] send_bounce +- [ ] send_bulk_templated_email +- [ ] send_custom_verification_email +- [X] send_email +- [X] send_raw_email +- [ ] send_templated_email +- [ ] set_active_receipt_rule_set +- [ ] set_identity_dkim_enabled +- [ ] set_identity_feedback_forwarding_enabled +- [ ] set_identity_headers_in_notifications_enabled +- [ ] set_identity_mail_from_domain +- [ ] set_identity_notification_topic +- [ ] set_receipt_rule_position +- [ ] test_render_template +- [ ] update_account_sending_enabled +- [ ] update_configuration_set_event_destination +- [ ] update_configuration_set_reputation_metrics_enabled +- [ ] update_configuration_set_sending_enabled +- [ ] update_configuration_set_tracking_options +- [ ] update_custom_verification_email_template +- [ ] update_receipt_rule +- [ ] update_template +- [ ] verify_domain_dkim +- [ ] verify_domain_identity +- [X] verify_email_address +- [X] verify_email_identity + +## shield - 0% implemented +- [ ] associate_drt_log_bucket +- [ ] associate_drt_role +- [ ] create_protection +- [ ] create_subscription +- [ ] delete_protection +- [ ] delete_subscription +- [ ] describe_attack +- [ ] describe_drt_access +- [ ] describe_emergency_contact_settings +- [ ] describe_protection +- [ ] describe_subscription +- [ ] disassociate_drt_log_bucket +- [ ] disassociate_drt_role +- [ ] get_subscription_state +- [ ] list_attacks +- [ ] list_protections +- [ ] update_emergency_contact_settings +- [ ] update_subscription + +## sms - 0% implemented +- [ ] create_replication_job +- [ ] delete_replication_job +- [ ] delete_server_catalog +- [ ] disassociate_connector +- [ ] get_connectors +- [ ] get_replication_jobs +- [ ] get_replication_runs +- [ ] get_servers +- [ ] import_server_catalog +- [ ] start_on_demand_replication_run +- [ ] update_replication_job + +## snowball - 0% implemented +- [ ] cancel_cluster +- [ ] cancel_job +- [ ] create_address +- [ ] create_cluster +- [ ] create_job +- [ ] describe_address +- [ ] describe_addresses +- [ ] describe_cluster +- [ ] describe_job +- [ ] get_job_manifest +- [ ] get_job_unlock_code +- [ ] get_snowball_usage +- [ ] list_cluster_jobs +- [ ] list_clusters +- [ ] list_compatible_images +- [ ] list_jobs +- [ ] update_cluster +- [ ] update_job + +## sns - 53% implemented +- [ ] add_permission +- [ ] check_if_phone_number_is_opted_out +- [ ] confirm_subscription +- [X] create_platform_application +- [X] create_platform_endpoint +- [X] create_topic +- [X] delete_endpoint +- [X] delete_platform_application +- [X] delete_topic +- [ ] get_endpoint_attributes +- [ ] get_platform_application_attributes +- [ ] get_sms_attributes +- [X] get_subscription_attributes +- [ ] get_topic_attributes +- [X] list_endpoints_by_platform_application +- [ ] list_phone_numbers_opted_out +- [X] list_platform_applications +- [X] list_subscriptions +- [ ] list_subscriptions_by_topic +- [X] list_topics +- [ ] opt_in_phone_number +- [X] publish +- [ ] remove_permission +- [X] set_endpoint_attributes +- [ ] set_platform_application_attributes +- [ ] set_sms_attributes +- [X] set_subscription_attributes +- [ ] set_topic_attributes +- [X] subscribe +- [X] unsubscribe + +## sqs - 65% implemented 
+- [X] add_permission +- [X] change_message_visibility +- [ ] change_message_visibility_batch +- [X] create_queue +- [X] delete_message +- [ ] delete_message_batch +- [X] delete_queue +- [ ] get_queue_attributes +- [ ] get_queue_url +- [X] list_dead_letter_source_queues +- [ ] list_queue_tags +- [X] list_queues +- [X] purge_queue +- [ ] receive_message +- [X] remove_permission +- [X] send_message +- [ ] send_message_batch +- [X] set_queue_attributes +- [X] tag_queue +- [X] untag_queue + +## ssm - 10% implemented +- [X] add_tags_to_resource +- [ ] cancel_command +- [ ] create_activation +- [ ] create_association +- [ ] create_association_batch +- [ ] create_document +- [ ] create_maintenance_window +- [ ] create_patch_baseline +- [ ] create_resource_data_sync +- [ ] delete_activation +- [ ] delete_association +- [ ] delete_document +- [ ] delete_inventory +- [ ] delete_maintenance_window +- [X] delete_parameter +- [X] delete_parameters +- [ ] delete_patch_baseline +- [ ] delete_resource_data_sync +- [ ] deregister_managed_instance +- [ ] deregister_patch_baseline_for_patch_group +- [ ] deregister_target_from_maintenance_window +- [ ] deregister_task_from_maintenance_window +- [ ] describe_activations +- [ ] describe_association +- [ ] describe_association_execution_targets +- [ ] describe_association_executions +- [ ] describe_automation_executions +- [ ] describe_automation_step_executions +- [ ] describe_available_patches +- [ ] describe_document +- [ ] describe_document_permission +- [ ] describe_effective_instance_associations +- [ ] describe_effective_patches_for_patch_baseline +- [ ] describe_instance_associations_status +- [ ] describe_instance_information +- [ ] describe_instance_patch_states +- [ ] describe_instance_patch_states_for_patch_group +- [ ] describe_instance_patches +- [ ] describe_inventory_deletions +- [ ] describe_maintenance_window_execution_task_invocations +- [ ] describe_maintenance_window_execution_tasks +- [ ] describe_maintenance_window_executions +- [ ] describe_maintenance_window_targets +- [ ] describe_maintenance_window_tasks +- [ ] describe_maintenance_windows +- [ ] describe_parameters +- [ ] describe_patch_baselines +- [ ] describe_patch_group_state +- [ ] describe_patch_groups +- [ ] get_automation_execution +- [ ] get_command_invocation +- [ ] get_default_patch_baseline +- [ ] get_deployable_patch_snapshot_for_instance +- [ ] get_document +- [ ] get_inventory +- [ ] get_inventory_schema +- [ ] get_maintenance_window +- [ ] get_maintenance_window_execution +- [ ] get_maintenance_window_execution_task +- [ ] get_maintenance_window_execution_task_invocation +- [ ] get_maintenance_window_task +- [X] get_parameter +- [ ] get_parameter_history +- [X] get_parameters +- [X] get_parameters_by_path +- [ ] get_patch_baseline +- [ ] get_patch_baseline_for_patch_group +- [ ] label_parameter_version +- [ ] list_association_versions +- [ ] list_associations +- [ ] list_command_invocations +- [X] list_commands +- [ ] list_compliance_items +- [ ] list_compliance_summaries +- [ ] list_document_versions +- [ ] list_documents +- [ ] list_inventory_entries +- [ ] list_resource_compliance_summaries +- [ ] list_resource_data_sync +- [X] list_tags_for_resource +- [ ] modify_document_permission +- [ ] put_compliance_items +- [ ] put_inventory +- [X] put_parameter +- [ ] register_default_patch_baseline +- [ ] register_patch_baseline_for_patch_group +- [ ] register_target_with_maintenance_window +- [ ] register_task_with_maintenance_window +- [X] remove_tags_from_resource +- [ ] 
send_automation_signal +- [X] send_command +- [ ] start_associations_once +- [ ] start_automation_execution +- [ ] stop_automation_execution +- [ ] update_association +- [ ] update_association_status +- [ ] update_document +- [ ] update_document_default_version +- [ ] update_maintenance_window +- [ ] update_maintenance_window_target +- [ ] update_maintenance_window_task +- [ ] update_managed_instance_role +- [ ] update_patch_baseline + +## stepfunctions - 0% implemented +- [ ] create_activity +- [ ] create_state_machine +- [ ] delete_activity +- [ ] delete_state_machine +- [ ] describe_activity +- [ ] describe_execution +- [ ] describe_state_machine +- [ ] describe_state_machine_for_execution +- [ ] get_activity_task +- [ ] get_execution_history +- [ ] list_activities +- [ ] list_executions +- [ ] list_state_machines +- [ ] send_task_failure +- [ ] send_task_heartbeat +- [ ] send_task_success +- [ ] start_execution +- [ ] stop_execution +- [ ] update_state_machine + +## storagegateway - 0% implemented +- [ ] activate_gateway +- [ ] add_cache +- [ ] add_tags_to_resource +- [ ] add_upload_buffer +- [ ] add_working_storage +- [ ] cancel_archival +- [ ] cancel_retrieval +- [ ] create_cached_iscsi_volume +- [ ] create_nfs_file_share +- [ ] create_smb_file_share +- [ ] create_snapshot +- [ ] create_snapshot_from_volume_recovery_point +- [ ] create_stored_iscsi_volume +- [ ] create_tape_with_barcode +- [ ] create_tapes +- [ ] delete_bandwidth_rate_limit +- [ ] delete_chap_credentials +- [ ] delete_file_share +- [ ] delete_gateway +- [ ] delete_snapshot_schedule +- [ ] delete_tape +- [ ] delete_tape_archive +- [ ] delete_volume +- [ ] describe_bandwidth_rate_limit +- [ ] describe_cache +- [ ] describe_cached_iscsi_volumes +- [ ] describe_chap_credentials +- [ ] describe_gateway_information +- [ ] describe_maintenance_start_time +- [ ] describe_nfs_file_shares +- [ ] describe_smb_file_shares +- [ ] describe_smb_settings +- [ ] describe_snapshot_schedule +- [ ] describe_stored_iscsi_volumes +- [ ] describe_tape_archives +- [ ] describe_tape_recovery_points +- [ ] describe_tapes +- [ ] describe_upload_buffer +- [ ] describe_vtl_devices +- [ ] describe_working_storage +- [ ] disable_gateway +- [ ] join_domain +- [ ] list_file_shares +- [ ] list_gateways +- [ ] list_local_disks +- [ ] list_tags_for_resource +- [ ] list_tapes +- [ ] list_volume_initiators +- [ ] list_volume_recovery_points +- [ ] list_volumes +- [ ] notify_when_uploaded +- [ ] refresh_cache +- [ ] remove_tags_from_resource +- [ ] reset_cache +- [ ] retrieve_tape_archive +- [ ] retrieve_tape_recovery_point +- [ ] set_local_console_password +- [ ] set_smb_guest_password +- [ ] shutdown_gateway +- [ ] start_gateway +- [ ] update_bandwidth_rate_limit +- [ ] update_chap_credentials +- [ ] update_gateway_information +- [ ] update_gateway_software_now +- [ ] update_maintenance_start_time +- [ ] update_nfs_file_share +- [ ] update_smb_file_share +- [ ] update_snapshot_schedule +- [ ] update_vtl_device_type + +## sts - 42% implemented +- [X] assume_role +- [ ] assume_role_with_saml +- [ ] assume_role_with_web_identity +- [ ] decode_authorization_message +- [ ] get_caller_identity +- [X] get_federation_token +- [X] get_session_token + +## support - 0% implemented +- [ ] add_attachments_to_set +- [ ] add_communication_to_case +- [ ] create_case +- [ ] describe_attachment +- [ ] describe_cases +- [ ] describe_communications +- [ ] describe_services +- [ ] describe_severity_levels +- [ ] describe_trusted_advisor_check_refresh_statuses +- [ ] 
describe_trusted_advisor_check_result +- [ ] describe_trusted_advisor_check_summaries +- [ ] describe_trusted_advisor_checks +- [ ] refresh_trusted_advisor_check +- [ ] resolve_case + +## swf - 58% implemented +- [ ] count_closed_workflow_executions +- [ ] count_open_workflow_executions +- [X] count_pending_activity_tasks +- [X] count_pending_decision_tasks +- [ ] deprecate_activity_type +- [X] deprecate_domain +- [ ] deprecate_workflow_type +- [ ] describe_activity_type +- [X] describe_domain +- [X] describe_workflow_execution +- [ ] describe_workflow_type +- [ ] get_workflow_execution_history +- [ ] list_activity_types +- [X] list_closed_workflow_executions +- [X] list_domains +- [X] list_open_workflow_executions +- [ ] list_workflow_types +- [X] poll_for_activity_task +- [X] poll_for_decision_task +- [X] record_activity_task_heartbeat +- [ ] register_activity_type +- [X] register_domain +- [ ] register_workflow_type +- [ ] request_cancel_workflow_execution +- [ ] respond_activity_task_canceled +- [X] respond_activity_task_completed +- [X] respond_activity_task_failed +- [X] respond_decision_task_completed +- [X] signal_workflow_execution +- [X] start_workflow_execution +- [X] terminate_workflow_execution + +## transcribe - 0% implemented +- [ ] create_vocabulary +- [ ] delete_vocabulary +- [ ] get_transcription_job +- [ ] get_vocabulary +- [ ] list_transcription_jobs +- [ ] list_vocabularies +- [ ] start_transcription_job +- [ ] update_vocabulary + +## translate - 0% implemented +- [ ] translate_text + +## waf - 0% implemented +- [ ] create_byte_match_set +- [ ] create_geo_match_set +- [ ] create_ip_set +- [ ] create_rate_based_rule +- [ ] create_regex_match_set +- [ ] create_regex_pattern_set +- [ ] create_rule +- [ ] create_rule_group +- [ ] create_size_constraint_set +- [ ] create_sql_injection_match_set +- [ ] create_web_acl +- [ ] create_xss_match_set +- [ ] delete_byte_match_set +- [ ] delete_geo_match_set +- [ ] delete_ip_set +- [ ] delete_permission_policy +- [ ] delete_rate_based_rule +- [ ] delete_regex_match_set +- [ ] delete_regex_pattern_set +- [ ] delete_rule +- [ ] delete_rule_group +- [ ] delete_size_constraint_set +- [ ] delete_sql_injection_match_set +- [ ] delete_web_acl +- [ ] delete_xss_match_set +- [ ] get_byte_match_set +- [ ] get_change_token +- [ ] get_change_token_status +- [ ] get_geo_match_set +- [ ] get_ip_set +- [ ] get_permission_policy +- [ ] get_rate_based_rule +- [ ] get_rate_based_rule_managed_keys +- [ ] get_regex_match_set +- [ ] get_regex_pattern_set +- [ ] get_rule +- [ ] get_rule_group +- [ ] get_sampled_requests +- [ ] get_size_constraint_set +- [ ] get_sql_injection_match_set +- [ ] get_web_acl +- [ ] get_xss_match_set +- [ ] list_activated_rules_in_rule_group +- [ ] list_byte_match_sets +- [ ] list_geo_match_sets +- [ ] list_ip_sets +- [ ] list_rate_based_rules +- [ ] list_regex_match_sets +- [ ] list_regex_pattern_sets +- [ ] list_rule_groups +- [ ] list_rules +- [ ] list_size_constraint_sets +- [ ] list_sql_injection_match_sets +- [ ] list_subscribed_rule_groups +- [ ] list_web_acls +- [ ] list_xss_match_sets +- [ ] put_permission_policy +- [ ] update_byte_match_set +- [ ] update_geo_match_set +- [ ] update_ip_set +- [ ] update_rate_based_rule +- [ ] update_regex_match_set +- [ ] update_regex_pattern_set +- [ ] update_rule +- [ ] update_rule_group +- [ ] update_size_constraint_set +- [ ] update_sql_injection_match_set +- [ ] update_web_acl +- [ ] update_xss_match_set + +## waf-regional - 0% implemented +- [ ] associate_web_acl +- [ ] 
create_byte_match_set +- [ ] create_geo_match_set +- [ ] create_ip_set +- [ ] create_rate_based_rule +- [ ] create_regex_match_set +- [ ] create_regex_pattern_set +- [ ] create_rule +- [ ] create_rule_group +- [ ] create_size_constraint_set +- [ ] create_sql_injection_match_set +- [ ] create_web_acl +- [ ] create_xss_match_set +- [ ] delete_byte_match_set +- [ ] delete_geo_match_set +- [ ] delete_ip_set +- [ ] delete_permission_policy +- [ ] delete_rate_based_rule +- [ ] delete_regex_match_set +- [ ] delete_regex_pattern_set +- [ ] delete_rule +- [ ] delete_rule_group +- [ ] delete_size_constraint_set +- [ ] delete_sql_injection_match_set +- [ ] delete_web_acl +- [ ] delete_xss_match_set +- [ ] disassociate_web_acl +- [ ] get_byte_match_set +- [ ] get_change_token +- [ ] get_change_token_status +- [ ] get_geo_match_set +- [ ] get_ip_set +- [ ] get_permission_policy +- [ ] get_rate_based_rule +- [ ] get_rate_based_rule_managed_keys +- [ ] get_regex_match_set +- [ ] get_regex_pattern_set +- [ ] get_rule +- [ ] get_rule_group +- [ ] get_sampled_requests +- [ ] get_size_constraint_set +- [ ] get_sql_injection_match_set +- [ ] get_web_acl +- [ ] get_web_acl_for_resource +- [ ] get_xss_match_set +- [ ] list_activated_rules_in_rule_group +- [ ] list_byte_match_sets +- [ ] list_geo_match_sets +- [ ] list_ip_sets +- [ ] list_rate_based_rules +- [ ] list_regex_match_sets +- [ ] list_regex_pattern_sets +- [ ] list_resources_for_web_acl +- [ ] list_rule_groups +- [ ] list_rules +- [ ] list_size_constraint_sets +- [ ] list_sql_injection_match_sets +- [ ] list_subscribed_rule_groups +- [ ] list_web_acls +- [ ] list_xss_match_sets +- [ ] put_permission_policy +- [ ] update_byte_match_set +- [ ] update_geo_match_set +- [ ] update_ip_set +- [ ] update_rate_based_rule +- [ ] update_regex_match_set +- [ ] update_regex_pattern_set +- [ ] update_rule +- [ ] update_rule_group +- [ ] update_size_constraint_set +- [ ] update_sql_injection_match_set +- [ ] update_web_acl +- [ ] update_xss_match_set + +## workdocs - 0% implemented +- [ ] abort_document_version_upload +- [ ] activate_user +- [ ] add_resource_permissions +- [ ] create_comment +- [ ] create_custom_metadata +- [ ] create_folder +- [ ] create_labels +- [ ] create_notification_subscription +- [ ] create_user +- [ ] deactivate_user +- [ ] delete_comment +- [ ] delete_custom_metadata +- [ ] delete_document +- [ ] delete_folder +- [ ] delete_folder_contents +- [ ] delete_labels +- [ ] delete_notification_subscription +- [ ] delete_user +- [ ] describe_activities +- [ ] describe_comments +- [ ] describe_document_versions +- [ ] describe_folder_contents +- [ ] describe_groups +- [ ] describe_notification_subscriptions +- [ ] describe_resource_permissions +- [ ] describe_root_folders +- [ ] describe_users +- [ ] get_current_user +- [ ] get_document +- [ ] get_document_path +- [ ] get_document_version +- [ ] get_folder +- [ ] get_folder_path +- [ ] initiate_document_version_upload +- [ ] remove_all_resource_permissions +- [ ] remove_resource_permission +- [ ] update_document +- [ ] update_document_version +- [ ] update_folder +- [ ] update_user + +## workmail - 0% implemented +- [ ] associate_delegate_to_resource +- [ ] associate_member_to_group +- [ ] create_alias +- [ ] create_group +- [ ] create_resource +- [ ] create_user +- [ ] delete_alias +- [ ] delete_group +- [ ] delete_mailbox_permissions +- [ ] delete_resource +- [ ] delete_user +- [ ] deregister_from_work_mail +- [ ] describe_group +- [ ] describe_organization +- [ ] describe_resource +- [ ] 
describe_user
+- [ ] disassociate_delegate_from_resource
+- [ ] disassociate_member_from_group
+- [ ] list_aliases
+- [ ] list_group_members
+- [ ] list_groups
+- [ ] list_mailbox_permissions
+- [ ] list_organizations
+- [ ] list_resource_delegates
+- [ ] list_resources
+- [ ] list_users
+- [ ] put_mailbox_permissions
+- [ ] register_to_work_mail
+- [ ] reset_password
+- [ ] update_primary_email_address
+- [ ] update_resource
+
+## workspaces - 0% implemented
+- [ ] associate_ip_groups
+- [ ] authorize_ip_rules
+- [ ] create_ip_group
+- [ ] create_tags
+- [ ] create_workspaces
+- [ ] delete_ip_group
+- [ ] delete_tags
+- [ ] describe_ip_groups
+- [ ] describe_tags
+- [ ] describe_workspace_bundles
+- [ ] describe_workspace_directories
+- [ ] describe_workspaces
+- [ ] describe_workspaces_connection_status
+- [ ] disassociate_ip_groups
+- [ ] modify_workspace_properties
+- [ ] modify_workspace_state
+- [ ] reboot_workspaces
+- [ ] rebuild_workspaces
+- [ ] revoke_ip_rules
+- [ ] start_workspaces
+- [ ] stop_workspaces
+- [ ] terminate_workspaces
+- [ ] update_rules_of_ip_group
+
+## xray - 0% implemented
+- [ ] batch_get_traces
+- [ ] get_encryption_config
+- [ ] get_service_graph
+- [ ] get_trace_graph
+- [ ] get_trace_summaries
+- [ ] put_encryption_config
+- [ ] put_telemetry_records
+- [ ] put_trace_segments
diff --git a/moto/iot/models.py b/moto/iot/models.py
index c36bb985f..931af192a 100644
--- a/moto/iot/models.py
+++ b/moto/iot/models.py
@@ -610,6 +610,9 @@ class IoTBackend(BaseBackend):
     def describe_job(self, job_id):
         return self.jobs[job_id]
 
+    def get_job_document(self, job_id):
+        return self.jobs[job_id]
+
 
 available_regions = boto3.session.Session().get_available_regions("iot")
 iot_backends = {region: IoTBackend(region) for region in available_regions}
diff --git a/moto/iot/responses.py b/moto/iot/responses.py
index 006c4c4cc..0d3677925 100644
--- a/moto/iot/responses.py
+++ b/moto/iot/responses.py
@@ -149,6 +149,16 @@ class IoTResponse(BaseResponse):
             targetSelection=job.target_selection
         )))
 
+    def get_job_document(self):
+        job = self.iot_backend.get_job_document(job_id=self._get_param("jobId"))
+
+        if job.document is not None:
+            json.dumps({'document': job.document})
+        else:
+            # job.document_source is not None:
+            # TODO: needs to be implemented to get document_source's content from S3
+            return json.dumps({'document': ''})
+
     def create_keys_and_certificate(self):
         set_as_active = self._get_bool_param("setAsActive")
         cert, key_pair = self.iot_backend.create_keys_and_certificate(
diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py
index 5c6effd7a..1f2305360 100644
--- a/tests/test_iot/test_iot.py
+++ b/tests/test_iot/test_iot.py
@@ -1,7 +1,7 @@
 from __future__ import unicode_literals
 
 import json
-import sure  # noqa
+
 import boto3
 
 from moto import mock_iot
@@ -681,3 +681,65 @@ def test_describe_job_1():
         "expiresInSec").which.should.equal(123)
     job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key(
         "maximumPerMinute").which.should.equal(10)
+
+
+@mock_iot
+def test_get_job_document_with_document_source():
+    client = boto3.client('iot', region_name='eu-west-1')
+    name = "my-thing"
+    job_id = "TestJob"
+    # thing
+    thing = client.create_thing(thingName=name)
+    thing.should.have.key('thingName').which.should.equal(name)
+    thing.should.have.key('thingArn')
+
+    job = client.create_job(
+        jobId=job_id,
+        targets=[thing["thingArn"]],
+        documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json",
+        presignedUrlConfig={
+            'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
+            'expiresInSec': 123
+        },
+        targetSelection="CONTINUOUS",
+        jobExecutionsRolloutConfig={
+            'maximumPerMinute': 10
+        }
+    )
+
+    job.should.have.key('jobId').which.should.equal(job_id)
+    job.should.have.key('jobArn')
+
+    job_document = client.get_job_document(jobId=job_id)
+    job_document.should.have.key('document').which.should.equal('')
+
+
+@mock_iot
+def test_get_job_document_with_document():
+    client = boto3.client('iot', region_name='eu-west-1')
+    name = "my-thing"
+    job_id = "TestJob1"
+    # thing
+    thing = client.create_thing(thingName=name)
+    thing.should.have.key('thingName').which.should.equal(name)
+    thing.should.have.key('thingArn')
+
+    job = client.create_job(
+        jobId=job_id,
+        targets=[thing["thingArn"]],
+        document=json.dumps({'foo': 'bar'}),
+        presignedUrlConfig={
+            'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
+            'expiresInSec': 123
+        },
+        targetSelection="CONTINUOUS",
+        jobExecutionsRolloutConfig={
+            'maximumPerMinute': 10
+        }
+    )
+
+    job.should.have.key('jobId').which.should.equal(job_id)
+    job.should.have.key('jobArn')
+
+    job_document = client.get_job_document(jobId=job_id)
+    job_document.should.have.key('document').which.should.equal('')

From 1c5c5036e364140fe979660b499cce73a344edb7 Mon Sep 17 00:00:00 2001
From: Stephan Huber
Date: Mon, 24 Sep 2018 13:04:39 +0200
Subject: [PATCH 002/125] fixing errors on get_job_document

---
 moto/iot/responses.py | 2 +-
 tests/test_iot/test_iot.py | 17 +++++++++++++----
 2 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/moto/iot/responses.py b/moto/iot/responses.py
index 0d3677925..c71d4942a 100644
--- a/moto/iot/responses.py
+++ b/moto/iot/responses.py
@@ -153,7 +153,7 @@ class IoTResponse(BaseResponse):
         job = self.iot_backend.get_job_document(job_id=self._get_param("jobId"))
 
         if job.document is not None:
-            json.dumps({'document': job.document})
+            return json.dumps({'document': job.document})
         else:
             # job.document_source is not None:
             # TODO: needs to be implemented to get document_source's content from S3
diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py
index 1f2305360..759c7d3c7 100644
--- a/tests/test_iot/test_iot.py
+++ b/tests/test_iot/test_iot.py
@@ -1,6 +1,7 @@
 from __future__ import unicode_literals
 
 import json
+import sure  # noqa
 
 import boto3
 
@@ -555,7 +556,10 @@ def test_create_job():
     client = boto3.client('iot', region_name='eu-west-1')
     name = "my-thing"
     job_id = "TestJob"
-    # thing
+    # thing# job document
+    # job_document = {
+    #     "field": "value"
+    # }
     thing = client.create_thing(thingName=name)
     thing.should.have.key('thingName').which.should.equal(name)
     thing.should.have.key('thingArn')
@@ -718,16 +722,21 @@ def test_get_job_document_with_document():
     client = boto3.client('iot', region_name='eu-west-1')
     name = "my-thing"
-    job_id = "TestJob1"
+    job_id = "TestJob"
     # thing
     thing = client.create_thing(thingName=name)
     thing.should.have.key('thingName').which.should.equal(name)
     thing.should.have.key('thingArn')
 
+    # job document
+    job_document = {
+        "field": "value"
+    }
+
     job = client.create_job(
         jobId=job_id,
         targets=[thing["thingArn"]],
-        document=json.dumps({'foo': 'bar'}),
+        document=json.dumps(job_document),
         presignedUrlConfig={
             'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
             'expiresInSec': 123
@@ -742,4 +751,4 @@ def test_get_job_document_with_document():
     job.should.have.key('jobId').which.should.equal(job_id)
     job.should.have.key('jobArn')
 
     job_document = client.get_job_document(jobId=job_id)
-    job_document.should.have.key('document').which.should.equal('')
+    job_document.should.have.key('document').which.should.equal("{\"field\": \"value\"}")
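Read together, the two IoT patches above give the mocked GetJobDocument endpoint its observable behaviour: an inline document passed to create_job is returned verbatim, while a job created with documentSource comes back as an empty string until the S3 fetch noted in the TODO is implemented. The snippet below is a minimal sketch of that round trip from the client side, assuming a moto checkout with both patches applied; the helper name demo_get_job_document and the trimmed create_job arguments are illustrative, not part of the patches.

import json

import boto3
from moto import mock_iot


@mock_iot
def demo_get_job_document():
    # Mirrors test_get_job_document_with_document above, without the sure assertions.
    client = boto3.client('iot', region_name='eu-west-1')
    thing = client.create_thing(thingName='my-thing')

    client.create_job(
        jobId='TestJob',
        targets=[thing['thingArn']],
        document=json.dumps({'field': 'value'}),
        presignedUrlConfig={
            'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
            'expiresInSec': 123
        },
        targetSelection='CONTINUOUS',
    )

    # With the `return` restored by PATCH 002, the stored document comes back
    # verbatim; a job created with documentSource instead would yield
    # {'document': ''} until the S3 lookup is implemented in the backend.
    job_document = client.get_job_document(jobId='TestJob')
    assert job_document['document'] == '{"field": "value"}'


demo_get_job_document()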
From 0ba213ffcca58ddb595156161363a6aa294ba71b Mon Sep 17 00:00:00 2001
From: Stephan Huber
Date: Tue, 16 Oct 2018 15:29:56 +0200
Subject: [PATCH 003/125] Merge branch 'master' of https://github.com/spulec/moto into spulec-master

---
 .travis.yml | 13 +
 AUTHORS.md | 1 +
 CHANGELOG.md | 5 +
 IMPLEMENTATION_COVERAGE.md | 9199 ++++++++++++++++++-----------------
 README.md | 2 +
 docs/index.rst | 10 +-
 moto/__init__.py | 3 +-
 moto/backends.py | 2 +
 moto/cloudformation/parsing.py | 1 +
 moto/cognitoidentity/responses.py | 5 +-
 moto/cognitoidentity/utils.py | 2 +-
 moto/cognitoidp/models.py | 26 +-
 moto/core/models.py | 11 +
 moto/dynamodb2/models.py | 13 +-
 moto/dynamodb2/responses.py | 32 +-
 moto/ec2/models.py | 18 +
 moto/ec2/responses/vpc_peering_connections.py | 31 +-
 moto/ecs/models.py | 8 +-
 moto/ecs/responses.py | 3 +-
 moto/elb/responses.py | 26 +-
 moto/glue/exceptions.py | 59 +-
 moto/glue/models.py | 104 +-
 moto/glue/responses.py | 103 +-
 moto/iam/models.py | 35 +-
 moto/iam/responses.py | 25 +
 moto/kms/models.py | 33 +-
 moto/kms/responses.py | 50 +
 moto/logs/exceptions.py | 2 +-
 moto/logs/models.py | 4 +-
 moto/organizations/__init__.py | 6 +
 moto/organizations/models.py | 296 +
 moto/organizations/responses.py | 87 +
 moto/organizations/urls.py | 10 +
 moto/organizations/utils.py | 59 +
 moto/packages/httpretty/core.py | 21 +-
 moto/rds/models.py | 6 +
 moto/rds2/models.py | 42 +-
 moto/rds2/responses.py | 3 +-
 moto/redshift/models.py | 2 +
 moto/s3/models.py | 54 +-
 moto/s3/responses.py | 24 +-
 moto/secretsmanager/models.py | 51 +
 moto/secretsmanager/responses.py | 12 +
 moto/server.py | 3 +
 moto/ses/models.py | 3 +-
 moto/sqs/models.py | 20 +-
 moto/sqs/responses.py | 2 +-
 moto/ssm/models.py | 102 +-
 moto/ssm/responses.py | 5 +
 requirements-dev.txt | 4 +-
 setup.py | 7 +-
 .../test_cognitoidentity.py | 14 +
 tests/test_cognitoidp/test_cognitoidp.py | 14 +
 tests/test_core/test_decorator_calls.py | 11 +
 tests/test_dynamodb2/test_dynamodb.py | 52 +-
 tests/test_ec2/test_elastic_block_store.py | 4 +-
 tests/test_ec2/test_vpc_peering.py | 39 +-
 tests/test_ecs/test_ecs_boto3.py | 115 +
 tests/test_elb/test_elb.py | 34 +
 tests/test_glue/fixtures/datacatalog.py | 25 +
 tests/test_glue/helpers.py | 81 +-
 tests/test_glue/test_datacatalog.py | 362 +-
 tests/test_iam/test_iam.py | 31 +-
 tests/test_kms/test_kms.py | 102 +-
 tests/test_logs/test_logs.py | 14 +-
 tests/test_organizations/__init__.py | 0
 .../organizations_test_utils.py | 136 +
 .../test_organizations_boto3.py | 322 +
 tests/test_rds2/test_rds2.py | 155 +
 tests/test_redshift/test_redshift.py | 4 +
 tests/test_s3/test_s3.py | 66 +
 tests/test_s3/test_s3_lifecycle.py | 121 +
 .../test_secretsmanager.py | 109 +-
 tests/test_secretsmanager/test_server.py | 286 +
 tests/test_sqs/test_sqs.py | 40 +
 tests/test_ssm/test_ssm_boto3.py | 118 +-
 76 files changed, 7929 insertions(+), 4971 deletions(-)
 create mode 100644 moto/organizations/__init__.py
 create mode 100644 moto/organizations/models.py
 create mode 100644 moto/organizations/responses.py
 create mode 100644 moto/organizations/urls.py
 create mode 100644 moto/organizations/utils.py
 create mode 100644 tests/test_organizations/__init__.py
 create mode 100644 tests/test_organizations/organizations_test_utils.py
 create mode 100644 tests/test_organizations/test_organizations_boto3.py

diff --git a/.travis.yml b/.travis.yml
index f1b7ac40d..de22818b8 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -8,6
+8,19 @@ python: env: - TEST_SERVER_MODE=false - TEST_SERVER_MODE=true +# Due to incomplete Python 3.7 support on Travis CI ( +# https://github.com/travis-ci/travis-ci/issues/9815), +# using a matrix is necessary +matrix: + include: + - python: 3.7 + env: TEST_SERVER_MODE=false + dist: xenial + sudo: true + - python: 3.7 + env: TEST_SERVER_MODE=true + dist: xenial + sudo: true before_install: - export BOTO_CONFIG=/dev/null install: diff --git a/AUTHORS.md b/AUTHORS.md index 6b7c96291..0a152505a 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -53,3 +53,4 @@ Moto is written by Steve Pulec with contributions from: * [Jim Shields](https://github.com/jimjshields) * [William Richard](https://github.com/william-richard) * [Alex Casalboni](https://github.com/alexcasalboni) +* [Jon Beilke](https://github.com/jrbeilke) diff --git a/CHANGELOG.md b/CHANGELOG.md index 202da6ce6..7f7ee4448 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,11 @@ Moto Changelog =================== +1.3.6 +----- + + * Fix boto3 pinning. + 1.3.5 ----- diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index 7fbbbcbb0..17b864dc3 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -1,4771 +1,4428 @@ - -## acm - 41% implemented -- [X] add_tags_to_certificate -- [X] delete_certificate -- [ ] describe_certificate -- [ ] export_certificate -- [X] get_certificate -- [ ] import_certificate -- [ ] list_certificates -- [ ] list_tags_for_certificate -- [X] remove_tags_from_certificate -- [X] request_certificate -- [ ] resend_validation_email -- [ ] update_certificate_options - -## acm-pca - 0% implemented -- [ ] create_certificate_authority -- [ ] create_certificate_authority_audit_report -- [ ] delete_certificate_authority -- [ ] describe_certificate_authority -- [ ] describe_certificate_authority_audit_report -- [ ] get_certificate -- [ ] get_certificate_authority_certificate -- [ ] get_certificate_authority_csr -- [ ] import_certificate_authority_certificate -- [ ] issue_certificate -- [ ] list_certificate_authorities -- [ ] list_tags -- [ ] restore_certificate_authority -- [ ] revoke_certificate -- [ ] tag_certificate_authority -- [ ] untag_certificate_authority -- [ ] update_certificate_authority - -## alexaforbusiness - 0% implemented -- [ ] associate_contact_with_address_book -- [ ] associate_device_with_room -- [ ] associate_skill_group_with_room -- [ ] create_address_book -- [ ] create_contact -- [ ] create_profile -- [ ] create_room -- [ ] create_skill_group -- [ ] create_user -- [ ] delete_address_book -- [ ] delete_contact -- [ ] delete_profile -- [ ] delete_room -- [ ] delete_room_skill_parameter -- [ ] delete_skill_group -- [ ] delete_user -- [ ] disassociate_contact_from_address_book -- [ ] disassociate_device_from_room -- [ ] disassociate_skill_group_from_room -- [ ] get_address_book -- [ ] get_contact -- [ ] get_device -- [ ] get_profile -- [ ] get_room -- [ ] get_room_skill_parameter -- [ ] get_skill_group -- [ ] list_device_events -- [ ] list_skills -- [ ] list_tags -- [ ] put_room_skill_parameter -- [ ] resolve_room -- [ ] revoke_invitation -- [ ] search_address_books -- [ ] search_contacts -- [ ] search_devices -- [ ] search_profiles -- [ ] search_rooms -- [ ] search_skill_groups -- [ ] search_users -- [ ] send_invitation -- [ ] start_device_sync -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_address_book -- [ ] update_contact -- [ ] update_device -- [ ] update_profile -- [ ] update_room -- [ ] update_skill_group - -## apigateway - 24% implemented -- [ ] 
create_api_key -- [ ] create_authorizer -- [ ] create_base_path_mapping -- [X] create_deployment -- [ ] create_documentation_part -- [ ] create_documentation_version -- [ ] create_domain_name -- [ ] create_model -- [ ] create_request_validator -- [X] create_resource -- [X] create_rest_api -- [X] create_stage -- [X] create_usage_plan -- [X] create_usage_plan_key -- [ ] create_vpc_link -- [ ] delete_api_key -- [ ] delete_authorizer -- [ ] delete_base_path_mapping -- [ ] delete_client_certificate -- [X] delete_deployment -- [ ] delete_documentation_part -- [ ] delete_documentation_version -- [ ] delete_domain_name -- [ ] delete_gateway_response -- [X] delete_integration -- [X] delete_integration_response -- [ ] delete_method -- [X] delete_method_response -- [ ] delete_model -- [ ] delete_request_validator -- [X] delete_resource -- [X] delete_rest_api -- [ ] delete_stage -- [X] delete_usage_plan -- [X] delete_usage_plan_key -- [ ] delete_vpc_link -- [ ] flush_stage_authorizers_cache -- [ ] flush_stage_cache -- [ ] generate_client_certificate -- [ ] get_account -- [ ] get_api_key -- [ ] get_api_keys -- [ ] get_authorizer -- [ ] get_authorizers -- [ ] get_base_path_mapping -- [ ] get_base_path_mappings -- [ ] get_client_certificate -- [ ] get_client_certificates -- [X] get_deployment -- [X] get_deployments -- [ ] get_documentation_part -- [ ] get_documentation_parts -- [ ] get_documentation_version -- [ ] get_documentation_versions -- [ ] get_domain_name -- [ ] get_domain_names -- [ ] get_export -- [ ] get_gateway_response -- [ ] get_gateway_responses -- [X] get_integration -- [X] get_integration_response -- [X] get_method -- [X] get_method_response -- [ ] get_model -- [ ] get_model_template -- [ ] get_models -- [ ] get_request_validator -- [ ] get_request_validators -- [X] get_resource -- [ ] get_resources -- [X] get_rest_api -- [ ] get_rest_apis -- [ ] get_sdk -- [ ] get_sdk_type -- [ ] get_sdk_types -- [X] get_stage -- [X] get_stages -- [ ] get_tags -- [ ] get_usage -- [X] get_usage_plan -- [X] get_usage_plan_key -- [X] get_usage_plan_keys -- [X] get_usage_plans -- [ ] get_vpc_link -- [ ] get_vpc_links -- [ ] import_api_keys -- [ ] import_documentation_parts -- [ ] import_rest_api -- [ ] put_gateway_response -- [ ] put_integration -- [ ] put_integration_response -- [ ] put_method -- [ ] put_method_response -- [ ] put_rest_api -- [ ] tag_resource -- [ ] test_invoke_authorizer -- [ ] test_invoke_method -- [ ] untag_resource -- [ ] update_account -- [ ] update_api_key -- [ ] update_authorizer -- [ ] update_base_path_mapping -- [ ] update_client_certificate -- [ ] update_deployment -- [ ] update_documentation_part -- [ ] update_documentation_version -- [ ] update_domain_name -- [ ] update_gateway_response -- [ ] update_integration -- [ ] update_integration_response -- [ ] update_method -- [ ] update_method_response -- [ ] update_model -- [ ] update_request_validator -- [ ] update_resource -- [ ] update_rest_api -- [X] update_stage -- [ ] update_usage -- [ ] update_usage_plan -- [ ] update_vpc_link - -## application-autoscaling - 0% implemented -- [ ] delete_scaling_policy -- [ ] delete_scheduled_action -- [ ] deregister_scalable_target -- [ ] describe_scalable_targets -- [ ] describe_scaling_activities -- [ ] describe_scaling_policies -- [ ] describe_scheduled_actions -- [ ] put_scaling_policy -- [ ] put_scheduled_action -- [ ] register_scalable_target - -## appstream - 0% implemented -- [ ] associate_fleet -- [ ] copy_image -- [ ] create_directory_config -- [ ] create_fleet -- [ ] 
create_image_builder -- [ ] create_image_builder_streaming_url -- [ ] create_stack -- [ ] create_streaming_url -- [ ] delete_directory_config -- [ ] delete_fleet -- [ ] delete_image -- [ ] delete_image_builder -- [ ] delete_image_permissions -- [ ] delete_stack -- [ ] describe_directory_configs -- [ ] describe_fleets -- [ ] describe_image_builders -- [ ] describe_image_permissions -- [ ] describe_images -- [ ] describe_sessions -- [ ] describe_stacks -- [ ] disassociate_fleet -- [ ] expire_session -- [ ] list_associated_fleets -- [ ] list_associated_stacks -- [ ] list_tags_for_resource -- [ ] start_fleet -- [ ] start_image_builder -- [ ] stop_fleet -- [ ] stop_image_builder -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_directory_config -- [ ] update_fleet -- [ ] update_image_permissions -- [ ] update_stack - -## appsync - 0% implemented -- [ ] create_api_key -- [ ] create_data_source -- [ ] create_graphql_api -- [ ] create_resolver -- [ ] create_type -- [ ] delete_api_key -- [ ] delete_data_source -- [ ] delete_graphql_api -- [ ] delete_resolver -- [ ] delete_type -- [ ] get_data_source -- [ ] get_graphql_api -- [ ] get_introspection_schema -- [ ] get_resolver -- [ ] get_schema_creation_status -- [ ] get_type -- [ ] list_api_keys -- [ ] list_data_sources -- [ ] list_graphql_apis -- [ ] list_resolvers -- [ ] list_types -- [ ] start_schema_creation -- [ ] update_api_key -- [ ] update_data_source -- [ ] update_graphql_api -- [ ] update_resolver -- [ ] update_type - -## athena - 0% implemented -- [ ] batch_get_named_query -- [ ] batch_get_query_execution -- [ ] create_named_query -- [ ] delete_named_query -- [ ] get_named_query -- [ ] get_query_execution -- [ ] get_query_results -- [ ] list_named_queries -- [ ] list_query_executions -- [ ] start_query_execution -- [ ] stop_query_execution - -## autoscaling - 44% implemented -- [X] attach_instances -- [X] attach_load_balancer_target_groups -- [X] attach_load_balancers -- [ ] complete_lifecycle_action -- [X] create_auto_scaling_group -- [X] create_launch_configuration -- [X] create_or_update_tags -- [X] delete_auto_scaling_group -- [X] delete_launch_configuration -- [ ] delete_lifecycle_hook -- [ ] delete_notification_configuration -- [X] delete_policy -- [ ] delete_scheduled_action -- [ ] delete_tags -- [ ] describe_account_limits -- [ ] describe_adjustment_types -- [X] describe_auto_scaling_groups -- [X] describe_auto_scaling_instances -- [ ] describe_auto_scaling_notification_types -- [X] describe_launch_configurations -- [ ] describe_lifecycle_hook_types -- [ ] describe_lifecycle_hooks -- [X] describe_load_balancer_target_groups -- [X] describe_load_balancers -- [ ] describe_metric_collection_types -- [ ] describe_notification_configurations -- [X] describe_policies -- [ ] describe_scaling_activities -- [ ] describe_scaling_process_types -- [ ] describe_scheduled_actions -- [ ] describe_tags -- [ ] describe_termination_policy_types -- [X] detach_instances -- [X] detach_load_balancer_target_groups -- [X] detach_load_balancers -- [ ] disable_metrics_collection -- [ ] enable_metrics_collection -- [ ] enter_standby -- [X] execute_policy -- [ ] exit_standby -- [ ] put_lifecycle_hook -- [ ] put_notification_configuration -- [ ] put_scaling_policy -- [ ] put_scheduled_update_group_action -- [ ] record_lifecycle_action_heartbeat -- [ ] resume_processes -- [X] set_desired_capacity -- [X] set_instance_health -- [ ] set_instance_protection -- [X] suspend_processes -- [ ] terminate_instance_in_auto_scaling_group -- [X] 
update_auto_scaling_group - -## autoscaling-plans - 0% implemented -- [ ] create_scaling_plan -- [ ] delete_scaling_plan -- [ ] describe_scaling_plan_resources -- [ ] describe_scaling_plans -- [ ] update_scaling_plan - -## batch - 93% implemented -- [ ] cancel_job -- [X] create_compute_environment -- [X] create_job_queue -- [X] delete_compute_environment -- [X] delete_job_queue -- [X] deregister_job_definition -- [X] describe_compute_environments -- [X] describe_job_definitions -- [X] describe_job_queues -- [X] describe_jobs -- [X] list_jobs -- [X] register_job_definition -- [X] submit_job -- [X] terminate_job -- [X] update_compute_environment -- [X] update_job_queue - -## budgets - 0% implemented -- [ ] create_budget -- [ ] create_notification -- [ ] create_subscriber -- [ ] delete_budget -- [ ] delete_notification -- [ ] delete_subscriber -- [ ] describe_budget -- [ ] describe_budgets -- [ ] describe_notifications_for_budget -- [ ] describe_subscribers_for_notification -- [ ] update_budget -- [ ] update_notification -- [ ] update_subscriber - -## ce - 0% implemented -- [ ] get_cost_and_usage -- [ ] get_dimension_values -- [ ] get_reservation_coverage -- [ ] get_reservation_purchase_recommendation -- [ ] get_reservation_utilization -- [ ] get_tags - -## cloud9 - 0% implemented -- [ ] create_environment_ec2 -- [ ] create_environment_membership -- [ ] delete_environment -- [ ] delete_environment_membership -- [ ] describe_environment_memberships -- [ ] describe_environment_status -- [ ] describe_environments -- [ ] list_environments -- [ ] update_environment -- [ ] update_environment_membership - -## clouddirectory - 0% implemented -- [ ] add_facet_to_object -- [ ] apply_schema -- [ ] attach_object -- [ ] attach_policy -- [ ] attach_to_index -- [ ] attach_typed_link -- [ ] batch_read -- [ ] batch_write -- [ ] create_directory -- [ ] create_facet -- [ ] create_index -- [ ] create_object -- [ ] create_schema -- [ ] create_typed_link_facet -- [ ] delete_directory -- [ ] delete_facet -- [ ] delete_object -- [ ] delete_schema -- [ ] delete_typed_link_facet -- [ ] detach_from_index -- [ ] detach_object -- [ ] detach_policy -- [ ] detach_typed_link -- [ ] disable_directory -- [ ] enable_directory -- [ ] get_applied_schema_version -- [ ] get_directory -- [ ] get_facet -- [ ] get_link_attributes -- [ ] get_object_attributes -- [ ] get_object_information -- [ ] get_schema_as_json -- [ ] get_typed_link_facet_information -- [ ] list_applied_schema_arns -- [ ] list_attached_indices -- [ ] list_development_schema_arns -- [ ] list_directories -- [ ] list_facet_attributes -- [ ] list_facet_names -- [ ] list_incoming_typed_links -- [ ] list_index -- [ ] list_managed_schema_arns -- [ ] list_object_attributes -- [ ] list_object_children -- [ ] list_object_parent_paths -- [ ] list_object_parents -- [ ] list_object_policies -- [ ] list_outgoing_typed_links -- [ ] list_policy_attachments -- [ ] list_published_schema_arns -- [ ] list_tags_for_resource -- [ ] list_typed_link_facet_attributes -- [ ] list_typed_link_facet_names -- [ ] lookup_policy -- [ ] publish_schema -- [ ] put_schema_from_json -- [ ] remove_facet_from_object -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_facet -- [ ] update_link_attributes -- [ ] update_object_attributes -- [ ] update_schema -- [ ] update_typed_link_facet -- [ ] upgrade_applied_schema -- [ ] upgrade_published_schema - -## cloudformation - 21% implemented -- [ ] cancel_update_stack -- [ ] continue_update_rollback -- [X] create_change_set -- [X] create_stack -- [ ] 
create_stack_instances -- [ ] create_stack_set -- [ ] delete_change_set -- [X] delete_stack -- [ ] delete_stack_instances -- [ ] delete_stack_set -- [ ] describe_account_limits -- [ ] describe_change_set -- [ ] describe_stack_events -- [ ] describe_stack_instance -- [ ] describe_stack_resource -- [ ] describe_stack_resources -- [ ] describe_stack_set -- [ ] describe_stack_set_operation -- [X] describe_stacks -- [ ] estimate_template_cost -- [X] execute_change_set -- [ ] get_stack_policy -- [ ] get_template -- [ ] get_template_summary -- [ ] list_change_sets -- [X] list_exports -- [ ] list_imports -- [ ] list_stack_instances -- [X] list_stack_resources -- [ ] list_stack_set_operation_results -- [ ] list_stack_set_operations -- [ ] list_stack_sets -- [X] list_stacks -- [ ] set_stack_policy -- [ ] signal_resource -- [ ] stop_stack_set_operation -- [X] update_stack -- [ ] update_stack_instances -- [ ] update_stack_set -- [ ] update_termination_protection -- [ ] validate_template - -## cloudfront - 0% implemented -- [ ] create_cloud_front_origin_access_identity -- [ ] create_distribution -- [ ] create_distribution_with_tags -- [ ] create_field_level_encryption_config -- [ ] create_field_level_encryption_profile -- [ ] create_invalidation -- [ ] create_public_key -- [ ] create_streaming_distribution -- [ ] create_streaming_distribution_with_tags -- [ ] delete_cloud_front_origin_access_identity -- [ ] delete_distribution -- [ ] delete_field_level_encryption_config -- [ ] delete_field_level_encryption_profile -- [ ] delete_public_key -- [ ] delete_streaming_distribution -- [ ] get_cloud_front_origin_access_identity -- [ ] get_cloud_front_origin_access_identity_config -- [ ] get_distribution -- [ ] get_distribution_config -- [ ] get_field_level_encryption -- [ ] get_field_level_encryption_config -- [ ] get_field_level_encryption_profile -- [ ] get_field_level_encryption_profile_config -- [ ] get_invalidation -- [ ] get_public_key -- [ ] get_public_key_config -- [ ] get_streaming_distribution -- [ ] get_streaming_distribution_config -- [ ] list_cloud_front_origin_access_identities -- [ ] list_distributions -- [ ] list_distributions_by_web_acl_id -- [ ] list_field_level_encryption_configs -- [ ] list_field_level_encryption_profiles -- [ ] list_invalidations -- [ ] list_public_keys -- [ ] list_streaming_distributions -- [ ] list_tags_for_resource -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_cloud_front_origin_access_identity -- [ ] update_distribution -- [ ] update_field_level_encryption_config -- [ ] update_field_level_encryption_profile -- [ ] update_public_key -- [ ] update_streaming_distribution - -## cloudhsm - 0% implemented -- [ ] add_tags_to_resource -- [ ] create_hapg -- [ ] create_hsm -- [ ] create_luna_client -- [ ] delete_hapg -- [ ] delete_hsm -- [ ] delete_luna_client -- [ ] describe_hapg -- [ ] describe_hsm -- [ ] describe_luna_client -- [ ] get_config -- [ ] list_available_zones -- [ ] list_hapgs -- [ ] list_hsms -- [ ] list_luna_clients -- [ ] list_tags_for_resource -- [ ] modify_hapg -- [ ] modify_hsm -- [ ] modify_luna_client -- [ ] remove_tags_from_resource - -## cloudhsmv2 - 0% implemented -- [ ] copy_backup_to_region -- [ ] create_cluster -- [ ] create_hsm -- [ ] delete_cluster -- [ ] delete_hsm -- [ ] describe_backups -- [ ] describe_clusters -- [ ] initialize_cluster -- [ ] list_tags -- [ ] tag_resource -- [ ] untag_resource - -## cloudsearch - 0% implemented -- [ ] build_suggesters -- [ ] create_domain -- [ ] define_analysis_scheme -- [ ] define_expression -- [ ] 
define_index_field -- [ ] define_suggester -- [ ] delete_analysis_scheme -- [ ] delete_domain -- [ ] delete_expression -- [ ] delete_index_field -- [ ] delete_suggester -- [ ] describe_analysis_schemes -- [ ] describe_availability_options -- [ ] describe_domains -- [ ] describe_expressions -- [ ] describe_index_fields -- [ ] describe_scaling_parameters -- [ ] describe_service_access_policies -- [ ] describe_suggesters -- [ ] index_documents -- [ ] list_domain_names -- [ ] update_availability_options -- [ ] update_scaling_parameters -- [ ] update_service_access_policies - -## cloudsearchdomain - 0% implemented -- [ ] search -- [ ] suggest -- [ ] upload_documents - -## cloudtrail - 0% implemented -- [ ] add_tags -- [ ] create_trail -- [ ] delete_trail -- [ ] describe_trails -- [ ] get_event_selectors -- [ ] get_trail_status -- [ ] list_public_keys -- [ ] list_tags -- [ ] lookup_events -- [ ] put_event_selectors -- [ ] remove_tags -- [ ] start_logging -- [ ] stop_logging -- [ ] update_trail - -## cloudwatch - 56% implemented -- [X] delete_alarms -- [X] delete_dashboards -- [ ] describe_alarm_history -- [ ] describe_alarms -- [ ] describe_alarms_for_metric -- [ ] disable_alarm_actions -- [ ] enable_alarm_actions -- [X] get_dashboard -- [ ] get_metric_data -- [X] get_metric_statistics -- [X] list_dashboards -- [ ] list_metrics -- [X] put_dashboard -- [X] put_metric_alarm -- [X] put_metric_data -- [X] set_alarm_state - -## codebuild - 0% implemented -- [ ] batch_delete_builds -- [ ] batch_get_builds -- [ ] batch_get_projects -- [ ] create_project -- [ ] create_webhook -- [ ] delete_project -- [ ] delete_webhook -- [ ] invalidate_project_cache -- [ ] list_builds -- [ ] list_builds_for_project -- [ ] list_curated_environment_images -- [ ] list_projects -- [ ] start_build -- [ ] stop_build -- [ ] update_project -- [ ] update_webhook - -## codecommit - 0% implemented -- [ ] batch_get_repositories -- [ ] create_branch -- [ ] create_pull_request -- [ ] create_repository -- [ ] delete_branch -- [ ] delete_comment_content -- [ ] delete_repository -- [ ] describe_pull_request_events -- [ ] get_blob -- [ ] get_branch -- [ ] get_comment -- [ ] get_comments_for_compared_commit -- [ ] get_comments_for_pull_request -- [ ] get_commit -- [ ] get_differences -- [ ] get_merge_conflicts -- [ ] get_pull_request -- [ ] get_repository -- [ ] get_repository_triggers -- [ ] list_branches -- [ ] list_pull_requests -- [ ] list_repositories -- [ ] merge_pull_request_by_fast_forward -- [ ] post_comment_for_compared_commit -- [ ] post_comment_for_pull_request -- [ ] post_comment_reply -- [ ] put_file -- [ ] put_repository_triggers -- [ ] test_repository_triggers -- [ ] update_comment -- [ ] update_default_branch -- [ ] update_pull_request_description -- [ ] update_pull_request_status -- [ ] update_pull_request_title -- [ ] update_repository_description -- [ ] update_repository_name - -## codedeploy - 0% implemented -- [ ] add_tags_to_on_premises_instances -- [ ] batch_get_application_revisions -- [ ] batch_get_applications -- [ ] batch_get_deployment_groups -- [ ] batch_get_deployment_instances -- [ ] batch_get_deployments -- [ ] batch_get_on_premises_instances -- [ ] continue_deployment -- [ ] create_application -- [ ] create_deployment -- [ ] create_deployment_config -- [ ] create_deployment_group -- [ ] delete_application -- [ ] delete_deployment_config -- [ ] delete_deployment_group -- [ ] delete_git_hub_account_token -- [ ] deregister_on_premises_instance -- [ ] get_application -- [ ] get_application_revision -- [ ] 
get_deployment -- [ ] get_deployment_config -- [ ] get_deployment_group -- [ ] get_deployment_instance -- [ ] get_on_premises_instance -- [ ] list_application_revisions -- [ ] list_applications -- [ ] list_deployment_configs -- [ ] list_deployment_groups -- [ ] list_deployment_instances -- [ ] list_deployments -- [ ] list_git_hub_account_token_names -- [ ] list_on_premises_instances -- [ ] put_lifecycle_event_hook_execution_status -- [ ] register_application_revision -- [ ] register_on_premises_instance -- [ ] remove_tags_from_on_premises_instances -- [ ] skip_wait_time_for_instance_termination -- [ ] stop_deployment -- [ ] update_application -- [ ] update_deployment_group - -## codepipeline - 0% implemented -- [ ] acknowledge_job -- [ ] acknowledge_third_party_job -- [ ] create_custom_action_type -- [ ] create_pipeline -- [ ] delete_custom_action_type -- [ ] delete_pipeline -- [ ] delete_webhook -- [ ] deregister_webhook_with_third_party -- [ ] disable_stage_transition -- [ ] enable_stage_transition -- [ ] get_job_details -- [ ] get_pipeline -- [ ] get_pipeline_execution -- [ ] get_pipeline_state -- [ ] get_third_party_job_details -- [ ] list_action_types -- [ ] list_pipeline_executions -- [ ] list_pipelines -- [ ] list_webhooks -- [ ] poll_for_jobs -- [ ] poll_for_third_party_jobs -- [ ] put_action_revision -- [ ] put_approval_result -- [ ] put_job_failure_result -- [ ] put_job_success_result -- [ ] put_third_party_job_failure_result -- [ ] put_third_party_job_success_result -- [ ] put_webhook -- [ ] register_webhook_with_third_party -- [ ] retry_stage_execution -- [ ] start_pipeline_execution -- [ ] update_pipeline - -## codestar - 0% implemented -- [ ] associate_team_member -- [ ] create_project -- [ ] create_user_profile -- [ ] delete_project -- [ ] delete_user_profile -- [ ] describe_project -- [ ] describe_user_profile -- [ ] disassociate_team_member -- [ ] list_projects -- [ ] list_resources -- [ ] list_tags_for_project -- [ ] list_team_members -- [ ] list_user_profiles -- [ ] tag_project -- [ ] untag_project -- [ ] update_project -- [ ] update_team_member -- [ ] update_user_profile - -## cognito-identity - 22% implemented -- [X] create_identity_pool -- [ ] delete_identities -- [ ] delete_identity_pool -- [ ] describe_identity -- [ ] describe_identity_pool -- [X] get_credentials_for_identity -- [X] get_id -- [ ] get_identity_pool_roles -- [ ] get_open_id_token -- [X] get_open_id_token_for_developer_identity -- [ ] list_identities -- [ ] list_identity_pools -- [ ] lookup_developer_identity -- [ ] merge_developer_identities -- [ ] set_identity_pool_roles -- [ ] unlink_developer_identity -- [ ] unlink_identity -- [ ] update_identity_pool - -## cognito-idp - 25% implemented -- [ ] add_custom_attributes -- [ ] admin_add_user_to_group -- [ ] admin_confirm_sign_up -- [X] admin_create_user -- [X] admin_delete_user -- [ ] admin_delete_user_attributes -- [ ] admin_disable_provider_for_user -- [ ] admin_disable_user -- [ ] admin_enable_user -- [ ] admin_forget_device -- [ ] admin_get_device -- [X] admin_get_user -- [X] admin_initiate_auth -- [ ] admin_link_provider_for_user -- [ ] admin_list_devices -- [ ] admin_list_groups_for_user -- [ ] admin_list_user_auth_events -- [ ] admin_remove_user_from_group -- [ ] admin_reset_user_password -- [ ] admin_respond_to_auth_challenge -- [ ] admin_set_user_mfa_preference -- [ ] admin_set_user_settings -- [ ] admin_update_auth_event_feedback -- [ ] admin_update_device_status -- [ ] admin_update_user_attributes -- [ ] admin_user_global_sign_out -- [ ] 
associate_software_token -- [X] change_password -- [ ] confirm_device -- [X] confirm_forgot_password -- [ ] confirm_sign_up -- [ ] create_group -- [X] create_identity_provider -- [ ] create_resource_server -- [ ] create_user_import_job -- [X] create_user_pool -- [X] create_user_pool_client -- [X] create_user_pool_domain -- [ ] delete_group -- [X] delete_identity_provider -- [ ] delete_resource_server -- [ ] delete_user -- [ ] delete_user_attributes -- [X] delete_user_pool -- [X] delete_user_pool_client -- [X] delete_user_pool_domain -- [X] describe_identity_provider -- [ ] describe_resource_server -- [ ] describe_risk_configuration -- [ ] describe_user_import_job -- [X] describe_user_pool -- [X] describe_user_pool_client -- [X] describe_user_pool_domain -- [ ] forget_device -- [ ] forgot_password -- [ ] get_csv_header -- [ ] get_device -- [ ] get_group -- [ ] get_identity_provider_by_identifier -- [ ] get_signing_certificate -- [ ] get_ui_customization -- [ ] get_user -- [ ] get_user_attribute_verification_code -- [ ] get_user_pool_mfa_config -- [ ] global_sign_out -- [ ] initiate_auth -- [ ] list_devices -- [ ] list_groups -- [X] list_identity_providers -- [ ] list_resource_servers -- [ ] list_user_import_jobs -- [X] list_user_pool_clients -- [X] list_user_pools -- [X] list_users -- [ ] list_users_in_group -- [ ] resend_confirmation_code -- [X] respond_to_auth_challenge -- [ ] set_risk_configuration -- [ ] set_ui_customization -- [ ] set_user_mfa_preference -- [ ] set_user_pool_mfa_config -- [ ] set_user_settings -- [ ] sign_up -- [ ] start_user_import_job -- [ ] stop_user_import_job -- [ ] update_auth_event_feedback -- [ ] update_device_status -- [ ] update_group -- [ ] update_identity_provider -- [ ] update_resource_server -- [ ] update_user_attributes -- [ ] update_user_pool -- [X] update_user_pool_client -- [ ] verify_software_token -- [ ] verify_user_attribute - -## cognito-sync - 0% implemented -- [ ] bulk_publish -- [ ] delete_dataset -- [ ] describe_dataset -- [ ] describe_identity_pool_usage -- [ ] describe_identity_usage -- [ ] get_bulk_publish_details -- [ ] get_cognito_events -- [ ] get_identity_pool_configuration -- [ ] list_datasets -- [ ] list_identity_pool_usage -- [ ] list_records -- [ ] register_device -- [ ] set_cognito_events -- [ ] set_identity_pool_configuration -- [ ] subscribe_to_dataset -- [ ] unsubscribe_from_dataset -- [ ] update_records - -## comprehend - 0% implemented -- [ ] batch_detect_dominant_language -- [ ] batch_detect_entities -- [ ] batch_detect_key_phrases -- [ ] batch_detect_sentiment -- [ ] batch_detect_syntax -- [ ] describe_dominant_language_detection_job -- [ ] describe_entities_detection_job -- [ ] describe_key_phrases_detection_job -- [ ] describe_sentiment_detection_job -- [ ] describe_topics_detection_job -- [ ] detect_dominant_language -- [ ] detect_entities -- [ ] detect_key_phrases -- [ ] detect_sentiment -- [ ] detect_syntax -- [ ] list_dominant_language_detection_jobs -- [ ] list_entities_detection_jobs -- [ ] list_key_phrases_detection_jobs -- [ ] list_sentiment_detection_jobs -- [ ] list_topics_detection_jobs -- [ ] start_dominant_language_detection_job -- [ ] start_entities_detection_job -- [ ] start_key_phrases_detection_job -- [ ] start_sentiment_detection_job -- [ ] start_topics_detection_job -- [ ] stop_dominant_language_detection_job -- [ ] stop_entities_detection_job -- [ ] stop_key_phrases_detection_job -- [ ] stop_sentiment_detection_job - -## config - 0% implemented -- [ ] batch_get_resource_config -- [ ] 
delete_aggregation_authorization -- [ ] delete_config_rule -- [ ] delete_configuration_aggregator -- [ ] delete_configuration_recorder -- [ ] delete_delivery_channel -- [ ] delete_evaluation_results -- [ ] delete_pending_aggregation_request -- [ ] delete_retention_configuration -- [ ] deliver_config_snapshot -- [ ] describe_aggregate_compliance_by_config_rules -- [ ] describe_aggregation_authorizations -- [ ] describe_compliance_by_config_rule -- [ ] describe_compliance_by_resource -- [ ] describe_config_rule_evaluation_status -- [ ] describe_config_rules -- [ ] describe_configuration_aggregator_sources_status -- [ ] describe_configuration_aggregators -- [ ] describe_configuration_recorder_status -- [ ] describe_configuration_recorders -- [ ] describe_delivery_channel_status -- [ ] describe_delivery_channels -- [ ] describe_pending_aggregation_requests -- [ ] describe_retention_configurations -- [ ] get_aggregate_compliance_details_by_config_rule -- [ ] get_aggregate_config_rule_compliance_summary -- [ ] get_compliance_details_by_config_rule -- [ ] get_compliance_details_by_resource -- [ ] get_compliance_summary_by_config_rule -- [ ] get_compliance_summary_by_resource_type -- [ ] get_discovered_resource_counts -- [ ] get_resource_config_history -- [ ] list_discovered_resources -- [ ] put_aggregation_authorization -- [ ] put_config_rule -- [ ] put_configuration_aggregator -- [ ] put_configuration_recorder -- [ ] put_delivery_channel -- [ ] put_evaluations -- [ ] put_retention_configuration -- [ ] start_config_rules_evaluation -- [ ] start_configuration_recorder -- [ ] stop_configuration_recorder - -## connect - 0% implemented -- [ ] create_user -- [ ] delete_user -- [ ] describe_user -- [ ] describe_user_hierarchy_group -- [ ] describe_user_hierarchy_structure -- [ ] get_federation_token -- [ ] list_routing_profiles -- [ ] list_security_profiles -- [ ] list_user_hierarchy_groups -- [ ] list_users -- [ ] start_outbound_voice_contact -- [ ] stop_contact -- [ ] update_user_hierarchy -- [ ] update_user_identity_info -- [ ] update_user_phone_config -- [ ] update_user_routing_profile -- [ ] update_user_security_profiles - -## cur - 0% implemented -- [ ] delete_report_definition -- [ ] describe_report_definitions -- [ ] put_report_definition - -## datapipeline - 42% implemented -- [X] activate_pipeline -- [ ] add_tags -- [X] create_pipeline -- [ ] deactivate_pipeline -- [X] delete_pipeline -- [X] describe_objects -- [X] describe_pipelines -- [ ] evaluate_expression -- [X] get_pipeline_definition -- [X] list_pipelines -- [ ] poll_for_task -- [X] put_pipeline_definition -- [ ] query_objects -- [ ] remove_tags -- [ ] report_task_progress -- [ ] report_task_runner_heartbeat -- [ ] set_status -- [ ] set_task_status -- [ ] validate_pipeline_definition - -## dax - 0% implemented -- [ ] create_cluster -- [ ] create_parameter_group -- [ ] create_subnet_group -- [ ] decrease_replication_factor -- [ ] delete_cluster -- [ ] delete_parameter_group -- [ ] delete_subnet_group -- [ ] describe_clusters -- [ ] describe_default_parameters -- [ ] describe_events -- [ ] describe_parameter_groups -- [ ] describe_parameters -- [ ] describe_subnet_groups -- [ ] increase_replication_factor -- [ ] list_tags -- [ ] reboot_node -- [ ] tag_resource -- [ ] untag_resource -- [ ] update_cluster -- [ ] update_parameter_group -- [ ] update_subnet_group - -## devicefarm - 0% implemented -- [ ] create_device_pool -- [ ] create_instance_profile -- [ ] create_network_profile -- [ ] create_project -- [ ] create_remote_access_session -- 

## dax - 0% implemented
- [ ] create_cluster
- [ ] create_parameter_group
- [ ] create_subnet_group
- [ ] decrease_replication_factor
- [ ] delete_cluster
- [ ] delete_parameter_group
- [ ] delete_subnet_group
- [ ] describe_clusters
- [ ] describe_default_parameters
- [ ] describe_events
- [ ] describe_parameter_groups
- [ ] describe_parameters
- [ ] describe_subnet_groups
- [ ] increase_replication_factor
- [ ] list_tags
- [ ] reboot_node
- [ ] tag_resource
- [ ] untag_resource
- [ ] update_cluster
- [ ] update_parameter_group
- [ ] update_subnet_group

## devicefarm - 0% implemented
- [ ] create_device_pool
- [ ] create_instance_profile
- [ ] create_network_profile
- [ ] create_project
- [ ] create_remote_access_session
- [ ] create_upload
- [ ] create_vpce_configuration
- [ ] delete_device_pool
- [ ] delete_instance_profile
- [ ] delete_network_profile
- [ ] delete_project
- [ ] delete_remote_access_session
- [ ] delete_run
- [ ] delete_upload
- [ ] delete_vpce_configuration
- [ ] get_account_settings
- [ ] get_device
- [ ] get_device_instance
- [ ] get_device_pool
- [ ] get_device_pool_compatibility
- [ ] get_instance_profile
- [ ] get_job
- [ ] get_network_profile
- [ ] get_offering_status
- [ ] get_project
- [ ] get_remote_access_session
- [ ] get_run
- [ ] get_suite
- [ ] get_test
- [ ] get_upload
- [ ] get_vpce_configuration
- [ ] install_to_remote_access_session
- [ ] list_artifacts
- [ ] list_device_instances
- [ ] list_device_pools
- [ ] list_devices
- [ ] list_instance_profiles
- [ ] list_jobs
- [ ] list_network_profiles
- [ ] list_offering_promotions
- [ ] list_offering_transactions
- [ ] list_offerings
- [ ] list_projects
- [ ] list_remote_access_sessions
- [ ] list_runs
- [ ] list_samples
- [ ] list_suites
- [ ] list_tests
- [ ] list_unique_problems
- [ ] list_uploads
- [ ] list_vpce_configurations
- [ ] purchase_offering
- [ ] renew_offering
- [ ] schedule_run
- [ ] stop_remote_access_session
- [ ] stop_run
- [ ] update_device_instance
- [ ] update_device_pool
- [ ] update_instance_profile
- [ ] update_network_profile
- [ ] update_project
- [ ] update_vpce_configuration

## directconnect - 0% implemented
- [ ] allocate_connection_on_interconnect
- [ ] allocate_hosted_connection
- [ ] allocate_private_virtual_interface
- [ ] allocate_public_virtual_interface
- [ ] associate_connection_with_lag
- [ ] associate_hosted_connection
- [ ] associate_virtual_interface
- [ ] confirm_connection
- [ ] confirm_private_virtual_interface
- [ ] confirm_public_virtual_interface
- [ ] create_bgp_peer
- [ ] create_connection
- [ ] create_direct_connect_gateway
- [ ] create_direct_connect_gateway_association
- [ ] create_interconnect
- [ ] create_lag
- [ ] create_private_virtual_interface
- [ ] create_public_virtual_interface
- [ ] delete_bgp_peer
- [ ] delete_connection
- [ ] delete_direct_connect_gateway
- [ ] delete_direct_connect_gateway_association
- [ ] delete_interconnect
- [ ] delete_lag
- [ ] delete_virtual_interface
- [ ] describe_connection_loa
- [ ] describe_connections
- [ ] describe_connections_on_interconnect
- [ ] describe_direct_connect_gateway_associations
- [ ] describe_direct_connect_gateway_attachments
- [ ] describe_direct_connect_gateways
- [ ] describe_hosted_connections
- [ ] describe_interconnect_loa
- [ ] describe_interconnects
- [ ] describe_lags
- [ ] describe_loa
- [ ] describe_locations
- [ ] describe_tags
- [ ] describe_virtual_gateways
- [ ] describe_virtual_interfaces
- [ ] disassociate_connection_from_lag
- [ ] tag_resource
- [ ] untag_resource
- [ ] update_lag

## discovery - 0% implemented
- [ ] associate_configuration_items_to_application
- [ ] create_application
- [ ] create_tags
- [ ] delete_applications
- [ ] delete_tags
- [ ] describe_agents
- [ ] describe_configurations
- [ ] describe_export_configurations
- [ ] describe_export_tasks
- [ ] describe_tags
- [ ] disassociate_configuration_items_from_application
- [ ] export_configurations
- [ ] get_discovery_summary
- [ ] list_configurations
- [ ] list_server_neighbors
- [ ] start_data_collection_by_agent_ids
- [ ] start_export_task
- [ ] stop_data_collection_by_agent_ids
- [ ] update_application

## dlm - 0% implemented
- [ ] create_lifecycle_policy
- [ ] delete_lifecycle_policy
- [ ] get_lifecycle_policies
- [ ] get_lifecycle_policy
- [ ] update_lifecycle_policy

## dms - 0% implemented
- [ ] add_tags_to_resource
- [ ] create_endpoint
- [ ] create_event_subscription
- [ ] create_replication_instance
- [ ] create_replication_subnet_group
- [ ] create_replication_task
- [ ] delete_certificate
- [ ] delete_endpoint
- [ ] delete_event_subscription
- [ ] delete_replication_instance
- [ ] delete_replication_subnet_group
- [ ] delete_replication_task
- [ ] describe_account_attributes
- [ ] describe_certificates
- [ ] describe_connections
- [ ] describe_endpoint_types
- [ ] describe_endpoints
- [ ] describe_event_categories
- [ ] describe_event_subscriptions
- [ ] describe_events
- [ ] describe_orderable_replication_instances
- [ ] describe_refresh_schemas_status
- [ ] describe_replication_instance_task_logs
- [ ] describe_replication_instances
- [ ] describe_replication_subnet_groups
- [ ] describe_replication_task_assessment_results
- [ ] describe_replication_tasks
- [ ] describe_schemas
- [ ] describe_table_statistics
- [ ] import_certificate
- [ ] list_tags_for_resource
- [ ] modify_endpoint
- [ ] modify_event_subscription
- [ ] modify_replication_instance
- [ ] modify_replication_subnet_group
- [ ] modify_replication_task
- [ ] reboot_replication_instance
- [ ] refresh_schemas
- [ ] reload_tables
- [ ] remove_tags_from_resource
- [ ] start_replication_task
- [ ] start_replication_task_assessment
- [ ] stop_replication_task
- [ ] test_connection

## ds - 0% implemented
- [ ] add_ip_routes
- [ ] add_tags_to_resource
- [ ] cancel_schema_extension
- [ ] connect_directory
- [ ] create_alias
- [ ] create_computer
- [ ] create_conditional_forwarder
- [ ] create_directory
- [ ] create_microsoft_ad
- [ ] create_snapshot
- [ ] create_trust
- [ ] delete_conditional_forwarder
- [ ] delete_directory
- [ ] delete_snapshot
- [ ] delete_trust
- [ ] deregister_event_topic
- [ ] describe_conditional_forwarders
- [ ] describe_directories
- [ ] describe_domain_controllers
- [ ] describe_event_topics
- [ ] describe_snapshots
- [ ] describe_trusts
- [ ] disable_radius
- [ ] disable_sso
- [ ] enable_radius
- [ ] enable_sso
- [ ] get_directory_limits
- [ ] get_snapshot_limits
- [ ] list_ip_routes
- [ ] list_schema_extensions
- [ ] list_tags_for_resource
- [ ] register_event_topic
- [ ] remove_ip_routes
- [ ] remove_tags_from_resource
- [ ] reset_user_password
- [ ] restore_from_snapshot
- [ ] start_schema_extension
- [ ] update_conditional_forwarder
- [ ] update_number_of_domain_controllers
- [ ] update_radius
- [ ] verify_trust

## dynamodb - 21% implemented
- [ ] batch_get_item
- [ ] batch_write_item
- [ ] create_backup
- [ ] create_global_table
- [X] create_table
- [ ] delete_backup
- [X] delete_item
- [X] delete_table
- [ ] describe_backup
- [ ] describe_continuous_backups
- [ ] describe_global_table
- [ ] describe_global_table_settings
- [ ] describe_limits
- [ ] describe_table
- [ ] describe_time_to_live
- [X] get_item
- [ ] list_backups
- [ ] list_global_tables
- [ ] list_tables
- [ ] list_tags_of_resource
- [X] put_item
- [X] query
- [ ] restore_table_from_backup
- [ ] restore_table_to_point_in_time
- [X] scan
- [ ] tag_resource
- [ ] untag_resource
- [ ] update_continuous_backups
- [ ] update_global_table
- [ ] update_global_table_settings
- [ ] update_item
- [ ] update_table
- [ ] update_time_to_live
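
The checked core (`create_table`, `put_item`, `get_item`) supports a basic round trip; a minimal sketch, assuming moto's `mock_dynamodb2` decorator for the modern DynamoDB API and arbitrary table/key names:

```python
import boto3
from moto import mock_dynamodb2


@mock_dynamodb2
def test_put_and_get_item():
    client = boto3.client("dynamodb", region_name="us-east-1")
    client.create_table(
        TableName="things",
        KeySchema=[{"AttributeName": "id", "KeyType": "HASH"}],
        AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
        ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1},
    )
    client.put_item(TableName="things", Item={"id": {"S": "thing-1"}})
    # get_item is checked above, so the stored item should come back intact
    item = client.get_item(TableName="things", Key={"id": {"S": "thing-1"}})["Item"]
    assert item["id"]["S"] == "thing-1"
```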

## dynamodbstreams - 0% implemented
- [ ] describe_stream
- [ ] get_records
- [ ] get_shard_iterator
- [ ] list_streams

## ec2 - 36% implemented
- [ ] accept_reserved_instances_exchange_quote
- [ ] accept_vpc_endpoint_connections
- [X] accept_vpc_peering_connection
- [X] allocate_address
- [ ] allocate_hosts
- [ ] assign_ipv6_addresses
- [ ] assign_private_ip_addresses
- [X] associate_address
- [X] associate_dhcp_options
- [ ] associate_iam_instance_profile
- [X] associate_route_table
- [ ] associate_subnet_cidr_block
- [X] associate_vpc_cidr_block
- [ ] attach_classic_link_vpc
- [X] attach_internet_gateway
- [X] attach_network_interface
- [X] attach_volume
- [X] attach_vpn_gateway
- [X] authorize_security_group_egress
- [X] authorize_security_group_ingress
- [ ] bundle_instance
- [ ] cancel_bundle_task
- [ ] cancel_conversion_task
- [ ] cancel_export_task
- [ ] cancel_import_task
- [ ] cancel_reserved_instances_listing
- [X] cancel_spot_fleet_requests
- [X] cancel_spot_instance_requests
- [ ] confirm_product_instance
- [ ] copy_fpga_image
- [X] copy_image
- [X] copy_snapshot
- [X] create_customer_gateway
- [ ] create_default_subnet
- [ ] create_default_vpc
- [X] create_dhcp_options
- [ ] create_egress_only_internet_gateway
- [ ] create_fleet
- [ ] create_flow_logs
- [ ] create_fpga_image
- [X] create_image
- [ ] create_instance_export_task
- [X] create_internet_gateway
- [X] create_key_pair
- [ ] create_launch_template
- [ ] create_launch_template_version
- [X] create_nat_gateway
- [X] create_network_acl
- [X] create_network_acl_entry
- [X] create_network_interface
- [ ] create_network_interface_permission
- [ ] create_placement_group
- [ ] create_reserved_instances_listing
- [X] create_route
- [X] create_route_table
- [X] create_security_group
- [X] create_snapshot
- [ ] create_spot_datafeed_subscription
- [X] create_subnet
- [X] create_tags
- [X] create_volume
- [X] create_vpc
- [ ] create_vpc_endpoint
- [ ] create_vpc_endpoint_connection_notification
- [ ] create_vpc_endpoint_service_configuration
- [X] create_vpc_peering_connection
- [X] create_vpn_connection
- [ ] create_vpn_connection_route
- [X] create_vpn_gateway
- [X] delete_customer_gateway
- [ ] delete_dhcp_options
- [ ] delete_egress_only_internet_gateway
- [ ] delete_fleets
- [ ] delete_flow_logs
- [ ] delete_fpga_image
- [X] delete_internet_gateway
- [X] delete_key_pair
- [ ] delete_launch_template
- [ ] delete_launch_template_versions
- [X] delete_nat_gateway
- [X] delete_network_acl
- [X] delete_network_acl_entry
- [X] delete_network_interface
- [ ] delete_network_interface_permission
- [ ] delete_placement_group
- [X] delete_route
- [X] delete_route_table
- [X] delete_security_group
- [X] delete_snapshot
- [ ] delete_spot_datafeed_subscription
- [X] delete_subnet
- [X] delete_tags
- [X] delete_volume
- [X] delete_vpc
- [ ] delete_vpc_endpoint_connection_notifications
- [ ] delete_vpc_endpoint_service_configurations
- [ ] delete_vpc_endpoints
- [X] delete_vpc_peering_connection
- [X] delete_vpn_connection
- [ ] delete_vpn_connection_route
- [X] delete_vpn_gateway
- [X] deregister_image
- [ ] describe_account_attributes
- [X] describe_addresses
- [ ] describe_aggregate_id_format
- [X] describe_availability_zones
- [ ] describe_bundle_tasks
- [ ] describe_classic_link_instances
- [ ] describe_conversion_tasks
- [ ] describe_customer_gateways
- [X] describe_dhcp_options
- [ ] describe_egress_only_internet_gateways
- [ ] describe_elastic_gpus
- [ ] describe_export_tasks
- [ ] describe_fleet_history
- [ ] describe_fleet_instances
- [ ] describe_fleets
- [ ] describe_flow_logs
- [ ] describe_fpga_image_attribute
- [ ] describe_fpga_images
- [ ] describe_host_reservation_offerings
- [ ] describe_host_reservations
- [ ] describe_hosts
- [ ] describe_iam_instance_profile_associations
- [ ] describe_id_format
- [ ] describe_identity_id_format
- [ ] describe_image_attribute
- [X] describe_images
- [ ] describe_import_image_tasks
- [ ] describe_import_snapshot_tasks
- [X] describe_instance_attribute
- [ ] describe_instance_credit_specifications
- [ ] describe_instance_status
- [ ] describe_instances
- [X] describe_internet_gateways
- [X] describe_key_pairs
- [ ] describe_launch_template_versions
- [ ] describe_launch_templates
- [ ] describe_moving_addresses
- [ ] describe_nat_gateways
- [ ] describe_network_acls
- [ ] describe_network_interface_attribute
- [ ] describe_network_interface_permissions
- [X] describe_network_interfaces
- [ ] describe_placement_groups
- [ ] describe_prefix_lists
- [ ] describe_principal_id_format
- [X] describe_regions
- [ ] describe_reserved_instances
- [ ] describe_reserved_instances_listings
- [ ] describe_reserved_instances_modifications
- [ ] describe_reserved_instances_offerings
- [ ] describe_route_tables
- [ ] describe_scheduled_instance_availability
- [ ] describe_scheduled_instances
- [ ] describe_security_group_references
- [X] describe_security_groups
- [ ] describe_snapshot_attribute
- [X] describe_snapshots
- [ ] describe_spot_datafeed_subscription
- [X] describe_spot_fleet_instances
- [ ] describe_spot_fleet_request_history
- [X] describe_spot_fleet_requests
- [X] describe_spot_instance_requests
- [ ] describe_spot_price_history
- [ ] describe_stale_security_groups
- [ ] describe_subnets
- [X] describe_tags
- [ ] describe_volume_attribute
- [ ] describe_volume_status
- [X] describe_volumes
- [ ] describe_volumes_modifications
- [X] describe_vpc_attribute
- [ ] describe_vpc_classic_link
- [ ] describe_vpc_classic_link_dns_support
- [ ] describe_vpc_endpoint_connection_notifications
- [ ] describe_vpc_endpoint_connections
- [ ] describe_vpc_endpoint_service_configurations
- [ ] describe_vpc_endpoint_service_permissions
- [ ] describe_vpc_endpoint_services
- [ ] describe_vpc_endpoints
- [ ] describe_vpc_peering_connections
- [ ] describe_vpcs
- [X] describe_vpn_connections
- [ ] describe_vpn_gateways
- [ ] detach_classic_link_vpc
- [X] detach_internet_gateway
- [X] detach_network_interface
- [X] detach_volume
- [X] detach_vpn_gateway
- [ ] disable_vgw_route_propagation
- [ ] disable_vpc_classic_link
- [ ] disable_vpc_classic_link_dns_support
- [X] disassociate_address
- [ ] disassociate_iam_instance_profile
- [X] disassociate_route_table
- [ ] disassociate_subnet_cidr_block
- [X] disassociate_vpc_cidr_block
- [ ] enable_vgw_route_propagation
- [ ] enable_volume_io
- [ ] enable_vpc_classic_link
- [ ] enable_vpc_classic_link_dns_support
- [ ] get_console_output
- [ ] get_console_screenshot
- [ ] get_host_reservation_purchase_preview
- [ ] get_launch_template_data
- [ ] get_password_data
- [ ] get_reserved_instances_exchange_quote
- [ ] import_image
- [ ] import_instance
- [X] import_key_pair
- [ ] import_snapshot
- [ ] import_volume
- [ ] modify_fleet
- [ ] modify_fpga_image_attribute
- [ ] modify_hosts
- [ ] modify_id_format
- [ ] modify_identity_id_format
- [ ] modify_image_attribute
- [X] modify_instance_attribute
- [ ] modify_instance_credit_specification
- [ ] modify_instance_placement
- [ ] modify_launch_template
- [X] modify_network_interface_attribute
- [ ] modify_reserved_instances
- [ ] modify_snapshot_attribute
- [X] modify_spot_fleet_request
- [X] modify_subnet_attribute
- [ ] modify_volume
- [ ] modify_volume_attribute
- [X] modify_vpc_attribute
- [ ] modify_vpc_endpoint
- [ ] modify_vpc_endpoint_connection_notification
- [ ] modify_vpc_endpoint_service_configuration
- [ ] modify_vpc_endpoint_service_permissions
- [ ] modify_vpc_peering_connection_options
- [ ] modify_vpc_tenancy
- [ ] monitor_instances
- [ ] move_address_to_vpc
- [ ] purchase_host_reservation
- [ ] purchase_reserved_instances_offering
- [ ] purchase_scheduled_instances
- [X] reboot_instances
- [ ] register_image
- [ ] reject_vpc_endpoint_connections
- [X] reject_vpc_peering_connection
- [X] release_address
- [ ] release_hosts
- [ ] replace_iam_instance_profile_association
- [X] replace_network_acl_association
- [X] replace_network_acl_entry
- [X] replace_route
- [X] replace_route_table_association
- [ ] report_instance_status
- [X] request_spot_fleet
- [X] request_spot_instances
- [ ] reset_fpga_image_attribute
- [ ] reset_image_attribute
- [ ] reset_instance_attribute
- [ ] reset_network_interface_attribute
- [ ] reset_snapshot_attribute
- [ ] restore_address_to_classic
- [X] revoke_security_group_egress
- [X] revoke_security_group_ingress
- [ ] run_instances
- [ ] run_scheduled_instances
- [X] start_instances
- [X] stop_instances
- [X] terminate_instances
- [ ] unassign_ipv6_addresses
- [ ] unassign_private_ip_addresses
- [ ] unmonitor_instances
- [ ] update_security_group_rule_descriptions_egress
- [ ] update_security_group_rule_descriptions_ingress

## ecr - 31% implemented
- [ ] batch_check_layer_availability
- [ ] batch_delete_image
- [X] batch_get_image
- [ ] complete_layer_upload
- [X] create_repository
- [ ] delete_lifecycle_policy
- [X] delete_repository
- [ ] delete_repository_policy
- [X] describe_images
- [X] describe_repositories
- [ ] get_authorization_token
- [ ] get_download_url_for_layer
- [ ] get_lifecycle_policy
- [ ] get_lifecycle_policy_preview
- [ ] get_repository_policy
- [ ] initiate_layer_upload
- [X] list_images
- [X] put_image
- [ ] put_lifecycle_policy
- [ ] set_repository_policy
- [ ] start_lifecycle_policy_preview
- [ ] upload_layer_part

## ecs - 87% implemented
- [X] create_cluster
- [X] create_service
- [X] delete_attributes
- [X] delete_cluster
- [X] delete_service
- [X] deregister_container_instance
- [X] deregister_task_definition
- [X] describe_clusters
- [X] describe_container_instances
- [X] describe_services
- [X] describe_task_definition
- [X] describe_tasks
- [ ] discover_poll_endpoint
- [X] list_attributes
- [X] list_clusters
- [X] list_container_instances
- [X] list_services
- [X] list_task_definition_families
- [X] list_task_definitions
- [X] list_tasks
- [X] put_attributes
- [X] register_container_instance
- [X] register_task_definition
- [X] run_task
- [X] start_task
- [X] stop_task
- [ ] submit_container_state_change
- [ ] submit_task_state_change
- [ ] update_container_agent
- [X] update_container_instances_state
- [X] update_service
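
With ecs almost fully covered, the usual cluster and task-definition lifecycle can be exercised end to end; a minimal sketch, where the cluster name, family, and container definition are arbitrary:

```python
import boto3
from moto import mock_ecs


@mock_ecs
def test_cluster_and_task_definition():
    client = boto3.client("ecs", region_name="us-east-1")
    client.create_cluster(clusterName="test-cluster")
    client.register_task_definition(
        family="web",
        containerDefinitions=[{"name": "nginx", "image": "nginx:latest", "memory": 128}],
    )
    # both list calls are checked above
    assert len(client.list_clusters()["clusterArns"]) == 1
    assert len(client.list_task_definitions()["taskDefinitionArns"]) == 1
```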

## efs - 0% implemented
- [ ] create_file_system
- [ ] create_mount_target
- [ ] create_tags
- [ ] delete_file_system
- [ ] delete_mount_target
- [ ] delete_tags
- [ ] describe_file_systems
- [ ] describe_mount_target_security_groups
- [ ] describe_mount_targets
- [ ] describe_tags
- [ ] modify_mount_target_security_groups
- [ ] update_file_system

## eks - 0% implemented
- [ ] create_cluster
- [ ] delete_cluster
- [ ] describe_cluster
- [ ] list_clusters

## elasticache - 0% implemented
- [ ] add_tags_to_resource
- [ ] authorize_cache_security_group_ingress
- [ ] copy_snapshot
- [ ] create_cache_cluster
- [ ] create_cache_parameter_group
- [ ] create_cache_security_group
- [ ] create_cache_subnet_group
- [ ] create_replication_group
- [ ] create_snapshot
- [ ] delete_cache_cluster
- [ ] delete_cache_parameter_group
- [ ] delete_cache_security_group
- [ ] delete_cache_subnet_group
- [ ] delete_replication_group
- [ ] delete_snapshot
- [ ] describe_cache_clusters
- [ ] describe_cache_engine_versions
- [ ] describe_cache_parameter_groups
- [ ] describe_cache_parameters
- [ ] describe_cache_security_groups
- [ ] describe_cache_subnet_groups
- [ ] describe_engine_default_parameters
- [ ] describe_events
- [ ] describe_replication_groups
- [ ] describe_reserved_cache_nodes
- [ ] describe_reserved_cache_nodes_offerings
- [ ] describe_snapshots
- [ ] list_allowed_node_type_modifications
- [ ] list_tags_for_resource
- [ ] modify_cache_cluster
- [ ] modify_cache_parameter_group
- [ ] modify_cache_subnet_group
- [ ] modify_replication_group
- [ ] modify_replication_group_shard_configuration
- [ ] purchase_reserved_cache_nodes_offering
- [ ] reboot_cache_cluster
- [ ] remove_tags_from_resource
- [ ] reset_cache_parameter_group
- [ ] revoke_cache_security_group_ingress
- [ ] test_failover

## elasticbeanstalk - 0% implemented
- [ ] abort_environment_update
- [ ] apply_environment_managed_action
- [ ] check_dns_availability
- [ ] compose_environments
- [ ] create_application
- [ ] create_application_version
- [ ] create_configuration_template
- [ ] create_environment
- [ ] create_platform_version
- [ ] create_storage_location
- [ ] delete_application
- [ ] delete_application_version
- [ ] delete_configuration_template
- [ ] delete_environment_configuration
- [ ] delete_platform_version
- [ ] describe_account_attributes
- [ ] describe_application_versions
- [ ] describe_applications
- [ ] describe_configuration_options
- [ ] describe_configuration_settings
- [ ] describe_environment_health
- [ ] describe_environment_managed_action_history
- [ ] describe_environment_managed_actions
- [ ] describe_environment_resources
- [ ] describe_environments
- [ ] describe_events
- [ ] describe_instances_health
- [ ] describe_platform_version
- [ ] list_available_solution_stacks
- [ ] list_platform_versions
- [ ] list_tags_for_resource
- [ ] rebuild_environment
- [ ] request_environment_info
- [ ] restart_app_server
- [ ] retrieve_environment_info
- [ ] swap_environment_cnames
- [ ] terminate_environment
- [ ] update_application
- [ ] update_application_resource_lifecycle
- [ ] update_application_version
- [ ] update_configuration_template
- [ ] update_environment
- [ ] update_tags_for_resource
- [ ] validate_configuration_settings

## elastictranscoder - 0% implemented
- [ ] cancel_job
- [ ] create_job
- [ ] create_pipeline
- [ ] create_preset
- [ ] delete_pipeline
- [ ] delete_preset
- [ ] list_jobs_by_pipeline
- [ ] list_jobs_by_status
- [ ] list_pipelines
- [ ] list_presets
- [ ] read_job
- [ ] read_pipeline
- [ ] read_preset
- [ ] test_role
- [ ] update_pipeline
- [ ] update_pipeline_notifications
- [ ] update_pipeline_status

## elb - 34% implemented
- [ ] add_tags
- [X] apply_security_groups_to_load_balancer
- [ ] attach_load_balancer_to_subnets
- [X] configure_health_check
- [X] create_app_cookie_stickiness_policy
- [X] create_lb_cookie_stickiness_policy
- [X] create_load_balancer
- [X] create_load_balancer_listeners
- [ ] create_load_balancer_policy
- [X] delete_load_balancer
- [X] delete_load_balancer_listeners
- [ ] delete_load_balancer_policy
- [ ] deregister_instances_from_load_balancer
- [ ] describe_account_limits
- [ ] describe_instance_health
- [ ] describe_load_balancer_attributes
- [ ] describe_load_balancer_policies
- [ ] describe_load_balancer_policy_types
- [X] describe_load_balancers
- [ ] describe_tags
- [ ] detach_load_balancer_from_subnets
- [ ] disable_availability_zones_for_load_balancer
- [ ] enable_availability_zones_for_load_balancer
- [ ] modify_load_balancer_attributes
- [ ] register_instances_with_load_balancer
- [ ] remove_tags
- [ ] set_load_balancer_listener_ssl_certificate
- [ ] set_load_balancer_policies_for_backend_server
- [X] set_load_balancer_policies_of_listener

## elbv2 - 70% implemented
- [ ] add_listener_certificates
- [ ] add_tags
- [X] create_listener
- [X] create_load_balancer
- [X] create_rule
- [X] create_target_group
- [X] delete_listener
- [X] delete_load_balancer
- [X] delete_rule
- [X] delete_target_group
- [X] deregister_targets
- [ ] describe_account_limits
- [ ] describe_listener_certificates
- [X] describe_listeners
- [X] describe_load_balancer_attributes
- [X] describe_load_balancers
- [X] describe_rules
- [ ] describe_ssl_policies
- [ ] describe_tags
- [ ] describe_target_group_attributes
- [X] describe_target_groups
- [X] describe_target_health
- [X] modify_listener
- [X] modify_load_balancer_attributes
- [X] modify_rule
- [X] modify_target_group
- [ ] modify_target_group_attributes
- [X] register_targets
- [ ] remove_listener_certificates
- [ ] remove_tags
- [X] set_ip_address_type
- [X] set_rule_priorities
- [X] set_security_groups
- [X] set_subnets

## emr - 55% implemented
- [ ] add_instance_fleet
- [X] add_instance_groups
- [X] add_job_flow_steps
- [X] add_tags
- [ ] cancel_steps
- [ ] create_security_configuration
- [ ] delete_security_configuration
- [ ] describe_cluster
- [X] describe_job_flows
- [ ] describe_security_configuration
- [X] describe_step
- [X] list_bootstrap_actions
- [X] list_clusters
- [ ] list_instance_fleets
- [X] list_instance_groups
- [ ] list_instances
- [ ] list_security_configurations
- [X] list_steps
- [ ] modify_instance_fleet
- [X] modify_instance_groups
- [ ] put_auto_scaling_policy
- [ ] remove_auto_scaling_policy
- [X] remove_tags
- [X] run_job_flow
- [X] set_termination_protection
- [X] set_visible_to_all_users
- [X] terminate_job_flows
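
Since `run_job_flow` and `list_clusters` are both checked, a job flow created through the mock should show up in the cluster listing; a minimal sketch, where the cluster name and instance parameters are arbitrary:

```python
import boto3
from moto import mock_emr


@mock_emr
def test_run_job_flow():
    client = boto3.client("emr", region_name="us-east-1")
    resp = client.run_job_flow(
        Name="my-cluster",
        Instances={
            "MasterInstanceType": "m4.large",
            "SlaveInstanceType": "m4.large",
            "InstanceCount": 3,
            "KeepJobFlowAliveWhenNoSteps": True,
        },
    )
    # the returned JobFlowId doubles as the cluster id in list_clusters
    cluster_ids = [c["Id"] for c in client.list_clusters()["Clusters"]]
    assert resp["JobFlowId"] in cluster_ids
```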

## es - 0% implemented
- [ ] add_tags
- [ ] create_elasticsearch_domain
- [ ] delete_elasticsearch_domain
- [ ] delete_elasticsearch_service_role
- [ ] describe_elasticsearch_domain
- [ ] describe_elasticsearch_domain_config
- [ ] describe_elasticsearch_domains
- [ ] describe_elasticsearch_instance_type_limits
- [ ] describe_reserved_elasticsearch_instance_offerings
- [ ] describe_reserved_elasticsearch_instances
- [ ] list_domain_names
- [ ] list_elasticsearch_instance_types
- [ ] list_elasticsearch_versions
- [ ] list_tags
- [ ] purchase_reserved_elasticsearch_instance_offering
- [ ] remove_tags
- [ ] update_elasticsearch_domain_config

## events - 100% implemented
- [X] delete_rule
- [X] describe_event_bus
- [X] describe_rule
- [X] disable_rule
- [X] enable_rule
- [X] list_rule_names_by_target
- [X] list_rules
- [X] list_targets_by_rule
- [X] put_events
- [X] put_permission
- [X] put_rule
- [X] put_targets
- [X] remove_permission
- [X] remove_targets
- [X] test_event_pattern
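
events is listed at 100%, so even a simple rule round trip should behave like the real service; a minimal sketch, with an arbitrary rule name and schedule:

```python
import boto3
from moto import mock_events


@mock_events
def test_put_and_list_rules():
    client = boto3.client("events", region_name="us-east-1")
    # put_rule and list_rules are both checked above
    client.put_rule(Name="every-five-minutes", ScheduleExpression="rate(5 minutes)")
    rules = client.list_rules()["Rules"]
    assert rules[0]["Name"] == "every-five-minutes"
```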

## firehose - 0% implemented
- [ ] create_delivery_stream
- [ ] delete_delivery_stream
- [ ] describe_delivery_stream
- [ ] list_delivery_streams
- [ ] list_tags_for_delivery_stream
- [ ] put_record
- [ ] put_record_batch
- [ ] tag_delivery_stream
- [ ] untag_delivery_stream
- [ ] update_destination

## fms - 0% implemented
- [ ] associate_admin_account
- [ ] delete_notification_channel
- [ ] delete_policy
- [ ] disassociate_admin_account
- [ ] get_admin_account
- [ ] get_compliance_detail
- [ ] get_notification_channel
- [ ] get_policy
- [ ] list_compliance_status
- [ ] list_policies
- [ ] put_notification_channel
- [ ] put_policy

## gamelift - 0% implemented
- [ ] accept_match
- [ ] create_alias
- [ ] create_build
- [ ] create_fleet
- [ ] create_game_session
- [ ] create_game_session_queue
- [ ] create_matchmaking_configuration
- [ ] create_matchmaking_rule_set
- [ ] create_player_session
- [ ] create_player_sessions
- [ ] create_vpc_peering_authorization
- [ ] create_vpc_peering_connection
- [ ] delete_alias
- [ ] delete_build
- [ ] delete_fleet
- [ ] delete_game_session_queue
- [ ] delete_matchmaking_configuration
- [ ] delete_scaling_policy
- [ ] delete_vpc_peering_authorization
- [ ] delete_vpc_peering_connection
- [ ] describe_alias
- [ ] describe_build
- [ ] describe_ec2_instance_limits
- [ ] describe_fleet_attributes
- [ ] describe_fleet_capacity
- [ ] describe_fleet_events
- [ ] describe_fleet_port_settings
- [ ] describe_fleet_utilization
- [ ] describe_game_session_details
- [ ] describe_game_session_placement
- [ ] describe_game_session_queues
- [ ] describe_game_sessions
- [ ] describe_instances
- [ ] describe_matchmaking
- [ ] describe_matchmaking_configurations
- [ ] describe_matchmaking_rule_sets
- [ ] describe_player_sessions
- [ ] describe_runtime_configuration
- [ ] describe_scaling_policies
- [ ] describe_vpc_peering_authorizations
- [ ] describe_vpc_peering_connections
- [ ] get_game_session_log_url
- [ ] get_instance_access
- [ ] list_aliases
- [ ] list_builds
- [ ] list_fleets
- [ ] put_scaling_policy
- [ ] request_upload_credentials
- [ ] resolve_alias
- [ ] search_game_sessions
- [ ] start_fleet_actions
- [ ] start_game_session_placement
- [ ] start_match_backfill
- [ ] start_matchmaking
- [ ] stop_fleet_actions
- [ ] stop_game_session_placement
- [ ] stop_matchmaking
- [ ] update_alias
- [ ] update_build
- [ ] update_fleet_attributes
- [ ] update_fleet_capacity
- [ ] update_fleet_port_settings
- [ ] update_game_session
- [ ] update_game_session_queue
- [ ] update_matchmaking_configuration
- [ ] update_runtime_configuration
- [ ] validate_matchmaking_rule_set

## glacier - 12% implemented
- [ ] abort_multipart_upload
- [ ] abort_vault_lock
- [ ] add_tags_to_vault
- [ ] complete_multipart_upload
- [ ] complete_vault_lock
- [X] create_vault
- [ ] delete_archive
- [X] delete_vault
- [ ] delete_vault_access_policy
- [ ] delete_vault_notifications
- [ ] describe_job
- [ ] describe_vault
- [ ] get_data_retrieval_policy
- [ ] get_job_output
- [ ] get_vault_access_policy
- [ ] get_vault_lock
- [ ] get_vault_notifications
- [X] initiate_job
- [ ] initiate_multipart_upload
- [ ] initiate_vault_lock
- [X] list_jobs
- [ ] list_multipart_uploads
- [ ] list_parts
- [ ] list_provisioned_capacity
- [ ] list_tags_for_vault
- [ ] list_vaults
- [ ] purchase_provisioned_capacity
- [ ] remove_tags_from_vault
- [ ] set_data_retrieval_policy
- [ ] set_vault_access_policy
- [ ] set_vault_notifications
- [ ] upload_archive
- [ ] upload_multipart_part

## glue - 6% implemented
- [ ] batch_create_partition
- [ ] batch_delete_connection
- [ ] batch_delete_partition
- [ ] batch_delete_table
- [ ] batch_delete_table_version
- [ ] batch_get_partition
- [ ] batch_stop_job_run
- [ ] create_classifier
- [ ] create_connection
- [ ] create_crawler
- [X] create_database
- [ ] create_dev_endpoint
- [ ] create_job
- [ ] create_partition
- [ ] create_script
- [X] create_table
- [ ] create_trigger
- [ ] create_user_defined_function
- [ ] delete_classifier
- [ ] delete_connection
- [ ] delete_crawler
- [ ] delete_database
- [ ] delete_dev_endpoint
- [ ] delete_job
- [ ] delete_partition
- [ ] delete_table
- [ ] delete_table_version
- [ ] delete_trigger
- [ ] delete_user_defined_function
- [ ] get_catalog_import_status
- [ ] get_classifier
- [ ] get_classifiers
- [ ] get_connection
- [ ] get_connections
- [ ] get_crawler
- [ ] get_crawler_metrics
- [ ] get_crawlers
- [X] get_database
- [ ] get_databases
- [ ] get_dataflow_graph
- [ ] get_dev_endpoint
- [ ] get_dev_endpoints
- [ ] get_job
- [ ] get_job_run
- [ ] get_job_runs
- [ ] get_jobs
- [ ] get_mapping
- [ ] get_partition
- [ ] get_partitions
- [ ] get_plan
- [X] get_table
- [ ] get_table_version
- [ ] get_table_versions
- [X] get_tables
- [ ] get_trigger
- [ ] get_triggers
- [ ] get_user_defined_function
- [ ] get_user_defined_functions
- [ ] import_catalog_to_glue
- [ ] reset_job_bookmark
- [ ] start_crawler
- [ ] start_crawler_schedule
- [ ] start_job_run
- [ ] start_trigger
- [ ] stop_crawler
- [ ] stop_crawler_schedule
- [ ] stop_trigger
- [ ] update_classifier
- [ ] update_connection
- [ ] update_crawler
- [ ] update_crawler_schedule
- [ ] update_database
- [ ] update_dev_endpoint
- [ ] update_job
- [ ] update_partition
- [ ] update_table
- [ ] update_trigger
- [ ] update_user_defined_function
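
glue only has the catalog basics checked, and those support exactly this kind of round trip; a minimal sketch, with arbitrary database and table names:

```python
import boto3
from moto import mock_glue


@mock_glue
def test_catalog_round_trip():
    client = boto3.client("glue", region_name="us-east-1")
    # create_database, create_table, and get_table are the checked calls
    client.create_database(DatabaseInput={"Name": "db"})
    client.create_table(DatabaseName="db", TableInput={"Name": "events"})
    table = client.get_table(DatabaseName="db", Name="events")["Table"]
    assert table["Name"] == "events"
```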

## greengrass - 0% implemented
- [ ] associate_role_to_group
- [ ] associate_service_role_to_account
- [ ] create_core_definition
- [ ] create_core_definition_version
- [ ] create_deployment
- [ ] create_device_definition
- [ ] create_device_definition_version
- [ ] create_function_definition
- [ ] create_function_definition_version
- [ ] create_group
- [ ] create_group_certificate_authority
- [ ] create_group_version
- [ ] create_logger_definition
- [ ] create_logger_definition_version
- [ ] create_resource_definition
- [ ] create_resource_definition_version
- [ ] create_software_update_job
- [ ] create_subscription_definition
- [ ] create_subscription_definition_version
- [ ] delete_core_definition
- [ ] delete_device_definition
- [ ] delete_function_definition
- [ ] delete_group
- [ ] delete_logger_definition
- [ ] delete_resource_definition
- [ ] delete_subscription_definition
- [ ] disassociate_role_from_group
- [ ] disassociate_service_role_from_account
- [ ] get_associated_role
- [ ] get_connectivity_info
- [ ] get_core_definition
- [ ] get_core_definition_version
- [ ] get_deployment_status
- [ ] get_device_definition
- [ ] get_device_definition_version
- [ ] get_function_definition
- [ ] get_function_definition_version
- [ ] get_group
- [ ] get_group_certificate_authority
- [ ] get_group_certificate_configuration
- [ ] get_group_version
- [ ] get_logger_definition
- [ ] get_logger_definition_version
- [ ] get_resource_definition
- [ ] get_resource_definition_version
- [ ] get_service_role_for_account
- [ ] get_subscription_definition
- [ ] get_subscription_definition_version
- [ ] list_core_definition_versions
- [ ] list_core_definitions
- [ ] list_deployments
- [ ] list_device_definition_versions
- [ ] list_device_definitions
- [ ] list_function_definition_versions
- [ ] list_function_definitions
- [ ] list_group_certificate_authorities
- [ ] list_group_versions
- [ ] list_groups
- [ ] list_logger_definition_versions
- [ ] list_logger_definitions
- [ ] list_resource_definition_versions
- [ ] list_resource_definitions
- [ ] list_subscription_definition_versions
- [ ] list_subscription_definitions
- [ ] reset_deployments
- [ ] update_connectivity_info
- [ ] update_core_definition
- [ ] update_device_definition
- [ ] update_function_definition
- [ ] update_group
- [ ] update_group_certificate_configuration
- [ ] update_logger_definition
- [ ] update_resource_definition
- [ ] update_subscription_definition

## guardduty - 0% implemented
- [ ] accept_invitation
- [ ] archive_findings
- [ ] create_detector
- [ ] create_filter
- [ ] create_ip_set
- [ ] create_members
- [ ] create_sample_findings
- [ ] create_threat_intel_set
- [ ] decline_invitations
- [ ] delete_detector
- [ ] delete_filter
- [ ] delete_invitations
- [ ] delete_ip_set
- [ ] delete_members
- [ ] delete_threat_intel_set
- [ ] disassociate_from_master_account
- [ ] disassociate_members
- [ ] get_detector
- [ ] get_filter
- [ ] get_findings
- [ ] get_findings_statistics
- [ ] get_invitations_count
- [ ] get_ip_set
- [ ] get_master_account
- [ ] get_members
- [ ] get_threat_intel_set
- [ ] invite_members
- [ ] list_detectors
- [ ] list_filters
- [ ] list_findings
- [ ] list_invitations
- [ ] list_ip_sets
- [ ] list_members
- [ ] list_threat_intel_sets
- [ ] start_monitoring_members
- [ ] stop_monitoring_members
- [ ] unarchive_findings
- [ ] update_detector
- [ ] update_filter
- [ ] update_findings_feedback
- [ ] update_ip_set
- [ ] update_threat_intel_set

## health - 0% implemented
- [ ] describe_affected_entities
- [ ] describe_entity_aggregates
- [ ] describe_event_aggregates
- [ ] describe_event_details
- [ ] describe_event_types
- [ ] describe_events

## iam - 47% implemented
- [ ] add_client_id_to_open_id_connect_provider
- [X] add_role_to_instance_profile
- [X] add_user_to_group
- [X] attach_group_policy
- [X] attach_role_policy
- [X] attach_user_policy
- [ ] change_password
- [X] create_access_key
- [X] create_account_alias
- [X] create_group
- [X] create_instance_profile
- [X] create_login_profile
- [ ] create_open_id_connect_provider
- [X] create_policy
- [X] create_policy_version
- [X] create_role
- [ ] create_saml_provider
- [ ] create_service_linked_role
- [ ] create_service_specific_credential
- [X] create_user
- [ ] create_virtual_mfa_device
- [X] deactivate_mfa_device
- [X] delete_access_key
- [X] delete_account_alias
- [ ] delete_account_password_policy
- [ ] delete_group
- [ ] delete_group_policy
- [ ] delete_instance_profile
- [X] delete_login_profile
- [ ] delete_open_id_connect_provider
- [ ] delete_policy
- [X] delete_policy_version
- [X] delete_role
- [ ] delete_role_permissions_boundary
- [X] delete_role_policy
- [ ] delete_saml_provider
- [X] delete_server_certificate
- [ ] delete_service_linked_role
- [ ] delete_service_specific_credential
- [ ] delete_signing_certificate
- [ ] delete_ssh_public_key
- [X] delete_user
- [ ] delete_user_permissions_boundary
- [X] delete_user_policy
- [ ] delete_virtual_mfa_device
- [X] detach_group_policy
- [X] detach_role_policy
- [X] detach_user_policy
- [X] enable_mfa_device
- [ ] generate_credential_report
- [ ] get_access_key_last_used
- [X] get_account_authorization_details
- [ ] get_account_password_policy
- [ ] get_account_summary
- [ ] get_context_keys_for_custom_policy
- [ ] get_context_keys_for_principal_policy
- [X] get_credential_report
- [X] get_group
- [X] get_group_policy
- [X] get_instance_profile
- [X] get_login_profile
- [ ] get_open_id_connect_provider
- [X] get_policy
- [X] get_policy_version
- [X] get_role
- [X] get_role_policy
- [ ] get_saml_provider
- [X] get_server_certificate
- [ ] get_service_linked_role_deletion_status
- [ ] get_ssh_public_key
- [X] get_user
- [X] get_user_policy
- [ ] list_access_keys
- [X] list_account_aliases
- [X] list_attached_group_policies
- [X] list_attached_role_policies
- [X] list_attached_user_policies
- [ ] list_entities_for_policy
- [X] list_group_policies
- [X] list_groups
- [ ] list_groups_for_user
- [ ] list_instance_profiles
- [ ] list_instance_profiles_for_role
- [X] list_mfa_devices
- [ ] list_open_id_connect_providers
- [X] list_policies
- [X] list_policy_versions
- [X] list_role_policies
- [ ] list_roles
- [ ] list_saml_providers
- [ ] list_server_certificates
- [ ] list_service_specific_credentials
- [ ] list_signing_certificates
- [ ] list_ssh_public_keys
- [X] list_user_policies
- [X] list_users
- [ ] list_virtual_mfa_devices
- [X] put_group_policy
- [ ] put_role_permissions_boundary
- [X] put_role_policy
- [ ] put_user_permissions_boundary
- [X] put_user_policy
- [ ] remove_client_id_from_open_id_connect_provider
- [X] remove_role_from_instance_profile
- [X] remove_user_from_group
- [ ] reset_service_specific_credential
- [ ] resync_mfa_device
- [ ] set_default_policy_version
- [ ] simulate_custom_policy
- [ ] simulate_principal_policy
- [X] update_access_key
- [ ] update_account_password_policy
- [ ] update_assume_role_policy
- [ ] update_group
- [X] update_login_profile
- [ ] update_open_id_connect_provider_thumbprint
- [ ] update_role
- [ ] update_role_description
- [ ] update_saml_provider
- [ ] update_server_certificate
- [ ] update_service_specific_credential
- [ ] update_signing_certificate
- [ ] update_ssh_public_key
- [ ] update_user
- [ ] upload_server_certificate
- [ ] upload_signing_certificate
- [ ] upload_ssh_public_key
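
About half of iam is covered, including the user CRUD above, so a create/list sequence like this minimal sketch is expected to work (the user name is arbitrary):

```python
import boto3
from moto import mock_iam


@mock_iam
def test_create_and_list_users():
    client = boto3.client("iam", region_name="us-east-1")
    # create_user and list_users are both checked above
    client.create_user(UserName="alice")
    users = client.list_users()["Users"]
    assert [u["UserName"] for u in users] == ["alice"]
```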

## importexport - 0% implemented
- [ ] cancel_job
- [ ] create_job
- [ ] get_shipping_label
- [ ] get_status
- [ ] list_jobs
- [ ] update_job

## inspector - 0% implemented
- [ ] add_attributes_to_findings
- [ ] create_assessment_target
- [ ] create_assessment_template
- [ ] create_exclusions_preview
- [ ] create_resource_group
- [ ] delete_assessment_run
- [ ] delete_assessment_target
- [ ] delete_assessment_template
- [ ] describe_assessment_runs
- [ ] describe_assessment_targets
- [ ] describe_assessment_templates
- [ ] describe_cross_account_access_role
- [ ] describe_exclusions
- [ ] describe_findings
- [ ] describe_resource_groups
- [ ] describe_rules_packages
- [ ] get_assessment_report
- [ ] get_exclusions_preview
- [ ] get_telemetry_metadata
- [ ] list_assessment_run_agents
- [ ] list_assessment_runs
- [ ] list_assessment_targets
- [ ] list_assessment_templates
- [ ] list_event_subscriptions
- [ ] list_exclusions
- [ ] list_findings
- [ ] list_rules_packages
- [ ] list_tags_for_resource
- [ ] preview_agents
- [ ] register_cross_account_access_role
- [ ] remove_attributes_from_findings
- [ ] set_tags_for_resource
- [ ] start_assessment_run
- [ ] stop_assessment_run
- [ ] subscribe_to_event
- [ ] unsubscribe_from_event
- [ ] update_assessment_target

## iot - 25% implemented
- [ ] accept_certificate_transfer
- [X] add_thing_to_thing_group
- [ ] associate_targets_with_job
- [ ] attach_policy
- [X] attach_principal_policy
- [ ] attach_security_profile
- [X] attach_thing_principal
- [ ] cancel_audit_task
- [ ] cancel_certificate_transfer
- [ ] cancel_job
- [ ] cancel_job_execution
- [ ] clear_default_authorizer
- [ ] create_authorizer
- [ ] create_certificate_from_csr
- [X] create_job
- [X] create_keys_and_certificate
- [ ] create_ota_update
- [X] create_policy
- [ ] create_policy_version
- [ ] create_role_alias
- [ ] create_scheduled_audit
- [ ] create_security_profile
- [ ] create_stream
- [X] create_thing
- [X] create_thing_group
- [X] create_thing_type
- [ ] create_topic_rule
- [ ] delete_account_audit_configuration
- [ ] delete_authorizer
- [ ] delete_ca_certificate
- [X] delete_certificate
- [ ] delete_job
- [ ] delete_job_execution
- [ ] delete_ota_update
- [X] delete_policy
- [ ] delete_policy_version
- [ ] delete_registration_code
- [ ] delete_role_alias
- [ ] delete_scheduled_audit
- [ ] delete_security_profile
- [ ] delete_stream
- [X] delete_thing
- [X] delete_thing_group
- [X] delete_thing_type
- [ ] delete_topic_rule
- [ ] delete_v2_logging_level
- [ ] deprecate_thing_type
- [ ] describe_account_audit_configuration
- [ ] describe_audit_task
- [ ] describe_authorizer
- [ ] describe_ca_certificate
- [X] describe_certificate
- [ ] describe_default_authorizer
- [ ] describe_endpoint
- [ ] describe_event_configurations
- [ ] describe_index
- [X] describe_job
- [ ] describe_job_execution
- [ ] describe_role_alias
- [ ] describe_scheduled_audit
- [ ] describe_security_profile
- [ ] describe_stream
- [X] describe_thing
- [X] describe_thing_group
- [ ] describe_thing_registration_task
- [X] describe_thing_type
- [ ] detach_policy
- [X] detach_principal_policy
- [ ] detach_security_profile
- [X] detach_thing_principal
- [ ] disable_topic_rule
- [ ] enable_topic_rule
- [ ] get_effective_policies
- [ ] get_indexing_configuration
- [X] get_job_document
- [ ] get_logging_options
- [ ] get_ota_update
- [X] get_policy
- [ ] get_policy_version
- [ ] get_registration_code
- [ ] get_topic_rule
- [ ] get_v2_logging_options
- [ ] list_active_violations
- [ ] list_attached_policies
- [ ] list_audit_findings
- [ ] list_audit_tasks
- [ ] list_authorizers
- [ ] list_ca_certificates
- [X] list_certificates
- [ ] list_certificates_by_ca
- [ ] list_indices
- [ ] list_job_executions_for_job
- [ ] list_job_executions_for_thing
- [ ] list_jobs
- [ ] list_ota_updates
- [ ] list_outgoing_certificates
- [X] list_policies
- [X] list_policy_principals
- [ ] list_policy_versions
- [X] list_principal_policies
- [X] list_principal_things
- [ ] list_role_aliases
- [ ] list_scheduled_audits
- [ ] list_security_profiles
- [ ] list_security_profiles_for_target
- [ ] list_streams
- [ ] list_targets_for_policy
- [ ] list_targets_for_security_profile
- [X] list_thing_groups
- [X] list_thing_groups_for_thing
- [X] list_thing_principals
- [ ] list_thing_registration_task_reports
- [ ] list_thing_registration_tasks
- [X] list_thing_types
- [X] list_things
- [X] list_things_in_thing_group
- [ ] list_topic_rules
- [ ] list_v2_logging_levels
- [ ] list_violation_events
- [ ] register_ca_certificate
- [ ] register_certificate
- [ ] register_thing
- [ ] reject_certificate_transfer
- [X] remove_thing_from_thing_group
- [ ] replace_topic_rule
- [ ] search_index
- [ ] set_default_authorizer
- [ ] set_default_policy_version
- [ ] set_logging_options
- [ ] set_v2_logging_level
- [ ] set_v2_logging_options
- [ ] start_on_demand_audit_task
- [ ] start_thing_registration_task
- [ ] stop_thing_registration_task
- [ ] test_authorization
- [ ] test_invoke_authorizer
- [ ] transfer_certificate
- [ ] update_account_audit_configuration
- [ ] update_authorizer
- [ ] update_ca_certificate
- [X] update_certificate
- [ ] update_event_configurations
- [ ] update_indexing_configuration
- [ ] update_role_alias
- [ ] update_scheduled_audit
- [ ] update_security_profile
- [ ] update_stream
- [X] update_thing
- [X] update_thing_group
- [X] update_thing_groups_for_thing
- [ ] validate_security_profile_behaviors
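
`get_job_document` is newly checked above; a minimal sketch of the round trip it enables (the job id, thing name, and document payload here are arbitrary):

```python
import json

import boto3
from moto import mock_iot


@mock_iot
def test_get_job_document():
    client = boto3.client("iot", region_name="us-east-1")
    thing = client.create_thing(thingName="my-thing")
    job_document = {"field": "value"}
    client.create_job(
        jobId="my-job",
        targets=[thing["thingArn"]],
        document=json.dumps(job_document),
    )
    # the stored document should come back verbatim
    resp = client.get_job_document(jobId="my-job")
    assert json.loads(resp["document"]) == job_document
```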

## iot-data - 100% implemented
- [X] delete_thing_shadow
- [X] get_thing_shadow
- [X] publish
- [X] update_thing_shadow
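
With iot-data fully covered, shadow round trips should work; a minimal sketch, assuming moto's `mock_iotdata` decorator and that the thing is created first through the plain iot client (the thing name and shadow state are arbitrary):

```python
import json

import boto3
from moto import mock_iot, mock_iotdata


@mock_iot
@mock_iotdata
def test_thing_shadow_round_trip():
    iot = boto3.client("iot", region_name="us-east-1")
    iot.create_thing(thingName="my-thing")
    data = boto3.client("iot-data", region_name="us-east-1")
    payload = json.dumps({"state": {"desired": {"led": "on"}}})
    data.update_thing_shadow(thingName="my-thing", payload=payload)
    # get_thing_shadow returns the shadow document as a streaming blob
    shadow = json.loads(data.get_thing_shadow(thingName="my-thing")["payload"].read())
    assert shadow["state"]["desired"] == {"led": "on"}
```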

## iot-jobs-data - 0% implemented
- [ ] describe_job_execution
- [ ] get_pending_job_executions
- [ ] start_next_pending_job_execution
- [ ] update_job_execution

## iot1click-devices - 0% implemented
- [ ] claim_devices_by_claim_code
- [ ] describe_device
- [ ] finalize_device_claim
- [ ] get_device_methods
- [ ] initiate_device_claim
- [ ] invoke_device_method
- [ ] list_device_events
- [ ] list_devices
- [ ] unclaim_device
- [ ] update_device_state

## iot1click-projects - 0% implemented
- [ ] associate_device_with_placement
- [ ] create_placement
- [ ] create_project
- [ ] delete_placement
- [ ] delete_project
- [ ] describe_placement
- [ ] describe_project
- [ ] disassociate_device_from_placement
- [ ] get_devices_in_placement
- [ ] list_placements
- [ ] list_projects
- [ ] update_placement
- [ ] update_project

## iotanalytics - 0% implemented
- [ ] batch_put_message
- [ ] cancel_pipeline_reprocessing
- [ ] create_channel
- [ ] create_dataset
- [ ] create_dataset_content
- [ ] create_datastore
- [ ] create_pipeline
- [ ] delete_channel
- [ ] delete_dataset
- [ ] delete_dataset_content
- [ ] delete_datastore
- [ ] delete_pipeline
- [ ] describe_channel
- [ ] describe_dataset
- [ ] describe_datastore
- [ ] describe_logging_options
- [ ] describe_pipeline
- [ ] get_dataset_content
- [ ] list_channels
- [ ] list_datasets
- [ ] list_datastores
- [ ] list_pipelines
- [ ] list_tags_for_resource
- [ ] put_logging_options
- [ ] run_pipeline_activity
- [ ] sample_channel_data
- [ ] start_pipeline_reprocessing
- [ ] tag_resource
- [ ] untag_resource
- [ ] update_channel
- [ ] update_dataset
- [ ] update_datastore
- [ ] update_pipeline

## kinesis - 46% implemented
- [X] add_tags_to_stream
- [X] create_stream
- [ ] decrease_stream_retention_period
- [X] delete_stream
- [ ] deregister_stream_consumer
- [ ] describe_limits
- [X] describe_stream
- [ ] describe_stream_consumer
- [ ] describe_stream_summary
- [ ] disable_enhanced_monitoring
- [ ] enable_enhanced_monitoring
- [X] get_records
- [X] get_shard_iterator
- [ ] increase_stream_retention_period
- [ ] list_shards
- [ ] list_stream_consumers
- [X] list_streams
- [X] list_tags_for_stream
- [X] merge_shards
- [X] put_record
- [X] put_records
- [ ] register_stream_consumer
- [X] remove_tags_from_stream
- [X] split_shard
- [ ] start_stream_encryption
- [ ] stop_stream_encryption
- [ ] subscribe_to_shard
- [ ] update_shard_count

## kinesis-video-archived-media - 0% implemented
- [ ] get_hls_streaming_session_url
- [ ] get_media_for_fragment_list
- [ ] list_fragments

## kinesis-video-media - 0% implemented
- [ ] get_media

## kinesisanalytics - 0% implemented
- [ ] add_application_cloud_watch_logging_option
- [ ] add_application_input
- [ ] add_application_input_processing_configuration
- [ ] add_application_output
- [ ] add_application_reference_data_source
- [ ] create_application
- [ ] delete_application
- [ ] delete_application_cloud_watch_logging_option
- [ ] delete_application_input_processing_configuration
- [ ] delete_application_output
- [ ] delete_application_reference_data_source
- [ ] describe_application
- [ ] discover_input_schema
- [ ] list_applications
- [ ] start_application
- [ ] stop_application
- [ ] update_application

## kinesisvideo - 0% implemented
- [ ] create_stream
- [ ] delete_stream
- [ ] describe_stream
- [ ] get_data_endpoint
- [ ] list_streams
- [ ] list_tags_for_stream
- [ ] tag_stream
- [ ] untag_stream
- [ ] update_data_retention
- [ ] update_stream

## kms - 25% implemented
- [ ] cancel_key_deletion
- [ ] create_alias
- [ ] create_grant
- [X] create_key
- [ ] decrypt
- [X] delete_alias
- [ ] delete_imported_key_material
- [X] describe_key
- [ ] disable_key
- [X] disable_key_rotation
- [ ] enable_key
- [X] enable_key_rotation
- [ ] encrypt
- [ ] generate_data_key
- [ ] generate_data_key_without_plaintext
- [ ] generate_random
- [X] get_key_policy
- [X] get_key_rotation_status
- [ ] get_parameters_for_import
- [ ] import_key_material
- [ ] list_aliases
- [ ] list_grants
- [ ] list_key_policies
- [X] list_keys
- [ ] list_resource_tags
- [ ] list_retirable_grants
- [X] put_key_policy
- [ ] re_encrypt
- [ ] retire_grant
- [ ] revoke_grant
- [ ] schedule_key_deletion
- [ ] tag_resource
- [ ] untag_resource
- [ ] update_alias
- [ ] update_key_description
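
The checked kms calls cover key creation and lookup; a minimal sketch, with an arbitrary description:

```python
import boto3
from moto import mock_kms


@mock_kms
def test_create_and_describe_key():
    client = boto3.client("kms", region_name="us-east-1")
    # create_key, describe_key, and list_keys are all checked above
    key_id = client.create_key(Description="test key")["KeyMetadata"]["KeyId"]
    described = client.describe_key(KeyId=key_id)["KeyMetadata"]
    assert described["Description"] == "test key"
    assert len(client.list_keys()["Keys"]) == 1
```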

## lambda - 0% implemented
- [ ] add_permission
- [ ] create_alias
- [ ] create_event_source_mapping
- [ ] create_function
- [ ] delete_alias
- [ ] delete_event_source_mapping
- [ ] delete_function
- [ ] delete_function_concurrency
- [ ] get_account_settings
- [ ] get_alias
- [ ] get_event_source_mapping
- [ ] get_function
- [ ] get_function_configuration
- [ ] get_policy
- [ ] invoke
- [ ] invoke_async
- [ ] list_aliases
- [ ] list_event_source_mappings
- [ ] list_functions
- [ ] list_tags
- [ ] list_versions_by_function
- [ ] publish_version
- [ ] put_function_concurrency
- [ ] remove_permission
- [ ] tag_resource
- [ ] untag_resource
- [ ] update_alias
- [ ] update_event_source_mapping
- [ ] update_function_code
- [ ] update_function_configuration

## lex-models - 0% implemented
- [ ] create_bot_version
- [ ] create_intent_version
- [ ] create_slot_type_version
- [ ] delete_bot
- [ ] delete_bot_alias
- [ ] delete_bot_channel_association
- [ ] delete_bot_version
- [ ] delete_intent
- [ ] delete_intent_version
- [ ] delete_slot_type
- [ ] delete_slot_type_version
- [ ] delete_utterances
- [ ] get_bot
- [ ] get_bot_alias
- [ ] get_bot_aliases
- [ ] get_bot_channel_association
- [ ] get_bot_channel_associations
- [ ] get_bot_versions
- [ ] get_bots
- [ ] get_builtin_intent
- [ ] get_builtin_intents
- [ ] get_builtin_slot_types
- [ ] get_export
- [ ] get_import
- [ ] get_intent
- [ ] get_intent_versions
- [ ] get_intents
- [ ] get_slot_type
- [ ] get_slot_type_versions
- [ ] get_slot_types
- [ ] get_utterances_view
- [ ] put_bot
- [ ] put_bot_alias
- [ ] put_intent
- [ ] put_slot_type
- [ ] start_import

## lex-runtime - 0% implemented
- [ ] post_content
- [ ] post_text

## lightsail - 0% implemented
- [ ] allocate_static_ip
- [ ] attach_disk
- [ ] attach_instances_to_load_balancer
- [ ] attach_load_balancer_tls_certificate
- [ ] attach_static_ip
- [ ] close_instance_public_ports
- [ ] create_disk
- [ ] create_disk_from_snapshot
- [ ] create_disk_snapshot
- [ ] create_domain
- [ ] create_domain_entry
- [ ] create_instance_snapshot
- [ ] create_instances
- [ ] create_instances_from_snapshot
- [ ] create_key_pair
- [ ] create_load_balancer
- [ ] create_load_balancer_tls_certificate
- [ ] delete_disk
- [ ] delete_disk_snapshot
- [ ] delete_domain
- [ ] delete_domain_entry
- [ ] delete_instance
- [ ] delete_instance_snapshot
- [ ] delete_key_pair
- [ ] delete_load_balancer
- [ ] delete_load_balancer_tls_certificate
- [ ] detach_disk
- [ ] detach_instances_from_load_balancer
- [ ] detach_static_ip
- [ ] download_default_key_pair
- [ ] get_active_names
- [ ] get_blueprints
- [ ] get_bundles
- [ ] get_disk
- [ ] get_disk_snapshot
- [ ] get_disk_snapshots
- [ ] get_disks
- [ ] get_domain
- [ ] get_domains
- [ ] get_instance
- [ ] get_instance_access_details
- [ ] get_instance_metric_data
- [ ] get_instance_port_states
- [ ] get_instance_snapshot
- [ ] get_instance_snapshots
- [ ] get_instance_state
- [ ] get_instances
- [ ] get_key_pair
- [ ] get_key_pairs
- [ ] get_load_balancer
- [ ] get_load_balancer_metric_data
- [ ] get_load_balancer_tls_certificates
- [ ] get_load_balancers
- [ ] get_operation
- [ ] get_operations
- [ ] get_operations_for_resource
- [ ] get_regions
- [ ] get_static_ip
- [ ] get_static_ips
- [ ] import_key_pair
- [ ] is_vpc_peered
- [ ] open_instance_public_ports
- [ ] peer_vpc
- [ ] put_instance_public_ports
- [ ] reboot_instance
- [ ] release_static_ip
- [ ] start_instance
- [ ] stop_instance
- [ ] unpeer_vpc
- [ ] update_domain_entry
- [ ] update_load_balancer_attribute

## logs - 27% implemented
- [ ] associate_kms_key
- [ ] cancel_export_task
- [ ] create_export_task
- [X] create_log_group
- [X] create_log_stream
- [ ] delete_destination
- [X] delete_log_group
- [X] delete_log_stream
- [ ] delete_metric_filter
- [ ] delete_resource_policy
- [ ] delete_retention_policy
- [ ] delete_subscription_filter
- [ ] describe_destinations
- [ ] describe_export_tasks
- [X] describe_log_groups
- [X] describe_log_streams
- [ ] describe_metric_filters
- [ ] describe_resource_policies
- [ ] describe_subscription_filters
- [ ] disassociate_kms_key
- [X] filter_log_events
- [X] get_log_events
- [ ] list_tags_log_group
- [ ] put_destination
- [ ] put_destination_policy
- [X] put_log_events
- [ ] put_metric_filter
- [ ] put_resource_policy
- [ ] put_retention_policy
- [ ] put_subscription_filter
- [ ] tag_log_group
- [ ] test_metric_filter
- [ ] untag_log_group
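
The checked logs surface covers the common write/read path; a minimal sketch, with arbitrary group/stream names and timestamp:

```python
import boto3
from moto import mock_logs


@mock_logs
def test_put_and_get_log_events():
    client = boto3.client("logs", region_name="us-east-1")
    client.create_log_group(logGroupName="my-group")
    client.create_log_stream(logGroupName="my-group", logStreamName="my-stream")
    client.put_log_events(
        logGroupName="my-group",
        logStreamName="my-stream",
        logEvents=[{"timestamp": 0, "message": "hello"}],
    )
    # get_log_events is checked above, so the event should round-trip
    events = client.get_log_events(
        logGroupName="my-group", logStreamName="my-stream"
    )["events"]
    assert events[0]["message"] == "hello"
```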

## machinelearning - 0% implemented
- [ ] add_tags
- [ ] create_batch_prediction
- [ ] create_data_source_from_rds
- [ ] create_data_source_from_redshift
- [ ] create_data_source_from_s3
- [ ] create_evaluation
- [ ] create_ml_model
- [ ] create_realtime_endpoint
- [ ] delete_batch_prediction
- [ ] delete_data_source
- [ ] delete_evaluation
- [ ] delete_ml_model
- [ ] delete_realtime_endpoint
- [ ] delete_tags
- [ ] describe_batch_predictions
- [ ] describe_data_sources
- [ ] describe_evaluations
- [ ] describe_ml_models
- [ ] describe_tags
- [ ] get_batch_prediction
- [ ] get_data_source
- [ ] get_evaluation
- [ ] get_ml_model
- [ ] predict
- [ ] update_batch_prediction
- [ ] update_data_source
- [ ] update_evaluation
- [ ] update_ml_model

## macie - 0% implemented
- [ ] associate_member_account
- [ ] associate_s3_resources
- [ ] disassociate_member_account
- [ ] disassociate_s3_resources
- [ ] list_member_accounts
- [ ] list_s3_resources
- [ ] update_s3_resources

## marketplace-entitlement - 0% implemented
- [ ] get_entitlements

## marketplacecommerceanalytics - 0% implemented
- [ ] generate_data_set
- [ ] start_support_data_export

## mediaconvert - 0% implemented
- [ ] cancel_job
- [ ] create_job
- [ ] create_job_template
- [ ] create_preset
- [ ] create_queue
- [ ] delete_job_template
- [ ] delete_preset
- [ ] delete_queue
- [ ] describe_endpoints
- [ ] get_job
- [ ] get_job_template
- [ ] get_preset
- [ ] get_queue
- [ ] list_job_templates
- [ ] list_jobs
- [ ] list_presets
- [ ] list_queues
- [ ] list_tags_for_resource
- [ ] tag_resource
- [ ] untag_resource
- [ ] update_job_template
- [ ] update_preset
- [ ] update_queue

## medialive - 0% implemented
- [ ] create_channel
- [ ] create_input
- [ ] create_input_security_group
- [ ] delete_channel
- [ ] delete_input
- [ ] delete_input_security_group
- [ ] delete_reservation
- [ ] describe_channel
- [ ] describe_input
- [ ] describe_input_security_group
- [ ] describe_offering
- [ ] describe_reservation
- [ ] list_channels
- [ ] list_input_security_groups
- [ ] list_inputs
- [ ] list_offerings
- [ ] list_reservations
- [ ] purchase_offering
- [ ] start_channel
- [ ] stop_channel
- [ ] update_channel
- [ ] update_input
- [ ] update_input_security_group

## mediapackage - 0% implemented
- [ ] create_channel
- [ ] create_origin_endpoint
- [ ] delete_channel
- [ ] delete_origin_endpoint
- [ ] describe_channel
- [ ] describe_origin_endpoint
- [ ] list_channels
- [ ] list_origin_endpoints
- [ ] rotate_channel_credentials
- [ ] update_channel
- [ ] update_origin_endpoint

## mediastore - 0% implemented
- [ ] create_container
- [ ] delete_container
- [ ] delete_container_policy
- [ ] delete_cors_policy
- [ ] describe_container
- [ ] get_container_policy
- [ ] get_cors_policy
- [ ] list_containers
- [ ] put_container_policy
- [ ] put_cors_policy

## mediastore-data - 0% implemented
- [ ] delete_object
- [ ] describe_object
- [ ] get_object
- [ ] list_items
- [ ] put_object

## mediatailor - 0% implemented
- [ ] delete_playback_configuration
- [ ] get_playback_configuration
- [ ] list_playback_configurations
- [ ] put_playback_configuration

## meteringmarketplace - 0% implemented
- [ ] batch_meter_usage
- [ ] meter_usage
- [ ] resolve_customer

## mgh - 0% implemented
- [ ] associate_created_artifact
- [ ] associate_discovered_resource
- [ ] create_progress_update_stream
- [ ] delete_progress_update_stream
- [ ] describe_application_state
- [ ] describe_migration_task
- [ ] disassociate_created_artifact
- [ ] disassociate_discovered_resource
- [ ] import_migration_task
- [ ] list_created_artifacts
- [ ] list_discovered_resources
- [ ] list_migration_tasks
- [ ] list_progress_update_streams
- [ ] notify_application_state
- [ ] notify_migration_task_state
- [ ] put_resource_attributes

## mobile - 0% implemented
- [ ] create_project
- [ ] delete_project
- [ ] describe_bundle
- [ ] describe_project
- [ ] export_bundle
- [ ] export_project
- [ ] list_bundles
- [ ] list_projects
- [ ] update_project

## mq - 0% implemented
- [ ] create_broker
- [ ] create_configuration
- [ ] create_user
- [ ] delete_broker
- [ ] delete_user
- [ ] describe_broker
- [ ] describe_configuration
- [ ] describe_configuration_revision
- [ ] describe_user
- [ ] list_brokers
- [ ] list_configuration_revisions
- [ ] list_configurations
- [ ] list_users
- [ ] reboot_broker
- [ ] update_broker
- [ ] update_configuration
- [ ] update_user

## mturk - 0% implemented
- [ ] accept_qualification_request
- [ ] approve_assignment
- [ ] associate_qualification_with_worker
- [ ] create_additional_assignments_for_hit
- [ ] create_hit
- [ ] create_hit_type
- [ ] create_hit_with_hit_type
- [ ] create_qualification_type
- [ ] create_worker_block
- [ ] delete_hit
- [ ] delete_qualification_type
- [ ] delete_worker_block
- [ ] disassociate_qualification_from_worker
- [ ] get_account_balance
- [ ] get_assignment
- [ ] get_file_upload_url
- [ ] get_hit
- [ ] get_qualification_score
- [ ] get_qualification_type
- [ ] list_assignments_for_hit
- [ ] list_bonus_payments
- [ ] list_hits
- [ ] list_hits_for_qualification_type
- [ ] list_qualification_requests
- [ ] list_qualification_types
- [ ] list_review_policy_results_for_hit
- [ ] list_reviewable_hits
- [ ] list_worker_blocks
- [ ] list_workers_with_qualification_type
- [ ] notify_workers
- [ ] reject_assignment
- [ ] reject_qualification_request
- [ ] send_bonus
- [ ] send_test_event_notification
- [ ] update_expiration_for_hit
- [ ] update_hit_review_status
- [ ] update_hit_type_of_hit
- [ ] update_notification_settings
- [ ] update_qualification_type

## neptune - 0% implemented
- [ ] add_role_to_db_cluster
- [ ] add_source_identifier_to_subscription
- [ ] add_tags_to_resource
- [ ] apply_pending_maintenance_action
- [ ] copy_db_cluster_parameter_group
- [ ] copy_db_cluster_snapshot
- [ ] copy_db_parameter_group
- [ ] create_db_cluster
- [ ] create_db_cluster_parameter_group
- [ ] create_db_cluster_snapshot
- [ ] create_db_instance
- [ ] create_db_parameter_group
- [ ] create_db_subnet_group
- [ ] create_event_subscription
- [ ] delete_db_cluster
- [ ] delete_db_cluster_parameter_group
- [ ] delete_db_cluster_snapshot
- [ ] delete_db_instance
- [ ] delete_db_parameter_group
- [ ] delete_db_subnet_group
- [ ] delete_event_subscription
- [ ] describe_db_cluster_parameter_groups
- [ ] describe_db_cluster_parameters
- [ ] describe_db_cluster_snapshot_attributes
- [ ] describe_db_cluster_snapshots
- [ ] describe_db_clusters
- [ ] describe_db_engine_versions
- [ ] describe_db_instances
- [ ] describe_db_parameter_groups
- [ ] describe_db_parameters
- [ ] describe_db_subnet_groups
describe_engine_default_cluster_parameters -- [ ] describe_engine_default_parameters -- [ ] describe_event_categories -- [ ] describe_event_subscriptions -- [ ] describe_events -- [ ] describe_orderable_db_instance_options -- [ ] describe_pending_maintenance_actions -- [ ] describe_valid_db_instance_modifications -- [ ] failover_db_cluster -- [ ] list_tags_for_resource -- [ ] modify_db_cluster -- [ ] modify_db_cluster_parameter_group -- [ ] modify_db_cluster_snapshot_attribute -- [ ] modify_db_instance -- [ ] modify_db_parameter_group -- [ ] modify_db_subnet_group -- [ ] modify_event_subscription -- [ ] promote_read_replica_db_cluster -- [ ] reboot_db_instance -- [ ] remove_role_from_db_cluster -- [ ] remove_source_identifier_from_subscription -- [ ] remove_tags_from_resource -- [ ] reset_db_cluster_parameter_group -- [ ] reset_db_parameter_group -- [ ] restore_db_cluster_from_snapshot -- [ ] restore_db_cluster_to_point_in_time - -## opsworks - 12% implemented -- [ ] assign_instance -- [ ] assign_volume -- [ ] associate_elastic_ip -- [ ] attach_elastic_load_balancer -- [ ] clone_stack -- [X] create_app -- [ ] create_deployment -- [X] create_instance -- [X] create_layer -- [X] create_stack -- [ ] create_user_profile -- [ ] delete_app -- [ ] delete_instance -- [ ] delete_layer -- [ ] delete_stack -- [ ] delete_user_profile -- [ ] deregister_ecs_cluster -- [ ] deregister_elastic_ip -- [ ] deregister_instance -- [ ] deregister_rds_db_instance -- [ ] deregister_volume -- [ ] describe_agent_versions -- [X] describe_apps -- [ ] describe_commands -- [ ] describe_deployments -- [ ] describe_ecs_clusters -- [ ] describe_elastic_ips -- [ ] describe_elastic_load_balancers -- [X] describe_instances -- [X] describe_layers -- [ ] describe_load_based_auto_scaling -- [ ] describe_my_user_profile -- [ ] describe_operating_systems -- [ ] describe_permissions -- [ ] describe_raid_arrays -- [ ] describe_rds_db_instances -- [ ] describe_service_errors -- [ ] describe_stack_provisioning_parameters -- [ ] describe_stack_summary -- [X] describe_stacks -- [ ] describe_time_based_auto_scaling -- [ ] describe_user_profiles -- [ ] describe_volumes -- [ ] detach_elastic_load_balancer -- [ ] disassociate_elastic_ip -- [ ] get_hostname_suggestion -- [ ] grant_access -- [ ] list_tags -- [ ] reboot_instance -- [ ] register_ecs_cluster -- [ ] register_elastic_ip -- [ ] register_instance -- [ ] register_rds_db_instance -- [ ] register_volume -- [ ] set_load_based_auto_scaling -- [ ] set_permission -- [ ] set_time_based_auto_scaling -- [X] start_instance -- [ ] start_stack -- [ ] stop_instance -- [ ] stop_stack -- [ ] tag_resource -- [ ] unassign_instance -- [ ] unassign_volume -- [ ] untag_resource -- [ ] update_app -- [ ] update_elastic_ip -- [ ] update_instance -- [ ] update_layer -- [ ] update_my_user_profile -- [ ] update_rds_db_instance -- [ ] update_stack -- [ ] update_user_profile -- [ ] update_volume - -## opsworkscm - 0% implemented -- [ ] associate_node -- [ ] create_backup -- [ ] create_server -- [ ] delete_backup -- [ ] delete_server -- [ ] describe_account_attributes -- [ ] describe_backups -- [ ] describe_events -- [ ] describe_node_association_status -- [ ] describe_servers -- [ ] disassociate_node -- [ ] restore_server -- [ ] start_maintenance -- [ ] update_server -- [ ] update_server_engine_attributes - -## organizations - 0% implemented -- [ ] accept_handshake -- [ ] attach_policy -- [ ] cancel_handshake -- [ ] create_account -- [ ] create_organization -- [ ] create_organizational_unit -- [ ] create_policy 
-- [ ] decline_handshake
-- [ ] delete_organization
-- [ ] delete_organizational_unit
-- [ ] delete_policy
-- [ ] describe_account
-- [ ] describe_create_account_status
-- [ ] describe_handshake
-- [ ] describe_organization
-- [ ] describe_organizational_unit
-- [ ] describe_policy
-- [ ] detach_policy
-- [ ] disable_aws_service_access
-- [ ] disable_policy_type
-- [ ] enable_all_features
-- [ ] enable_aws_service_access
-- [ ] enable_policy_type
-- [ ] invite_account_to_organization
-- [ ] leave_organization
-- [ ] list_accounts
-- [ ] list_accounts_for_parent
-- [ ] list_aws_service_access_for_organization
-- [ ] list_children
-- [ ] list_create_account_status
-- [ ] list_handshakes_for_account
-- [ ] list_handshakes_for_organization
-- [ ] list_organizational_units_for_parent
-- [ ] list_parents
-- [ ] list_policies
-- [ ] list_policies_for_target
-- [ ] list_roots
-- [ ] list_targets_for_policy
-- [ ] move_account
-- [ ] remove_account_from_organization
-- [ ] update_organizational_unit
-- [ ] update_policy
-
-## pi - 0% implemented
-- [ ] describe_dimension_keys
-- [ ] get_resource_metrics
-
-## pinpoint - 0% implemented
-- [ ] create_app
-- [ ] create_campaign
-- [ ] create_export_job
-- [ ] create_import_job
-- [ ] create_segment
-- [ ] delete_adm_channel
-- [ ] delete_apns_channel
-- [ ] delete_apns_sandbox_channel
-- [ ] delete_apns_voip_channel
-- [ ] delete_apns_voip_sandbox_channel
-- [ ] delete_app
-- [ ] delete_baidu_channel
-- [ ] delete_campaign
-- [ ] delete_email_channel
-- [ ] delete_endpoint
-- [ ] delete_event_stream
-- [ ] delete_gcm_channel
-- [ ] delete_segment
-- [ ] delete_sms_channel
-- [ ] delete_user_endpoints
-- [ ] get_adm_channel
-- [ ] get_apns_channel
-- [ ] get_apns_sandbox_channel
-- [ ] get_apns_voip_channel
-- [ ] get_apns_voip_sandbox_channel
-- [ ] get_app
-- [ ] get_application_settings
-- [ ] get_apps
-- [ ] get_baidu_channel
-- [ ] get_campaign
-- [ ] get_campaign_activities
-- [ ] get_campaign_version
-- [ ] get_campaign_versions
-- [ ] get_campaigns
-- [ ] get_channels
-- [ ] get_email_channel
-- [ ] get_endpoint
-- [ ] get_event_stream
-- [ ] get_export_job
-- [ ] get_export_jobs
-- [ ] get_gcm_channel
-- [ ] get_import_job
-- [ ] get_import_jobs
-- [ ] get_segment
-- [ ] get_segment_export_jobs
-- [ ] get_segment_import_jobs
-- [ ] get_segment_version
-- [ ] get_segment_versions
-- [ ] get_segments
-- [ ] get_sms_channel
-- [ ] get_user_endpoints
-- [ ] phone_number_validate
-- [ ] put_event_stream
-- [ ] put_events
-- [ ] remove_attributes
-- [ ] send_messages
-- [ ] send_users_messages
-- [ ] update_adm_channel
-- [ ] update_apns_channel
-- [ ] update_apns_sandbox_channel
-- [ ] update_apns_voip_channel
-- [ ] update_apns_voip_sandbox_channel
-- [ ] update_application_settings
-- [ ] update_baidu_channel
-- [ ] update_campaign
-- [ ] update_email_channel
-- [ ] update_endpoint
-- [ ] update_endpoints_batch
-- [ ] update_gcm_channel
-- [ ] update_segment
-- [ ] update_sms_channel
-
-## polly - 55% implemented
-- [X] delete_lexicon
-- [X] describe_voices
-- [X] get_lexicon
-- [ ] get_speech_synthesis_task
-- [X] list_lexicons
-- [ ] list_speech_synthesis_tasks
-- [X] put_lexicon
-- [ ] start_speech_synthesis_task
-- [ ] synthesize_speech
-
-## pricing - 0% implemented
-- [ ] describe_services
-- [ ] get_attribute_values
-- [ ] get_products
-
-## rds - 0% implemented
-- [ ] add_role_to_db_cluster
-- [ ] add_source_identifier_to_subscription
-- [ ] add_tags_to_resource
-- [ ] apply_pending_maintenance_action
-- [ ] authorize_db_security_group_ingress
-- [ ] backtrack_db_cluster
-- [ ] copy_db_cluster_parameter_group
-- [ ] copy_db_cluster_snapshot
-- [ ] copy_db_parameter_group
-- [ ] copy_db_snapshot
-- [ ] copy_option_group
-- [ ] create_db_cluster
-- [ ] create_db_cluster_parameter_group
-- [ ] create_db_cluster_snapshot
-- [ ] create_db_instance
-- [ ] create_db_instance_read_replica
-- [ ] create_db_parameter_group
-- [ ] create_db_security_group
-- [ ] create_db_snapshot
-- [ ] create_db_subnet_group
-- [ ] create_event_subscription
-- [ ] create_option_group
-- [ ] delete_db_cluster
-- [ ] delete_db_cluster_parameter_group
-- [ ] delete_db_cluster_snapshot
-- [ ] delete_db_instance
-- [ ] delete_db_parameter_group
-- [ ] delete_db_security_group
-- [ ] delete_db_snapshot
-- [ ] delete_db_subnet_group
-- [ ] delete_event_subscription
-- [ ] delete_option_group
-- [ ] describe_account_attributes
-- [ ] describe_certificates
-- [ ] describe_db_cluster_backtracks
-- [ ] describe_db_cluster_parameter_groups
-- [ ] describe_db_cluster_parameters
-- [ ] describe_db_cluster_snapshot_attributes
-- [ ] describe_db_cluster_snapshots
-- [ ] describe_db_clusters
-- [ ] describe_db_engine_versions
-- [ ] describe_db_instances
-- [ ] describe_db_log_files
-- [ ] describe_db_parameter_groups
-- [ ] describe_db_parameters
-- [ ] describe_db_security_groups
-- [ ] describe_db_snapshot_attributes
-- [ ] describe_db_snapshots
-- [ ] describe_db_subnet_groups
-- [ ] describe_engine_default_cluster_parameters
-- [ ] describe_engine_default_parameters
-- [ ] describe_event_categories
-- [ ] describe_event_subscriptions
-- [ ] describe_events
-- [ ] describe_option_group_options
-- [ ] describe_option_groups
-- [ ] describe_orderable_db_instance_options
-- [ ] describe_pending_maintenance_actions
-- [ ] describe_reserved_db_instances
-- [ ] describe_reserved_db_instances_offerings
-- [ ] describe_source_regions
-- [ ] describe_valid_db_instance_modifications
-- [ ] download_db_log_file_portion
-- [ ] failover_db_cluster
-- [ ] list_tags_for_resource
-- [ ] modify_current_db_cluster_capacity
-- [ ] modify_db_cluster
-- [ ] modify_db_cluster_parameter_group
-- [ ] modify_db_cluster_snapshot_attribute
-- [ ] modify_db_instance
-- [ ] modify_db_parameter_group
-- [ ] modify_db_snapshot
-- [ ] modify_db_snapshot_attribute
-- [ ] modify_db_subnet_group
-- [ ] modify_event_subscription
-- [ ] modify_option_group
-- [ ] promote_read_replica
-- [ ] promote_read_replica_db_cluster
-- [ ] purchase_reserved_db_instances_offering
-- [ ] reboot_db_instance
-- [ ] remove_role_from_db_cluster
-- [ ] remove_source_identifier_from_subscription
-- [ ] remove_tags_from_resource
-- [ ] reset_db_cluster_parameter_group
-- [ ] reset_db_parameter_group
-- [ ] restore_db_cluster_from_s3
-- [ ] restore_db_cluster_from_snapshot
-- [ ] restore_db_cluster_to_point_in_time
-- [ ] restore_db_instance_from_db_snapshot
-- [ ] restore_db_instance_from_s3
-- [ ] restore_db_instance_to_point_in_time
-- [ ] revoke_db_security_group_ingress
-- [ ] start_db_instance
-- [ ] stop_db_instance
-
-## redshift - 38% implemented
-- [ ] accept_reserved_node_exchange
-- [ ] authorize_cluster_security_group_ingress
-- [ ] authorize_snapshot_access
-- [ ] copy_cluster_snapshot
-- [X] create_cluster
-- [X] create_cluster_parameter_group
-- [X] create_cluster_security_group
-- [X] create_cluster_snapshot
-- [X] create_cluster_subnet_group
-- [ ] create_event_subscription
-- [ ] create_hsm_client_certificate
-- [ ] create_hsm_configuration
-- [X] create_snapshot_copy_grant
-- [X] create_tags
-- [X] delete_cluster
-- [X] delete_cluster_parameter_group
-- [X] delete_cluster_security_group
-- [X] delete_cluster_snapshot
-- [X] delete_cluster_subnet_group
-- [ ] delete_event_subscription
-- [ ] delete_hsm_client_certificate
-- [ ] delete_hsm_configuration
-- [X] delete_snapshot_copy_grant
-- [X] delete_tags
-- [ ] describe_cluster_db_revisions
-- [X] describe_cluster_parameter_groups
-- [ ] describe_cluster_parameters
-- [X] describe_cluster_security_groups
-- [X] describe_cluster_snapshots
-- [X] describe_cluster_subnet_groups
-- [ ] describe_cluster_tracks
-- [ ] describe_cluster_versions
-- [X] describe_clusters
-- [ ] describe_default_cluster_parameters
-- [ ] describe_event_categories
-- [ ] describe_event_subscriptions
-- [ ] describe_events
-- [ ] describe_hsm_client_certificates
-- [ ] describe_hsm_configurations
-- [ ] describe_logging_status
-- [ ] describe_orderable_cluster_options
-- [ ] describe_reserved_node_offerings
-- [ ] describe_reserved_nodes
-- [ ] describe_resize
-- [X] describe_snapshot_copy_grants
-- [ ] describe_table_restore_status
-- [X] describe_tags
-- [ ] disable_logging
-- [X] disable_snapshot_copy
-- [ ] enable_logging
-- [X] enable_snapshot_copy
-- [ ] get_cluster_credentials
-- [ ] get_reserved_node_exchange_offerings
-- [X] modify_cluster
-- [ ] modify_cluster_db_revision
-- [ ] modify_cluster_iam_roles
-- [ ] modify_cluster_parameter_group
-- [ ] modify_cluster_subnet_group
-- [ ] modify_event_subscription
-- [X] modify_snapshot_copy_retention_period
-- [ ] purchase_reserved_node_offering
-- [ ] reboot_cluster
-- [ ] reset_cluster_parameter_group
-- [X] restore_from_cluster_snapshot
-- [ ] restore_table_from_cluster_snapshot
-- [ ] revoke_cluster_security_group_ingress
-- [ ] revoke_snapshot_access
-- [ ] rotate_encryption_key
-
-## rekognition - 0% implemented
-- [ ] compare_faces
-- [ ] create_collection
-- [ ] create_stream_processor
-- [ ] delete_collection
-- [ ] delete_faces
-- [ ] delete_stream_processor
-- [ ] describe_stream_processor
-- [ ] detect_faces
-- [ ] detect_labels
-- [ ] detect_moderation_labels
-- [ ] detect_text
-- [ ] get_celebrity_info
-- [ ] get_celebrity_recognition
-- [ ] get_content_moderation
-- [ ] get_face_detection
-- [ ] get_face_search
-- [ ] get_label_detection
-- [ ] get_person_tracking
-- [ ] index_faces
-- [ ] list_collections
-- [ ] list_faces
-- [ ] list_stream_processors
-- [ ] recognize_celebrities
-- [ ] search_faces
-- [ ] search_faces_by_image
-- [ ] start_celebrity_recognition
-- [ ] start_content_moderation
-- [ ] start_face_detection
-- [ ] start_face_search
-- [ ] start_label_detection
-- [ ] start_person_tracking
-- [ ] start_stream_processor
-- [ ] stop_stream_processor
-
-## resource-groups - 0% implemented
-- [ ] create_group
-- [ ] delete_group
-- [ ] get_group
-- [ ] get_group_query
-- [ ] get_tags
-- [ ] list_group_resources
-- [ ] list_groups
-- [ ] search_resources
-- [ ] tag
-- [ ] untag
-- [ ] update_group
-- [ ] update_group_query
-
-## resourcegroupstaggingapi - 60% implemented
-- [X] get_resources
-- [X] get_tag_keys
-- [X] get_tag_values
-- [ ] tag_resources
-- [ ] untag_resources
-
-## route53 - 12% implemented
-- [ ] associate_vpc_with_hosted_zone
-- [ ] change_resource_record_sets
-- [X] change_tags_for_resource
-- [X] create_health_check
-- [X] create_hosted_zone
-- [ ] create_query_logging_config
-- [ ] create_reusable_delegation_set
-- [ ] create_traffic_policy
-- [ ] create_traffic_policy_instance
-- [ ] create_traffic_policy_version
-- [ ] create_vpc_association_authorization
-- [X] delete_health_check
-- [X] delete_hosted_zone
-- [ ] delete_query_logging_config
-- [ ] delete_reusable_delegation_set
-- [ ] delete_traffic_policy
-- [ ] delete_traffic_policy_instance
-- [ ] delete_vpc_association_authorization
-- [ ] disassociate_vpc_from_hosted_zone
-- [ ] get_account_limit
-- [ ] get_change
-- [ ] get_checker_ip_ranges
-- [ ] get_geo_location
-- [ ] get_health_check
-- [ ] get_health_check_count
-- [ ] get_health_check_last_failure_reason
-- [ ] get_health_check_status
-- [X] get_hosted_zone
-- [ ] get_hosted_zone_count
-- [ ] get_hosted_zone_limit
-- [ ] get_query_logging_config
-- [ ] get_reusable_delegation_set
-- [ ] get_reusable_delegation_set_limit
-- [ ] get_traffic_policy
-- [ ] get_traffic_policy_instance
-- [ ] get_traffic_policy_instance_count
-- [ ] list_geo_locations
-- [ ] list_health_checks
-- [ ] list_hosted_zones
-- [ ] list_hosted_zones_by_name
-- [ ] list_query_logging_configs
-- [ ] list_resource_record_sets
-- [ ] list_reusable_delegation_sets
-- [X] list_tags_for_resource
-- [ ] list_tags_for_resources
-- [ ] list_traffic_policies
-- [ ] list_traffic_policy_instances
-- [ ] list_traffic_policy_instances_by_hosted_zone
-- [ ] list_traffic_policy_instances_by_policy
-- [ ] list_traffic_policy_versions
-- [ ] list_vpc_association_authorizations
-- [ ] test_dns_answer
-- [ ] update_health_check
-- [ ] update_hosted_zone_comment
-- [ ] update_traffic_policy_comment
-- [ ] update_traffic_policy_instance
-
-## route53domains - 0% implemented
-- [ ] check_domain_availability
-- [ ] check_domain_transferability
-- [ ] delete_tags_for_domain
-- [ ] disable_domain_auto_renew
-- [ ] disable_domain_transfer_lock
-- [ ] enable_domain_auto_renew
-- [ ] enable_domain_transfer_lock
-- [ ] get_contact_reachability_status
-- [ ] get_domain_detail
-- [ ] get_domain_suggestions
-- [ ] get_operation_detail
-- [ ] list_domains
-- [ ] list_operations
-- [ ] list_tags_for_domain
-- [ ] register_domain
-- [ ] renew_domain
-- [ ] resend_contact_reachability_email
-- [ ] retrieve_domain_auth_code
-- [ ] transfer_domain
-- [ ] update_domain_contact
-- [ ] update_domain_contact_privacy
-- [ ] update_domain_nameservers
-- [ ] update_tags_for_domain
-- [ ] view_billing
-
-## s3 - 15% implemented
-- [ ] abort_multipart_upload
-- [ ] complete_multipart_upload
-- [ ] copy_object
-- [X] create_bucket
-- [ ] create_multipart_upload
-- [X] delete_bucket
-- [ ] delete_bucket_analytics_configuration
-- [X] delete_bucket_cors
-- [ ] delete_bucket_encryption
-- [ ] delete_bucket_inventory_configuration
-- [ ] delete_bucket_lifecycle
-- [ ] delete_bucket_metrics_configuration
-- [X] delete_bucket_policy
-- [ ] delete_bucket_replication
-- [X] delete_bucket_tagging
-- [ ] delete_bucket_website
-- [ ] delete_object
-- [ ] delete_object_tagging
-- [ ] delete_objects
-- [ ] get_bucket_accelerate_configuration
-- [X] get_bucket_acl
-- [ ] get_bucket_analytics_configuration
-- [ ] get_bucket_cors
-- [ ] get_bucket_encryption
-- [ ] get_bucket_inventory_configuration
-- [ ] get_bucket_lifecycle
-- [ ] get_bucket_lifecycle_configuration
-- [ ] get_bucket_location
-- [ ] get_bucket_logging
-- [ ] get_bucket_metrics_configuration
-- [ ] get_bucket_notification
-- [ ] get_bucket_notification_configuration
-- [X] get_bucket_policy
-- [ ] get_bucket_replication
-- [ ] get_bucket_request_payment
-- [ ] get_bucket_tagging
-- [X] get_bucket_versioning
-- [ ] get_bucket_website
-- [ ] get_object
-- [ ] get_object_acl
-- [ ] get_object_tagging
-- [ ] get_object_torrent
-- [ ] head_bucket
-- [ ] head_object
-- [ ] list_bucket_analytics_configurations
-- [ ] list_bucket_inventory_configurations
-- [ ] list_bucket_metrics_configurations
-- [ ] list_buckets
-- [ ] list_multipart_uploads
-- [ ] list_object_versions
-- [ ] list_objects
-- [ ] list_objects_v2
-- [ ] list_parts
-- [ ] put_bucket_accelerate_configuration
-- [ ] put_bucket_acl
-- [ ] put_bucket_analytics_configuration
-- [X] put_bucket_cors
-- [ ] put_bucket_encryption
-- [ ] put_bucket_inventory_configuration
-- [ ] put_bucket_lifecycle
-- [ ] put_bucket_lifecycle_configuration
-- [X] put_bucket_logging
-- [ ] put_bucket_metrics_configuration
-- [ ] put_bucket_notification
-- [X] put_bucket_notification_configuration
-- [ ] put_bucket_policy
-- [ ] put_bucket_replication
-- [ ] put_bucket_request_payment
-- [X] put_bucket_tagging
-- [ ] put_bucket_versioning
-- [ ] put_bucket_website
-- [ ] put_object
-- [ ] put_object_acl
-- [ ] put_object_tagging
-- [ ] restore_object
-- [ ] select_object_content
-- [ ] upload_part
-- [ ] upload_part_copy
-
-## sagemaker - 0% implemented
-- [ ] add_tags
-- [ ] create_endpoint
-- [ ] create_endpoint_config
-- [ ] create_hyper_parameter_tuning_job
-- [ ] create_model
-- [ ] create_notebook_instance
-- [ ] create_notebook_instance_lifecycle_config
-- [ ] create_presigned_notebook_instance_url
-- [ ] create_training_job
-- [ ] create_transform_job
-- [ ] delete_endpoint
-- [ ] delete_endpoint_config
-- [ ] delete_model
-- [ ] delete_notebook_instance
-- [ ] delete_notebook_instance_lifecycle_config
-- [ ] delete_tags
-- [ ] describe_endpoint
-- [ ] describe_endpoint_config
-- [ ] describe_hyper_parameter_tuning_job
-- [ ] describe_model
-- [ ] describe_notebook_instance
-- [ ] describe_notebook_instance_lifecycle_config
-- [ ] describe_training_job
-- [ ] describe_transform_job
-- [ ] list_endpoint_configs
-- [ ] list_endpoints
-- [ ] list_hyper_parameter_tuning_jobs
-- [ ] list_models
-- [ ] list_notebook_instance_lifecycle_configs
-- [ ] list_notebook_instances
-- [ ] list_tags
-- [ ] list_training_jobs
-- [ ] list_training_jobs_for_hyper_parameter_tuning_job
-- [ ] list_transform_jobs
-- [ ] start_notebook_instance
-- [ ] stop_hyper_parameter_tuning_job
-- [ ] stop_notebook_instance
-- [ ] stop_training_job
-- [ ] stop_transform_job
-- [ ] update_endpoint
-- [ ] update_endpoint_weights_and_capacities
-- [ ] update_notebook_instance
-- [ ] update_notebook_instance_lifecycle_config
-
-## sagemaker-runtime - 0% implemented
-- [ ] invoke_endpoint
-
-## sdb - 0% implemented
-- [ ] batch_delete_attributes
-- [ ] batch_put_attributes
-- [ ] create_domain
-- [ ] delete_attributes
-- [ ] delete_domain
-- [ ] domain_metadata
-- [ ] get_attributes
-- [ ] list_domains
-- [ ] put_attributes
-- [ ] select
-
-## secretsmanager - 22% implemented
-- [ ] cancel_rotate_secret
-- [X] create_secret
-- [ ] delete_resource_policy
-- [ ] delete_secret
-- [X] describe_secret
-- [X] get_random_password
-- [ ] get_resource_policy
-- [X] get_secret_value
-- [ ] list_secret_version_ids
-- [ ] list_secrets
-- [ ] put_resource_policy
-- [ ] put_secret_value
-- [ ] restore_secret
-- [ ] rotate_secret
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_secret
-- [ ] update_secret_version_stage
-
-## serverlessrepo - 0% implemented
-- [ ] create_application
-- [ ] create_application_version
-- [ ] create_cloud_formation_change_set
-- [ ] delete_application
-- [ ] get_application
-- [ ] get_application_policy
-- [ ] list_application_versions
-- [ ] list_applications
-- [ ] put_application_policy
-- [ ] update_application
-
-## servicecatalog - 0% implemented
-- [ ] accept_portfolio_share
-- [ ] associate_principal_with_portfolio
-- [ ] associate_product_with_portfolio
-- [ ] associate_tag_option_with_resource
-- [ ] copy_product
-- [ ] create_constraint
-- [ ] create_portfolio
-- [ ] create_portfolio_share
-- [ ] create_product
-- [ ] create_provisioned_product_plan
-- [ ] create_provisioning_artifact
-- [ ] create_tag_option
-- [ ] delete_constraint
-- [ ] delete_portfolio
-- [ ] delete_portfolio_share
-- [ ] delete_product
-- [ ] delete_provisioned_product_plan
-- [ ] delete_provisioning_artifact
-- [ ] delete_tag_option
-- [ ] describe_constraint
-- [ ] describe_copy_product_status
-- [ ] describe_portfolio
-- [ ] describe_product
-- [ ] describe_product_as_admin
-- [ ] describe_product_view
-- [ ] describe_provisioned_product
-- [ ] describe_provisioned_product_plan
-- [ ] describe_provisioning_artifact
-- [ ] describe_provisioning_parameters
-- [ ] describe_record
-- [ ] describe_tag_option
-- [ ] disassociate_principal_from_portfolio
-- [ ] disassociate_product_from_portfolio
-- [ ] disassociate_tag_option_from_resource
-- [ ] execute_provisioned_product_plan
-- [ ] list_accepted_portfolio_shares
-- [ ] list_constraints_for_portfolio
-- [ ] list_launch_paths
-- [ ] list_portfolio_access
-- [ ] list_portfolios
-- [ ] list_portfolios_for_product
-- [ ] list_principals_for_portfolio
-- [ ] list_provisioned_product_plans
-- [ ] list_provisioning_artifacts
-- [ ] list_record_history
-- [ ] list_resources_for_tag_option
-- [ ] list_tag_options
-- [ ] provision_product
-- [ ] reject_portfolio_share
-- [ ] scan_provisioned_products
-- [ ] search_products
-- [ ] search_products_as_admin
-- [ ] search_provisioned_products
-- [ ] terminate_provisioned_product
-- [ ] update_constraint
-- [ ] update_portfolio
-- [ ] update_product
-- [ ] update_provisioned_product
-- [ ] update_provisioning_artifact
-- [ ] update_tag_option
-
-## servicediscovery - 0% implemented
-- [ ] create_private_dns_namespace
-- [ ] create_public_dns_namespace
-- [ ] create_service
-- [ ] delete_namespace
-- [ ] delete_service
-- [ ] deregister_instance
-- [ ] get_instance
-- [ ] get_instances_health_status
-- [ ] get_namespace
-- [ ] get_operation
-- [ ] get_service
-- [ ] list_instances
-- [ ] list_namespaces
-- [ ] list_operations
-- [ ] list_services
-- [ ] register_instance
-- [ ] update_instance_custom_health_status
-- [ ] update_service
-
-## ses - 11% implemented
-- [ ] clone_receipt_rule_set
-- [ ] create_configuration_set
-- [ ] create_configuration_set_event_destination
-- [ ] create_configuration_set_tracking_options
-- [ ] create_custom_verification_email_template
-- [ ] create_receipt_filter
-- [ ] create_receipt_rule
-- [ ] create_receipt_rule_set
-- [ ] create_template
-- [ ] delete_configuration_set
-- [ ] delete_configuration_set_event_destination
-- [ ] delete_configuration_set_tracking_options
-- [ ] delete_custom_verification_email_template
-- [X] delete_identity
-- [ ] delete_identity_policy
-- [ ] delete_receipt_filter
-- [ ] delete_receipt_rule
-- [ ] delete_receipt_rule_set
-- [ ] delete_template
-- [ ] delete_verified_email_address
-- [ ] describe_active_receipt_rule_set
-- [ ] describe_configuration_set
-- [ ] describe_receipt_rule
-- [ ] describe_receipt_rule_set
-- [ ] get_account_sending_enabled
-- [ ] get_custom_verification_email_template
-- [ ] get_identity_dkim_attributes
-- [ ] get_identity_mail_from_domain_attributes
-- [ ] get_identity_notification_attributes
-- [ ] get_identity_policies
-- [ ] get_identity_verification_attributes
-- [X] get_send_quota
-- [ ] get_send_statistics
-- [ ] get_template
-- [ ] list_configuration_sets
-- [ ] list_custom_verification_email_templates
-- [X] list_identities
-- [ ] list_identity_policies
-- [ ] list_receipt_filters
-- [ ] list_receipt_rule_sets
-- [ ] list_templates
-- [X] list_verified_email_addresses
-- [ ] put_identity_policy
-- [ ] reorder_receipt_rule_set
-- [ ] send_bounce
-- [ ] send_bulk_templated_email
-- [ ] send_custom_verification_email
-- [X] send_email
-- [X] send_raw_email
-- [ ] send_templated_email
-- [ ] set_active_receipt_rule_set
-- [ ] set_identity_dkim_enabled
-- [ ] set_identity_feedback_forwarding_enabled
-- [ ] set_identity_headers_in_notifications_enabled
-- [ ] set_identity_mail_from_domain
-- [ ] set_identity_notification_topic
-- [ ] set_receipt_rule_position
-- [ ] test_render_template
-- [ ] update_account_sending_enabled
-- [ ] update_configuration_set_event_destination
-- [ ] update_configuration_set_reputation_metrics_enabled
-- [ ] update_configuration_set_sending_enabled
-- [ ] update_configuration_set_tracking_options
-- [ ] update_custom_verification_email_template
-- [ ] update_receipt_rule
-- [ ] update_template
-- [ ] verify_domain_dkim
-- [ ] verify_domain_identity
-- [X] verify_email_address
-- [X] verify_email_identity
-
-## shield - 0% implemented
-- [ ] associate_drt_log_bucket
-- [ ] associate_drt_role
-- [ ] create_protection
-- [ ] create_subscription
-- [ ] delete_protection
-- [ ] delete_subscription
-- [ ] describe_attack
-- [ ] describe_drt_access
-- [ ] describe_emergency_contact_settings
-- [ ] describe_protection
-- [ ] describe_subscription
-- [ ] disassociate_drt_log_bucket
-- [ ] disassociate_drt_role
-- [ ] get_subscription_state
-- [ ] list_attacks
-- [ ] list_protections
-- [ ] update_emergency_contact_settings
-- [ ] update_subscription
-
-## sms - 0% implemented
-- [ ] create_replication_job
-- [ ] delete_replication_job
-- [ ] delete_server_catalog
-- [ ] disassociate_connector
-- [ ] get_connectors
-- [ ] get_replication_jobs
-- [ ] get_replication_runs
-- [ ] get_servers
-- [ ] import_server_catalog
-- [ ] start_on_demand_replication_run
-- [ ] update_replication_job
-
-## snowball - 0% implemented
-- [ ] cancel_cluster
-- [ ] cancel_job
-- [ ] create_address
-- [ ] create_cluster
-- [ ] create_job
-- [ ] describe_address
-- [ ] describe_addresses
-- [ ] describe_cluster
-- [ ] describe_job
-- [ ] get_job_manifest
-- [ ] get_job_unlock_code
-- [ ] get_snowball_usage
-- [ ] list_cluster_jobs
-- [ ] list_clusters
-- [ ] list_compatible_images
-- [ ] list_jobs
-- [ ] update_cluster
-- [ ] update_job
-
-## sns - 53% implemented
-- [ ] add_permission
-- [ ] check_if_phone_number_is_opted_out
-- [ ] confirm_subscription
-- [X] create_platform_application
-- [X] create_platform_endpoint
-- [X] create_topic
-- [X] delete_endpoint
-- [X] delete_platform_application
-- [X] delete_topic
-- [ ] get_endpoint_attributes
-- [ ] get_platform_application_attributes
-- [ ] get_sms_attributes
-- [X] get_subscription_attributes
-- [ ] get_topic_attributes
-- [X] list_endpoints_by_platform_application
-- [ ] list_phone_numbers_opted_out
-- [X] list_platform_applications
-- [X] list_subscriptions
-- [ ] list_subscriptions_by_topic
-- [X] list_topics
-- [ ] opt_in_phone_number
-- [X] publish
-- [ ] remove_permission
-- [X] set_endpoint_attributes
-- [ ] set_platform_application_attributes
-- [ ] set_sms_attributes
-- [X] set_subscription_attributes
-- [ ] set_topic_attributes
-- [X] subscribe
-- [X] unsubscribe
-
-## sqs - 65% implemented
-- [X] add_permission
-- [X] change_message_visibility
-- [ ] change_message_visibility_batch
-- [X] create_queue
-- [X] delete_message
-- [ ] delete_message_batch
-- [X] delete_queue
-- [ ] get_queue_attributes
-- [ ] get_queue_url
-- [X] list_dead_letter_source_queues
-- [ ] list_queue_tags
-- [X] list_queues
-- [X] purge_queue
-- [ ] receive_message
-- [X] remove_permission
-- [X] send_message
-- [ ] send_message_batch
-- [X] set_queue_attributes
-- [X] tag_queue
-- [X] untag_queue
-
-## ssm - 10% implemented
-- [X] add_tags_to_resource
-- [ ] cancel_command
-- [ ] create_activation
-- [ ] create_association
-- [ ] create_association_batch
-- [ ] create_document
-- [ ] create_maintenance_window
-- [ ] create_patch_baseline
-- [ ] create_resource_data_sync
-- [ ] delete_activation
-- [ ] delete_association
-- [ ] delete_document
-- [ ] delete_inventory
-- [ ] delete_maintenance_window
-- [X] delete_parameter
-- [X] delete_parameters
-- [ ] delete_patch_baseline
-- [ ] delete_resource_data_sync
-- [ ] deregister_managed_instance
-- [ ] deregister_patch_baseline_for_patch_group
-- [ ] deregister_target_from_maintenance_window
-- [ ] deregister_task_from_maintenance_window
-- [ ] describe_activations
-- [ ] describe_association
-- [ ] describe_association_execution_targets
-- [ ] describe_association_executions
-- [ ] describe_automation_executions
-- [ ] describe_automation_step_executions
-- [ ] describe_available_patches
-- [ ] describe_document
-- [ ] describe_document_permission
-- [ ] describe_effective_instance_associations
-- [ ] describe_effective_patches_for_patch_baseline
-- [ ] describe_instance_associations_status
-- [ ] describe_instance_information
-- [ ] describe_instance_patch_states
-- [ ] describe_instance_patch_states_for_patch_group
-- [ ] describe_instance_patches
-- [ ] describe_inventory_deletions
-- [ ] describe_maintenance_window_execution_task_invocations
-- [ ] describe_maintenance_window_execution_tasks
-- [ ] describe_maintenance_window_executions
-- [ ] describe_maintenance_window_targets
-- [ ] describe_maintenance_window_tasks
-- [ ] describe_maintenance_windows
-- [ ] describe_parameters
-- [ ] describe_patch_baselines
-- [ ] describe_patch_group_state
-- [ ] describe_patch_groups
-- [ ] get_automation_execution
-- [ ] get_command_invocation
-- [ ] get_default_patch_baseline
-- [ ] get_deployable_patch_snapshot_for_instance
-- [ ] get_document
-- [ ] get_inventory
-- [ ] get_inventory_schema
-- [ ] get_maintenance_window
-- [ ] get_maintenance_window_execution
-- [ ] get_maintenance_window_execution_task
-- [ ] get_maintenance_window_execution_task_invocation
-- [ ] get_maintenance_window_task
-- [X] get_parameter
-- [ ] get_parameter_history
-- [X] get_parameters
-- [X] get_parameters_by_path
-- [ ] get_patch_baseline
-- [ ] get_patch_baseline_for_patch_group
-- [ ] label_parameter_version
-- [ ] list_association_versions
-- [ ] list_associations
-- [ ] list_command_invocations
-- [X] list_commands
-- [ ] list_compliance_items
-- [ ] list_compliance_summaries
-- [ ] list_document_versions
-- [ ] list_documents
-- [ ] list_inventory_entries
-- [ ] list_resource_compliance_summaries
-- [ ] list_resource_data_sync
-- [X] list_tags_for_resource
-- [ ] modify_document_permission
-- [ ] put_compliance_items
-- [ ] put_inventory
-- [X] put_parameter
-- [ ] register_default_patch_baseline
-- [ ] register_patch_baseline_for_patch_group
-- [ ] register_target_with_maintenance_window
-- [ ] register_task_with_maintenance_window
-- [X] remove_tags_from_resource
-- [ ] send_automation_signal
-- [X] send_command
-- [ ] start_associations_once
-- [ ] start_automation_execution
-- [ ] stop_automation_execution
-- [ ] update_association
-- [ ] update_association_status
-- [ ] update_document
-- [ ] update_document_default_version
-- [ ] update_maintenance_window
-- [ ] update_maintenance_window_target
-- [ ] update_maintenance_window_task
-- [ ] update_managed_instance_role
-- [ ] update_patch_baseline
-
-## stepfunctions - 0% implemented
-- [ ] create_activity
-- [ ] create_state_machine
-- [ ] delete_activity
-- [ ] delete_state_machine
-- [ ] describe_activity
-- [ ] describe_execution
-- [ ] describe_state_machine
-- [ ] describe_state_machine_for_execution
-- [ ] get_activity_task
-- [ ] get_execution_history
-- [ ] list_activities
-- [ ] list_executions
-- [ ] list_state_machines
-- [ ] send_task_failure
-- [ ] send_task_heartbeat
-- [ ] send_task_success
-- [ ] start_execution
-- [ ] stop_execution
-- [ ] update_state_machine
-
-## storagegateway - 0% implemented
-- [ ] activate_gateway
-- [ ] add_cache
-- [ ] add_tags_to_resource
-- [ ] add_upload_buffer
-- [ ] add_working_storage
-- [ ] cancel_archival
-- [ ] cancel_retrieval
-- [ ] create_cached_iscsi_volume
-- [ ] create_nfs_file_share
-- [ ] create_smb_file_share
-- [ ] create_snapshot
-- [ ] create_snapshot_from_volume_recovery_point
-- [ ] create_stored_iscsi_volume
-- [ ] create_tape_with_barcode
-- [ ] create_tapes
-- [ ] delete_bandwidth_rate_limit
-- [ ] delete_chap_credentials
-- [ ] delete_file_share
-- [ ] delete_gateway
-- [ ] delete_snapshot_schedule
-- [ ] delete_tape
-- [ ] delete_tape_archive
-- [ ] delete_volume
-- [ ] describe_bandwidth_rate_limit
-- [ ] describe_cache
-- [ ] describe_cached_iscsi_volumes
-- [ ] describe_chap_credentials
-- [ ] describe_gateway_information
-- [ ] describe_maintenance_start_time
-- [ ] describe_nfs_file_shares
-- [ ] describe_smb_file_shares
-- [ ] describe_smb_settings
-- [ ] describe_snapshot_schedule
-- [ ] describe_stored_iscsi_volumes
-- [ ] describe_tape_archives
-- [ ] describe_tape_recovery_points
-- [ ] describe_tapes
-- [ ] describe_upload_buffer
-- [ ] describe_vtl_devices
-- [ ] describe_working_storage
-- [ ] disable_gateway
-- [ ] join_domain
-- [ ] list_file_shares
-- [ ] list_gateways
-- [ ] list_local_disks
-- [ ] list_tags_for_resource
-- [ ] list_tapes
-- [ ] list_volume_initiators
-- [ ] list_volume_recovery_points
-- [ ] list_volumes
-- [ ] notify_when_uploaded
-- [ ] refresh_cache
-- [ ] remove_tags_from_resource
-- [ ] reset_cache
-- [ ] retrieve_tape_archive
-- [ ] retrieve_tape_recovery_point
-- [ ] set_local_console_password
-- [ ] set_smb_guest_password
-- [ ] shutdown_gateway
-- [ ] start_gateway
-- [ ] update_bandwidth_rate_limit
-- [ ] update_chap_credentials
-- [ ] update_gateway_information
-- [ ] update_gateway_software_now
-- [ ] update_maintenance_start_time
-- [ ] update_nfs_file_share
-- [ ] update_smb_file_share
-- [ ] update_snapshot_schedule
-- [ ] update_vtl_device_type
-
-## sts - 42% implemented
-- [X] assume_role
-- [ ] assume_role_with_saml
-- [ ] assume_role_with_web_identity
-- [ ] decode_authorization_message
-- [ ] get_caller_identity
-- [X] get_federation_token
-- [X] get_session_token
-
-## support - 0% implemented
-- [ ] add_attachments_to_set
-- [ ] add_communication_to_case
-- [ ] create_case
-- [ ] describe_attachment
-- [ ] describe_cases
-- [ ] describe_communications
-- [ ] describe_services
-- [ ] describe_severity_levels
-- [ ] describe_trusted_advisor_check_refresh_statuses
-- [ ] describe_trusted_advisor_check_result
-- [ ] describe_trusted_advisor_check_summaries
-- [ ] describe_trusted_advisor_checks
-- [ ] refresh_trusted_advisor_check
-- [ ] resolve_case
-
-## swf - 58% implemented
-- [ ] count_closed_workflow_executions
-- [ ] count_open_workflow_executions
-- [X] count_pending_activity_tasks
-- [X] count_pending_decision_tasks
-- [ ] deprecate_activity_type
-- [X] deprecate_domain
-- [ ] deprecate_workflow_type
-- [ ] describe_activity_type
-- [X] describe_domain
-- [X] describe_workflow_execution
-- [ ] describe_workflow_type
-- [ ] get_workflow_execution_history
-- [ ] list_activity_types
-- [X] list_closed_workflow_executions
-- [X] list_domains
-- [X] list_open_workflow_executions
-- [ ] list_workflow_types
-- [X] poll_for_activity_task
-- [X] poll_for_decision_task
-- [X] record_activity_task_heartbeat
-- [ ] register_activity_type
-- [X] register_domain
-- [ ] register_workflow_type
-- [ ] request_cancel_workflow_execution
-- [ ] respond_activity_task_canceled
-- [X] respond_activity_task_completed
-- [X] respond_activity_task_failed
-- [X] respond_decision_task_completed
-- [X] signal_workflow_execution
-- [X] start_workflow_execution
-- [X] terminate_workflow_execution
-
-## transcribe - 0% implemented
-- [ ] create_vocabulary
-- [ ] delete_vocabulary
-- [ ] get_transcription_job
-- [ ] get_vocabulary
-- [ ] list_transcription_jobs
-- [ ] list_vocabularies
-- [ ] start_transcription_job
-- [ ] update_vocabulary
-
-## translate - 0% implemented
-- [ ] translate_text
-
-## waf - 0% implemented
-- [ ] create_byte_match_set
-- [ ] create_geo_match_set
-- [ ] create_ip_set
-- [ ] create_rate_based_rule
-- [ ] create_regex_match_set
-- [ ] create_regex_pattern_set
-- [ ] create_rule
-- [ ] create_rule_group
-- [ ] create_size_constraint_set
-- [ ] create_sql_injection_match_set
-- [ ] create_web_acl
-- [ ] create_xss_match_set
-- [ ] delete_byte_match_set
-- [ ] delete_geo_match_set
-- [ ] delete_ip_set
-- [ ] delete_permission_policy
-- [ ] delete_rate_based_rule
-- [ ] delete_regex_match_set
-- [ ] delete_regex_pattern_set
-- [ ] delete_rule
-- [ ] delete_rule_group
-- [ ] delete_size_constraint_set
-- [ ] delete_sql_injection_match_set
-- [ ] delete_web_acl
-- [ ] delete_xss_match_set
-- [ ] get_byte_match_set
-- [ ] get_change_token
-- [ ] get_change_token_status
-- [ ] get_geo_match_set
-- [ ] get_ip_set
-- [ ] get_permission_policy
-- [ ] get_rate_based_rule
-- [ ] get_rate_based_rule_managed_keys
-- [ ] get_regex_match_set
-- [ ] get_regex_pattern_set
-- [ ] get_rule
-- [ ] get_rule_group
-- [ ] get_sampled_requests
-- [ ] get_size_constraint_set
-- [ ] get_sql_injection_match_set
-- [ ] get_web_acl
-- [ ] get_xss_match_set
-- [ ] list_activated_rules_in_rule_group
-- [ ] list_byte_match_sets
-- [ ] list_geo_match_sets
-- [ ] list_ip_sets
-- [ ] list_rate_based_rules
-- [ ] list_regex_match_sets
-- [ ] list_regex_pattern_sets
-- [ ] list_rule_groups
-- [ ] list_rules
-- [ ] list_size_constraint_sets
-- [ ] list_sql_injection_match_sets
-- [ ] list_subscribed_rule_groups
-- [ ] list_web_acls
-- [ ] list_xss_match_sets
-- [ ] put_permission_policy
-- [ ] update_byte_match_set
-- [ ] update_geo_match_set
-- [ ] update_ip_set
-- [ ] update_rate_based_rule
-- [ ] update_regex_match_set
-- [ ] update_regex_pattern_set
-- [ ] update_rule
-- [ ] update_rule_group
-- [ ] update_size_constraint_set
-- [ ] update_sql_injection_match_set
-- [ ] update_web_acl
-- [ ] update_xss_match_set
-
-## waf-regional - 0% implemented
-- [ ] associate_web_acl
-- [ ] create_byte_match_set
-- [ ] create_geo_match_set
-- [ ] create_ip_set
-- [ ] create_rate_based_rule
-- [ ] create_regex_match_set
-- [ ] create_regex_pattern_set
-- [ ] create_rule
-- [ ] create_rule_group
-- [ ] create_size_constraint_set
-- [ ] create_sql_injection_match_set
-- [ ] create_web_acl
-- [ ] create_xss_match_set
-- [ ] delete_byte_match_set
-- [ ] delete_geo_match_set
-- [ ] delete_ip_set
-- [ ] delete_permission_policy
-- [ ] delete_rate_based_rule
-- [ ] delete_regex_match_set
-- [ ] delete_regex_pattern_set
-- [ ] delete_rule
-- [ ] delete_rule_group
-- [ ] delete_size_constraint_set
-- [ ] delete_sql_injection_match_set
-- [ ] delete_web_acl
-- [ ] delete_xss_match_set
-- [ ] disassociate_web_acl
-- [ ] get_byte_match_set
-- [ ] get_change_token
-- [ ] get_change_token_status
-- [ ] get_geo_match_set
-- [ ] get_ip_set
-- [ ] get_permission_policy
-- [ ] get_rate_based_rule
-- [ ] get_rate_based_rule_managed_keys
-- [ ] get_regex_match_set
-- [ ] get_regex_pattern_set
-- [ ] get_rule
-- [ ] get_rule_group
-- [ ] get_sampled_requests
-- [ ] get_size_constraint_set
-- [ ] get_sql_injection_match_set
-- [ ] get_web_acl
-- [ ] get_web_acl_for_resource
-- [ ] get_xss_match_set
-- [ ] list_activated_rules_in_rule_group
-- [ ] list_byte_match_sets
-- [ ] list_geo_match_sets
-- [ ] list_ip_sets
-- [ ] list_rate_based_rules
-- [ ] list_regex_match_sets
-- [ ] list_regex_pattern_sets
-- [ ] list_resources_for_web_acl
-- [ ] list_rule_groups
-- [ ] list_rules
-- [ ] list_size_constraint_sets
-- [ ] list_sql_injection_match_sets
-- [ ] list_subscribed_rule_groups
-- [ ] list_web_acls
-- [ ] list_xss_match_sets
-- [ ] put_permission_policy
-- [ ] update_byte_match_set
-- [ ] update_geo_match_set
-- [ ] update_ip_set
-- [ ] update_rate_based_rule
-- [ ] update_regex_match_set
-- [ ] update_regex_pattern_set
-- [ ] update_rule
-- [ ] update_rule_group
-- [ ] update_size_constraint_set
-- [ ] update_sql_injection_match_set
-- [ ] update_web_acl
-- [ ] update_xss_match_set
-
-## workdocs - 0% implemented
-- [ ] abort_document_version_upload
-- [ ] activate_user
-- [ ] add_resource_permissions
-- [ ] create_comment
-- [ ] create_custom_metadata
-- [ ] create_folder
-- [ ] create_labels
-- [ ] create_notification_subscription
-- [ ] create_user
-- [ ] deactivate_user
-- [ ] delete_comment
-- [ ] delete_custom_metadata
-- [ ] delete_document
-- [ ] delete_folder
-- [ ] delete_folder_contents
-- [ ] delete_labels
-- [ ] delete_notification_subscription
-- [ ] delete_user
-- [ ] describe_activities
-- [ ] describe_comments
-- [ ] describe_document_versions
-- [ ] describe_folder_contents
-- [ ] describe_groups
-- [ ] describe_notification_subscriptions
-- [ ] describe_resource_permissions
-- [ ] describe_root_folders
-- [ ] describe_users
-- [ ] get_current_user
-- [ ] get_document
-- [ ] get_document_path
-- [ ] get_document_version
-- [ ] get_folder
-- [ ] get_folder_path
-- [ ] initiate_document_version_upload
-- [ ] remove_all_resource_permissions
-- [ ] remove_resource_permission
-- [ ] update_document
-- [ ] update_document_version
-- [ ] update_folder
-- [ ] update_user
-
-## workmail - 0% implemented
-- [ ] associate_delegate_to_resource
-- [ ] associate_member_to_group
-- [ ] create_alias
-- [ ] create_group
-- [ ] create_resource
-- [ ] create_user
-- [ ] delete_alias
-- [ ] delete_group
-- [ ] delete_mailbox_permissions
-- [ ] delete_resource
-- [ ] delete_user
-- [ ] deregister_from_work_mail
-- [ ] describe_group
-- [ ] describe_organization
-- [ ] describe_resource
-- [ ] describe_user
-- [ ] disassociate_delegate_from_resource
-- [ ] disassociate_member_from_group
-- [ ] list_aliases
-- [ ] list_group_members
-- [ ] list_groups
-- [ ] list_mailbox_permissions
-- [ ] list_organizations
-- [ ] list_resource_delegates
-- [ ] list_resources
-- [ ] list_users
-- [ ] put_mailbox_permissions
-- [ ] register_to_work_mail
-- [ ] reset_password
-- [ ] update_primary_email_address
-- [ ] update_resource
-
-## workspaces - 0% implemented
-- [ ] associate_ip_groups
-- [ ] authorize_ip_rules
-- [ ] create_ip_group
-- [ ] create_tags
-- [ ] create_workspaces
-- [ ] delete_ip_group
-- [ ] delete_tags
-- [ ] describe_ip_groups
-- [ ] describe_tags
-- [ ] describe_workspace_bundles
-- [ ] describe_workspace_directories
-- [ ] describe_workspaces
-- [ ] describe_workspaces_connection_status
-- [ ] disassociate_ip_groups
-- [ ] modify_workspace_properties
-- [ ] modify_workspace_state
-- [ ] reboot_workspaces
-- [ ] rebuild_workspaces
-- [ ] revoke_ip_rules
-- [ ] start_workspaces
-- [ ] stop_workspaces
-- [ ] terminate_workspaces
-- [ ] update_rules_of_ip_group
-
-## xray - 0% implemented
-- [ ] batch_get_traces
-- [ ] get_encryption_config
-- [ ] get_service_graph
-- [ ] get_trace_graph
-- [ ] get_trace_summaries
-- [ ] put_encryption_config
-- [ ] put_telemetry_records
-- [ ] put_trace_segments
+
+## acm - 41% implemented
+- [X] add_tags_to_certificate
+- [X] delete_certificate
+- [ ] describe_certificate
+- [ ] export_certificate
+- [X] get_certificate
+- [ ] import_certificate
+- [ ] list_certificates
+- [ ] list_tags_for_certificate
+- [X] remove_tags_from_certificate
+- [X] request_certificate
+- [ ] resend_validation_email
+- [ ] update_certificate_options
+
+## acm-pca - 0% implemented
+- [ ] create_certificate_authority
+- [ ] create_certificate_authority_audit_report
+- [ ] delete_certificate_authority
+- [ ] describe_certificate_authority
+- [ ] describe_certificate_authority_audit_report
+- [ ] get_certificate
+- [ ] get_certificate_authority_certificate
+- [ ] get_certificate_authority_csr
+- [ ] import_certificate_authority_certificate
+- [ ] issue_certificate
+- [ ] list_certificate_authorities
+- [ ] list_tags
+- [ ] revoke_certificate
+- [ ] tag_certificate_authority
+- [ ] untag_certificate_authority
+- [ ] update_certificate_authority
+
+## alexaforbusiness - 0% implemented
+- [ ] associate_contact_with_address_book
+- [ ] associate_device_with_room
+- [ ] associate_skill_group_with_room
+- [ ] create_address_book
+- [ ] create_contact
+- [ ] create_profile
+- [ ] create_room
+- [ ] create_skill_group
+- [ ] create_user
+- [ ] delete_address_book
+- [ ] delete_contact
+- [ ] delete_profile
+- [ ] delete_room
+- [ ] delete_room_skill_parameter
+- [ ] delete_skill_group
+- [ ] delete_user
+- [ ] disassociate_contact_from_address_book
+- [ ] disassociate_device_from_room
+- [ ] disassociate_skill_group_from_room
+- [ ] get_address_book
+- [ ] get_contact
+- [ ] get_device
+- [ ] get_profile
+- [ ] get_room
+- [ ] get_room_skill_parameter
+- [ ] get_skill_group
+- [ ] list_skills
+- [ ] list_tags
+- [ ] put_room_skill_parameter
+- [ ] resolve_room
+- [ ] revoke_invitation
+- [ ] search_address_books
+- [ ] search_contacts
+- [ ] search_devices
+- [ ] search_profiles
+- [ ] search_rooms
+- [ ] search_skill_groups
+- [ ] search_users
+- [ ] send_invitation
+- [ ] start_device_sync
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_address_book
+- [ ] update_contact
+- [ ] update_device
+- [ ] update_profile
+- [ ] update_room
+- [ ] update_skill_group
+
+## apigateway - 24% implemented
+- [ ] create_api_key
+- [ ] create_authorizer
+- [ ] create_base_path_mapping
+- [X] create_deployment
+- [ ] create_documentation_part
+- [ ] create_documentation_version
+- [ ] create_domain_name
+- [ ] create_model
+- [ ] create_request_validator
+- [X] create_resource
+- [X] create_rest_api
+- [X] create_stage
+- [X] create_usage_plan
+- [X] create_usage_plan_key
+- [ ] create_vpc_link
+- [ ] delete_api_key
+- [ ] delete_authorizer
+- [ ] delete_base_path_mapping
+- [ ] delete_client_certificate
+- [X] delete_deployment
+- [ ] delete_documentation_part
+- [ ] delete_documentation_version
+- [ ] delete_domain_name
+- [ ] delete_gateway_response
+- [X] delete_integration
+- [X] delete_integration_response
+- [ ] delete_method
+- [X] delete_method_response
+- [ ] delete_model
+- [ ] delete_request_validator
+- [X] delete_resource
+- [X] delete_rest_api
+- [ ] delete_stage
+- [X] delete_usage_plan
+- [X] delete_usage_plan_key
+- [ ] delete_vpc_link
+- [ ] flush_stage_authorizers_cache
+- [ ] flush_stage_cache
+- [ ] generate_client_certificate
+- [ ] get_account
+- [ ] get_api_key
+- [ ] get_api_keys
+- [ ] get_authorizer
+- [ ] get_authorizers
+- [ ] get_base_path_mapping
+- [ ] get_base_path_mappings
+- [ ] get_client_certificate
+- [ ] get_client_certificates
+- [X] get_deployment
+- [X] get_deployments
+- [ ] get_documentation_part
+- [ ] get_documentation_parts
+- [ ] get_documentation_version
+- [ ] get_documentation_versions
+- [ ] get_domain_name
+- [ ] get_domain_names
+- [ ] get_export
+- [ ] get_gateway_response
+- [ ] get_gateway_responses
+- [X] get_integration
+- [X] get_integration_response
+- [X] get_method
+- [X] get_method_response
+- [ ] get_model
+- [ ] get_model_template
+- [ ] get_models
+- [ ] get_request_validator
+- [ ] get_request_validators
+- [X] get_resource
+- [ ] get_resources
+- [X] get_rest_api
+- [ ] get_rest_apis
+- [ ] get_sdk
+- [ ] get_sdk_type
+- [ ] get_sdk_types
+- [X] get_stage
+- [X] get_stages
+- [ ] get_tags
+- [ ] get_usage
+- [X] get_usage_plan
+- [X] get_usage_plan_key
+- [X] get_usage_plan_keys
+- [X] get_usage_plans
+- [ ] get_vpc_link
+- [ ] get_vpc_links
+- [ ] import_api_keys
+- [ ] import_documentation_parts
+- [ ] import_rest_api
+- [ ] put_gateway_response
+- [ ] put_integration
+- [ ] put_integration_response
+- [ ] put_method
+- [ ] put_method_response
+- [ ] put_rest_api
+- [ ] tag_resource
+- [ ] test_invoke_authorizer
+- [ ] test_invoke_method
+- [ ] untag_resource
+- [ ] update_account
+- [ ] update_api_key
+- [ ] update_authorizer
+- [ ] update_base_path_mapping
+- [ ] update_client_certificate
+- [ ] update_deployment
+- [ ] update_documentation_part
+- [ ] update_documentation_version
+- [ ] update_domain_name
+- [ ] update_gateway_response
+- [ ] update_integration
+- [ ] update_integration_response
+- [ ] update_method
+- [ ] update_method_response
+- [ ] update_model
+- [ ] update_request_validator
+- [ ] update_resource
+- [ ] update_rest_api
+- [X] update_stage
+- [ ] update_usage
+- [ ] update_usage_plan
+- [ ] update_vpc_link
+
+## application-autoscaling - 0% implemented
+- [ ] delete_scaling_policy
+- [ ] delete_scheduled_action
+- [ ] deregister_scalable_target
+- [ ] describe_scalable_targets
+- [ ] describe_scaling_activities
+- [ ] describe_scaling_policies
+- [ ] describe_scheduled_actions
+- [ ] put_scaling_policy
+- [ ] put_scheduled_action
+- [ ] register_scalable_target
+
+## appstream - 0% implemented
+- [ ] associate_fleet
+- [ ] copy_image
+- [ ] create_directory_config
+- [ ] create_fleet
+- [ ] create_image_builder
+- [ ] create_image_builder_streaming_url
+- [ ] create_stack
+- [ ] create_streaming_url
+- [ ] delete_directory_config
+- [ ] delete_fleet
+- [ ] delete_image
+- [ ] delete_image_builder
+- [ ] delete_stack
+- [ ] describe_directory_configs
+- [ ] describe_fleets
+- [ ] describe_image_builders
+- [ ] describe_images
+- [ ] describe_sessions
+- [ ] describe_stacks
+- [ ] disassociate_fleet
+- [ ] expire_session
+- [ ] list_associated_fleets
+- [ ] list_associated_stacks
+- [ ] list_tags_for_resource
+- [ ] start_fleet
+- [ ] start_image_builder
+- [ ] stop_fleet
+- [ ] stop_image_builder
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_directory_config
+- [ ] update_fleet
+- [ ] update_stack
+
+## appsync - 0% implemented
+- [ ] create_api_key
+- [ ] create_data_source
+- [ ] create_graphql_api
+- [ ] create_resolver
+- [ ] create_type
+- [ ] delete_api_key
+- [ ] delete_data_source
+- [ ] delete_graphql_api
+- [ ] delete_resolver
+- [ ] delete_type
+- [ ] get_data_source
+- [ ] get_graphql_api
+- [ ] get_introspection_schema
+- [ ] get_resolver
+- [ ] get_schema_creation_status
+- [ ] get_type
+- [ ] list_api_keys
+- [ ] list_data_sources
+- [ ] list_graphql_apis
+- [ ] list_resolvers
+- [ ] list_types
+- [ ] start_schema_creation
+- [ ] update_api_key
+- [ ] update_data_source
+- [ ] update_graphql_api
+- [ ] update_resolver
+- [ ] update_type
+
+## athena - 0% implemented
+- [ ] batch_get_named_query
+- [ ] batch_get_query_execution
+- [ ] create_named_query
+- [ ] delete_named_query
+- [ ] get_named_query
+- [ ] get_query_execution
+- [ ] get_query_results
+- [ ] list_named_queries
+- [ ] list_query_executions
+- [ ] start_query_execution
+- [ ] stop_query_execution
+
+## autoscaling - 44% implemented
+- [X] attach_instances
+- [X] attach_load_balancer_target_groups
+- [X] attach_load_balancers
+- [ ] complete_lifecycle_action
+- [X] create_auto_scaling_group
+- [X] create_launch_configuration
+- [X] create_or_update_tags
+- [X] delete_auto_scaling_group
+- [X] delete_launch_configuration
+- [ ] delete_lifecycle_hook
+- [ ] delete_notification_configuration
+- [X] delete_policy
+- [ ] delete_scheduled_action
+- [ ] delete_tags
+- [ ] describe_account_limits
+- [ ] describe_adjustment_types
+- [X] describe_auto_scaling_groups
+- [X] describe_auto_scaling_instances
+- [ ] describe_auto_scaling_notification_types
+- [X] describe_launch_configurations
+- [ ] describe_lifecycle_hook_types
+- [ ] describe_lifecycle_hooks
+- [X] describe_load_balancer_target_groups
+- [X] describe_load_balancers
+- [ ] describe_metric_collection_types
+- [ ] describe_notification_configurations
+- [X] describe_policies
+- [ ] describe_scaling_activities
+- [ ] describe_scaling_process_types
+- [ ] describe_scheduled_actions
+- [ ] describe_tags
+- [ ] describe_termination_policy_types
+- [X] detach_instances
+- [X] detach_load_balancer_target_groups
+- [X] detach_load_balancers
+- [ ] disable_metrics_collection
+- [ ] enable_metrics_collection
+- [ ] enter_standby
+- [X] execute_policy
+- [ ] exit_standby
+- [ ] put_lifecycle_hook
+- [ ] put_notification_configuration
+- [ ] put_scaling_policy
+- [ ] put_scheduled_update_group_action
+- [ ] record_lifecycle_action_heartbeat
+- [ ] resume_processes
+- [X] set_desired_capacity
+- [X] set_instance_health
+- [ ] set_instance_protection
+- [X] suspend_processes
+- [ ] terminate_instance_in_auto_scaling_group
+- [X] update_auto_scaling_group
+
+## autoscaling-plans - 0% implemented
+- [ ] create_scaling_plan
+- [ ] delete_scaling_plan
+- [ ] describe_scaling_plan_resources
+- [ ] describe_scaling_plans
+
+## batch - 93% implemented
+- [ ] cancel_job
+- [X] create_compute_environment
+- [X] create_job_queue
+- [X] delete_compute_environment
+- [X] delete_job_queue
+- [X] deregister_job_definition
+- [X] describe_compute_environments
+- [X] describe_job_definitions
+- [X] describe_job_queues
+- [X] describe_jobs
+- [X] list_jobs
+- [X] register_job_definition
+- [X] submit_job
+- [X] terminate_job
+- [X] update_compute_environment
+- [X] update_job_queue
+
+## budgets - 0% implemented
+- [ ] create_budget
+- [ ] create_notification
+- [ ] create_subscriber
+- [ ] delete_budget
+- [ ] delete_notification
+- [ ] delete_subscriber
+- [ ] describe_budget
+- [ ] describe_budgets
+- [ ] describe_notifications_for_budget
+- [ ] describe_subscribers_for_notification
+- [ ] update_budget
+- [ ] update_notification
+- [ ] update_subscriber
+
+## ce - 0% implemented
+- [ ] get_cost_and_usage
+- [ ] get_dimension_values
+- [ ] get_reservation_coverage
+- [ ] get_reservation_purchase_recommendation
+- [ ] get_reservation_utilization
+- [ ] get_tags
+
+## cloud9 - 0% implemented
+- [ ] create_environment_ec2
+- [ ] create_environment_membership
+- [ ] delete_environment
+- [ ] delete_environment_membership
+- [ ] describe_environment_memberships
+- [ ] describe_environment_status
+- [ ] describe_environments
+- [ ] list_environments
+- [ ] update_environment
+- [ ] update_environment_membership
+
+## clouddirectory - 0% implemented
+- [ ] add_facet_to_object
+- [ ] apply_schema
+- [ ] attach_object
+- [ ] attach_policy
+- [ ] attach_to_index
+- [ ] attach_typed_link
+- [ ] batch_read
+- [ ] batch_write
+- [ ] create_directory
+- [ ] create_facet
+- [ ] create_index
+- [ ] create_object
+- [ ] create_schema
+- [ ] create_typed_link_facet
+- [ ] delete_directory
+- [ ] delete_facet
+- [ ] delete_object
+- [ ] delete_schema
+- [ ] delete_typed_link_facet
+- [ ] detach_from_index
+- [ ] detach_object
+- [ ] detach_policy
+- [ ] detach_typed_link
+- [ ] disable_directory
+- [ ] enable_directory
+- [ ] get_applied_schema_version
+- [ ] get_directory
+- [ ] get_facet
+- [ ] get_object_attributes
+- [ ] get_object_information
+- [ ] get_schema_as_json
+- [ ] get_typed_link_facet_information
+- [ ] list_applied_schema_arns
+- [ ] list_attached_indices
+- [ ] list_development_schema_arns
+- [ ] list_directories
+- [ ] list_facet_attributes
+- [ ] list_facet_names
+- [ ] list_incoming_typed_links
+- [ ] list_index
+- [ ] list_object_attributes
+- [ ] list_object_children
+- [ ] list_object_parent_paths
+- [ ] list_object_parents
+- [ ] list_object_policies
+- [ ] list_outgoing_typed_links
+- [ ] list_policy_attachments
+- [ ] list_published_schema_arns
+- [ ] list_tags_for_resource
+- [ ] list_typed_link_facet_attributes
+- [ ] list_typed_link_facet_names
+- [ ] lookup_policy
+- [ ] publish_schema
+- [ ] put_schema_from_json
+- [ ] remove_facet_from_object
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_facet
+- [ ] update_object_attributes
+- [ ] update_schema
+- [ ] update_typed_link_facet
+- [ ] upgrade_applied_schema
+- [ ] upgrade_published_schema
+
+## cloudformation - 21% implemented
+- [ ] cancel_update_stack
+- [ ] continue_update_rollback
+- [X] create_change_set
+- [X] create_stack
+- [ ] create_stack_instances
+- [ ] create_stack_set
+- [ ] delete_change_set
+- [X] delete_stack
+- [ ] delete_stack_instances
+- [ ] delete_stack_set
+- [ ] describe_account_limits
+- [ ] describe_change_set
+- [ ] describe_stack_events
+- [ ] describe_stack_instance
+- [ ] describe_stack_resource
+- [ ] describe_stack_resources
+- [ ] describe_stack_set
+- [ ] describe_stack_set_operation
+- [X] describe_stacks
+- [ ] estimate_template_cost
+- [X] execute_change_set
+- [ ] get_stack_policy
+- [ ] get_template
+- [ ] get_template_summary
+- [ ] list_change_sets
+- [X] list_exports
+- [ ] list_imports
+- [ ] list_stack_instances
+- [X] list_stack_resources
+- [ ] list_stack_set_operation_results
+- [ ] list_stack_set_operations
+- [ ] list_stack_sets
+- [X] list_stacks
+- [ ] set_stack_policy
+- [ ] signal_resource
+- [ ] stop_stack_set_operation
+- [X] update_stack
+- [ ] update_stack_instances
+- [ ] update_stack_set
+- [ ] update_termination_protection
+- [ ] validate_template
+
+## cloudfront - 0% implemented
+- [ ] create_cloud_front_origin_access_identity
+- [ ] create_distribution
+- [ ] create_distribution_with_tags
+- [ ] create_field_level_encryption_config
+- [ ] create_field_level_encryption_profile
+- [ ] create_invalidation
+- [ ] create_public_key
+- [ ] create_streaming_distribution
+- [ ] create_streaming_distribution_with_tags
+- [ ] delete_cloud_front_origin_access_identity
+- [ ] delete_distribution
+- [ ] delete_field_level_encryption_config
+- [ ] delete_field_level_encryption_profile
+- [ ] delete_public_key
+- [ ] delete_service_linked_role
+- [ ] delete_streaming_distribution
+- [ ] get_cloud_front_origin_access_identity
+- [ ] get_cloud_front_origin_access_identity_config
+- [ ] get_distribution
+- [ ] get_distribution_config
+- [ ] get_field_level_encryption
+- [ ] get_field_level_encryption_config
+- [ ] get_field_level_encryption_profile
+- [ ] get_field_level_encryption_profile_config
+- [ ] get_invalidation
+- [ ] get_public_key
+- [ ] get_public_key_config
+- [ ] get_streaming_distribution
+- [ ] get_streaming_distribution_config
+- [ ] list_cloud_front_origin_access_identities
+- [ ] list_distributions
+- [ ] list_distributions_by_web_acl_id
+- [ ] list_field_level_encryption_configs
+- [ ] list_field_level_encryption_profiles
+- [ ] list_invalidations
+- [ ] list_public_keys
+- [ ] list_streaming_distributions
+- [ ] list_tags_for_resource
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_cloud_front_origin_access_identity
+- [ ] update_distribution
+- [ ] update_field_level_encryption_config
+- [ ] update_field_level_encryption_profile
+- [ ] update_public_key
+- [ ] update_streaming_distribution
+
+## cloudhsm - 0% implemented
+- [ ] add_tags_to_resource
+- [ ] create_hapg
+- [ ] create_hsm
+- [ ] create_luna_client
+- [ ] delete_hapg
+- [ ] delete_hsm
+- [ ] delete_luna_client
+- [ ] describe_hapg
+- [ ] describe_hsm
+- [ ] describe_luna_client
+- [ ] get_config
+- [ ] list_available_zones
+- [ ] list_hapgs
+- [ ] list_hsms
+- [ ] list_luna_clients
+- [ ] list_tags_for_resource
+- [ ] modify_hapg
+- [ ] modify_hsm
+- [ ] modify_luna_client
+- [ ] remove_tags_from_resource
+
+## cloudhsmv2 - 0% implemented
+- [ ] create_cluster
+- [ ] create_hsm
+- [ ] delete_cluster
+- [ ] delete_hsm
+- [ ] describe_backups
+- [ ] describe_clusters
+- [ ] initialize_cluster
+- [ ] list_tags
+- [ ] tag_resource
+- [ ] untag_resource
+
+## cloudsearch - 0% implemented
+- [ ] build_suggesters
+- [ ] create_domain
+- [ ] define_analysis_scheme
+- [ ] define_expression
+- [ ] define_index_field
+- [ ] define_suggester
+- [ ] delete_analysis_scheme
+- [ ] delete_domain
+- [ ] delete_expression
+- [ ] delete_index_field
+- [ ] delete_suggester
+- [ ] describe_analysis_schemes
+- [ ] describe_availability_options
+- [ ] describe_domains
+- [ ] describe_expressions
+- [ ] describe_index_fields
+- [ ] describe_scaling_parameters
+- [ ] describe_service_access_policies
+- [ ] describe_suggesters
+- [ ] index_documents
+- [ ] list_domain_names
+- [ ] update_availability_options
+- [ ] update_scaling_parameters
+- [ ] update_service_access_policies
+
+## cloudsearchdomain - 0% implemented
+- [ ] search
+- [ ] suggest
+- [ ] upload_documents
+
+## cloudtrail - 0% implemented
+- [ ] add_tags
+- [ ] create_trail
+- [ ] delete_trail
+- [ ] describe_trails
+- [ ] get_event_selectors
+- [ ] get_trail_status
+- [ ] list_public_keys
+- [ ] list_tags
+- [ ] lookup_events
+- [ ] put_event_selectors
+- [ ] remove_tags
+- [ ] start_logging
+- [ ] stop_logging
+- [ ] update_trail
+
+## cloudwatch - 56% implemented
+- [X] delete_alarms
+- [X] delete_dashboards
+- [ ] describe_alarm_history
+- [ ] describe_alarms
+- [ ] describe_alarms_for_metric
+- [ ] disable_alarm_actions
+- [ ] enable_alarm_actions
+- [X] get_dashboard
+- [ ] get_metric_data
+- [X] get_metric_statistics
+- [X] list_dashboards
+- [ ] list_metrics
+- [X] put_dashboard
+- [X] put_metric_alarm
+- [X] put_metric_data
+- [X] set_alarm_state
+
+## codebuild - 0% implemented
+- [ ] batch_delete_builds
+- [ ] batch_get_builds
+- [ ] batch_get_projects
+- [ ] create_project
+- [ ] create_webhook
+- [ ] delete_project
+- [ ] delete_webhook
+- [ ] invalidate_project_cache
+- [ ] list_builds
+- [ ] list_builds_for_project
+- [ ] list_curated_environment_images
+- [ ] list_projects
+- [ ] start_build
+- [ ] stop_build
+- [ ] update_project
+- [ ] update_webhook
+
+## codecommit - 0% implemented
+- [ ] batch_get_repositories
+- [ ] create_branch
+- [ ] create_pull_request
+- [ ] create_repository
+- [ ] delete_branch
+- [ ] delete_comment_content
+- [ ] delete_repository
+- [ ] describe_pull_request_events
+- [ ] get_blob
+- [ ] get_branch
+- [ ] get_comment
+- [ ] get_comments_for_compared_commit
+- [ ] get_comments_for_pull_request
+- [ ] get_commit
+- [ ] get_differences
+- [ ] get_merge_conflicts
+- [ ] get_pull_request
+- [ ] get_repository
+- [ ] get_repository_triggers
+- [ ] list_branches
+- [ ] list_pull_requests
+- [ ] list_repositories
+- [ ] merge_pull_request_by_fast_forward
+- [ ] post_comment_for_compared_commit
+- [ ] post_comment_for_pull_request
+- [ ] post_comment_reply
+- [ ] put_file
+- [ ] put_repository_triggers
+- [ ] test_repository_triggers
+- [ ] update_comment
+- [ ] update_default_branch
+- [ ] update_pull_request_description
+- [ ] update_pull_request_status
+- [ ] update_pull_request_title
+- [ ] update_repository_description
+- [ ] update_repository_name
+
+## codedeploy - 0% implemented
+- [ ] add_tags_to_on_premises_instances
+- [ ] batch_get_application_revisions
+- [ ] batch_get_applications
+- [ ] batch_get_deployment_groups
+- [ ] batch_get_deployment_instances
+- [ ] batch_get_deployments
+- [ ] batch_get_on_premises_instances
+- [ ] continue_deployment
+- [ ] create_application
+- [ ] create_deployment
+- [ ] create_deployment_config
+- [ ] create_deployment_group
+- [ ] delete_application
+- [ ] delete_deployment_config
+- [ ] delete_deployment_group
+- [ ] delete_git_hub_account_token
+- [ ] deregister_on_premises_instance
+- [ ] get_application
+- [ ] get_application_revision
+- [ ] get_deployment
+- [ ] get_deployment_config
+- [ ] get_deployment_group
+- [ ] get_deployment_instance
+- [ ] get_on_premises_instance
+- [ ] list_application_revisions
+- [ ] list_applications
+- [ ] list_deployment_configs
+- [ ] list_deployment_groups
+- [ ] list_deployment_instances
+- [ ] list_deployments
+- [ ] list_git_hub_account_token_names
+- [ ] list_on_premises_instances
+- [ ] put_lifecycle_event_hook_execution_status
+- [ ] register_application_revision
+- [ ] register_on_premises_instance
+- [ ] remove_tags_from_on_premises_instances
+- [ ] skip_wait_time_for_instance_termination
+- [ ] stop_deployment
+- [ ] update_application
+- [ ] update_deployment_group
+
+## codepipeline - 0% implemented
+- [ ] acknowledge_job
+- [ ] acknowledge_third_party_job
+- [ ] create_custom_action_type
+- [ ] create_pipeline
+- [ ] delete_custom_action_type
+- [ ] delete_pipeline
+- [ ] disable_stage_transition
+- [ ] enable_stage_transition
+- [ ] get_job_details
+- [ ] get_pipeline
+- [ ] get_pipeline_execution
+- [ ] get_pipeline_state
+- [ ] get_third_party_job_details
+- [ ] list_action_types
+- [ ] list_pipeline_executions
+- [ ] list_pipelines
+- [ ] poll_for_jobs
+- [ ] poll_for_third_party_jobs
+- [ ] put_action_revision
+- [ ] put_approval_result
+- [ ] put_job_failure_result
+- [ ] put_job_success_result
+- [ ] put_third_party_job_failure_result
+- [ ] put_third_party_job_success_result
+- [ ] retry_stage_execution
+- [ ] start_pipeline_execution
+- [ ] update_pipeline
+
+## codestar - 0% implemented
+- [ ] associate_team_member
+- [ ] create_project
+- [ ] create_user_profile
+- [ ] delete_project
+- [ ] delete_user_profile
+- [ ] describe_project
+- [ ] describe_user_profile
+- [ ] disassociate_team_member
+- [ ] list_projects
+- [ ] list_resources
+- [ ] list_tags_for_project
+- [ ] list_team_members
+- [ ] list_user_profiles
+- [ ] tag_project
+- [ ] untag_project
+- [ ] update_project
+- [ ] update_team_member
+- [ ] update_user_profile
+
+## cognito-identity - 0% implemented
+- [ ] create_identity_pool
+- [ ] delete_identities
+- [ ] delete_identity_pool
+- [ ] describe_identity
+- [ ] describe_identity_pool
+- [ ] get_credentials_for_identity
+- [ ] get_id
+- [ ] get_identity_pool_roles
+- [ ] get_open_id_token
+- [ ] get_open_id_token_for_developer_identity
+- [ ] list_identities
+- [ ] list_identity_pools
+- [ ] lookup_developer_identity
+- [ ] merge_developer_identities
+- [ ] set_identity_pool_roles
+- [ ] unlink_developer_identity
+- [ ] unlink_identity
+- [ ] update_identity_pool
+
+## cognito-idp - 0% implemented
+- [ ] add_custom_attributes
+- [ ] admin_add_user_to_group
+- [ ] admin_confirm_sign_up
+- [ ] admin_create_user
+- [ ] admin_delete_user
+- [ ] admin_delete_user_attributes
+- [ ] admin_disable_provider_for_user
+- [ ] admin_disable_user
+- [ ] admin_enable_user
+- [ ] admin_forget_device
+- [ ] admin_get_device
+- [ ] admin_get_user
+- [ ] admin_initiate_auth
+- [ ] admin_link_provider_for_user
+- [ ] admin_list_devices
+- [ ] admin_list_groups_for_user
+- [ ] admin_list_user_auth_events
+- [ ] admin_remove_user_from_group
+- [ ] admin_reset_user_password
+- [ ] admin_respond_to_auth_challenge
+- [ ] admin_set_user_mfa_preference
+- [ ] admin_set_user_settings
+- [ ] admin_update_auth_event_feedback
+- [ ] admin_update_device_status
+- [ ] admin_update_user_attributes
+- [ ] admin_user_global_sign_out
+- [ ] associate_software_token
+- [ ] change_password
+- [ ] confirm_device
+- [ ] confirm_forgot_password
+- [ ] confirm_sign_up
+- [ ] create_group
+- [ ] create_identity_provider
+- [ ] create_resource_server
+- [ ] 
create_user_import_job +- [ ] create_user_pool +- [ ] create_user_pool_client +- [ ] create_user_pool_domain +- [ ] delete_group +- [ ] delete_identity_provider +- [ ] delete_resource_server +- [ ] delete_user +- [ ] delete_user_attributes +- [ ] delete_user_pool +- [ ] delete_user_pool_client +- [ ] delete_user_pool_domain +- [ ] describe_identity_provider +- [ ] describe_resource_server +- [ ] describe_risk_configuration +- [ ] describe_user_import_job +- [ ] describe_user_pool +- [ ] describe_user_pool_client +- [ ] describe_user_pool_domain +- [ ] forget_device +- [ ] forgot_password +- [ ] get_csv_header +- [ ] get_device +- [ ] get_group +- [ ] get_identity_provider_by_identifier +- [ ] get_signing_certificate +- [ ] get_ui_customization +- [ ] get_user +- [ ] get_user_attribute_verification_code +- [ ] get_user_pool_mfa_config +- [ ] global_sign_out +- [ ] initiate_auth +- [ ] list_devices +- [ ] list_groups +- [ ] list_identity_providers +- [ ] list_resource_servers +- [ ] list_user_import_jobs +- [ ] list_user_pool_clients +- [ ] list_user_pools +- [ ] list_users +- [ ] list_users_in_group +- [ ] resend_confirmation_code +- [ ] respond_to_auth_challenge +- [ ] set_risk_configuration +- [ ] set_ui_customization +- [ ] set_user_mfa_preference +- [ ] set_user_pool_mfa_config +- [ ] set_user_settings +- [ ] sign_up +- [ ] start_user_import_job +- [ ] stop_user_import_job +- [ ] update_auth_event_feedback +- [ ] update_device_status +- [ ] update_group +- [ ] update_identity_provider +- [ ] update_resource_server +- [ ] update_user_attributes +- [ ] update_user_pool +- [ ] update_user_pool_client +- [ ] verify_software_token +- [ ] verify_user_attribute + +## cognito-sync - 0% implemented +- [ ] bulk_publish +- [ ] delete_dataset +- [ ] describe_dataset +- [ ] describe_identity_pool_usage +- [ ] describe_identity_usage +- [ ] get_bulk_publish_details +- [ ] get_cognito_events +- [ ] get_identity_pool_configuration +- [ ] list_datasets +- [ ] list_identity_pool_usage +- [ ] list_records +- [ ] register_device +- [ ] set_cognito_events +- [ ] set_identity_pool_configuration +- [ ] subscribe_to_dataset +- [ ] unsubscribe_from_dataset +- [ ] update_records + +## comprehend - 0% implemented +- [ ] batch_detect_dominant_language +- [ ] batch_detect_entities +- [ ] batch_detect_key_phrases +- [ ] batch_detect_sentiment +- [ ] describe_topics_detection_job +- [ ] detect_dominant_language +- [ ] detect_entities +- [ ] detect_key_phrases +- [ ] detect_sentiment +- [ ] list_topics_detection_jobs +- [ ] start_topics_detection_job + +## config - 0% implemented +- [ ] batch_get_resource_config +- [ ] delete_aggregation_authorization +- [ ] delete_config_rule +- [ ] delete_configuration_aggregator +- [ ] delete_configuration_recorder +- [ ] delete_delivery_channel +- [ ] delete_evaluation_results +- [ ] delete_pending_aggregation_request +- [ ] deliver_config_snapshot +- [ ] describe_aggregate_compliance_by_config_rules +- [ ] describe_aggregation_authorizations +- [ ] describe_compliance_by_config_rule +- [ ] describe_compliance_by_resource +- [ ] describe_config_rule_evaluation_status +- [ ] describe_config_rules +- [ ] describe_configuration_aggregator_sources_status +- [ ] describe_configuration_aggregators +- [ ] describe_configuration_recorder_status +- [ ] describe_configuration_recorders +- [ ] describe_delivery_channel_status +- [ ] describe_delivery_channels +- [ ] describe_pending_aggregation_requests +- [ ] get_aggregate_compliance_details_by_config_rule +- [ ] 
get_aggregate_config_rule_compliance_summary +- [ ] get_compliance_details_by_config_rule +- [ ] get_compliance_details_by_resource +- [ ] get_compliance_summary_by_config_rule +- [ ] get_compliance_summary_by_resource_type +- [ ] get_discovered_resource_counts +- [ ] get_resource_config_history +- [ ] list_discovered_resources +- [ ] put_aggregation_authorization +- [ ] put_config_rule +- [ ] put_configuration_aggregator +- [ ] put_configuration_recorder +- [ ] put_delivery_channel +- [ ] put_evaluations +- [ ] start_config_rules_evaluation +- [ ] start_configuration_recorder +- [ ] stop_configuration_recorder + +## connect - 0% implemented +- [ ] start_outbound_voice_contact +- [ ] stop_contact + +## cur - 0% implemented +- [ ] delete_report_definition +- [ ] describe_report_definitions +- [ ] put_report_definition + +## datapipeline - 42% implemented +- [X] activate_pipeline +- [ ] add_tags +- [X] create_pipeline +- [ ] deactivate_pipeline +- [X] delete_pipeline +- [X] describe_objects +- [X] describe_pipelines +- [ ] evaluate_expression +- [X] get_pipeline_definition +- [X] list_pipelines +- [ ] poll_for_task +- [X] put_pipeline_definition +- [ ] query_objects +- [ ] remove_tags +- [ ] report_task_progress +- [ ] report_task_runner_heartbeat +- [ ] set_status +- [ ] set_task_status +- [ ] validate_pipeline_definition + +## dax - 0% implemented +- [ ] create_cluster +- [ ] create_parameter_group +- [ ] create_subnet_group +- [ ] decrease_replication_factor +- [ ] delete_cluster +- [ ] delete_parameter_group +- [ ] delete_subnet_group +- [ ] describe_clusters +- [ ] describe_default_parameters +- [ ] describe_events +- [ ] describe_parameter_groups +- [ ] describe_parameters +- [ ] describe_subnet_groups +- [ ] increase_replication_factor +- [ ] list_tags +- [ ] reboot_node +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_cluster +- [ ] update_parameter_group +- [ ] update_subnet_group + +## devicefarm - 0% implemented +- [ ] create_device_pool +- [ ] create_instance_profile +- [ ] create_network_profile +- [ ] create_project +- [ ] create_remote_access_session +- [ ] create_upload +- [ ] delete_device_pool +- [ ] delete_instance_profile +- [ ] delete_network_profile +- [ ] delete_project +- [ ] delete_remote_access_session +- [ ] delete_run +- [ ] delete_upload +- [ ] get_account_settings +- [ ] get_device +- [ ] get_device_instance +- [ ] get_device_pool +- [ ] get_device_pool_compatibility +- [ ] get_instance_profile +- [ ] get_job +- [ ] get_network_profile +- [ ] get_offering_status +- [ ] get_project +- [ ] get_remote_access_session +- [ ] get_run +- [ ] get_suite +- [ ] get_test +- [ ] get_upload +- [ ] install_to_remote_access_session +- [ ] list_artifacts +- [ ] list_device_instances +- [ ] list_device_pools +- [ ] list_devices +- [ ] list_instance_profiles +- [ ] list_jobs +- [ ] list_network_profiles +- [ ] list_offering_promotions +- [ ] list_offering_transactions +- [ ] list_offerings +- [ ] list_projects +- [ ] list_remote_access_sessions +- [ ] list_runs +- [ ] list_samples +- [ ] list_suites +- [ ] list_tests +- [ ] list_unique_problems +- [ ] list_uploads +- [ ] purchase_offering +- [ ] renew_offering +- [ ] schedule_run +- [ ] stop_remote_access_session +- [ ] stop_run +- [ ] update_device_instance +- [ ] update_device_pool +- [ ] update_instance_profile +- [ ] update_network_profile +- [ ] update_project + +## directconnect - 0% implemented +- [ ] allocate_connection_on_interconnect +- [ ] allocate_hosted_connection +- [ ] allocate_private_virtual_interface +- [ 
] allocate_public_virtual_interface +- [ ] associate_connection_with_lag +- [ ] associate_hosted_connection +- [ ] associate_virtual_interface +- [ ] confirm_connection +- [ ] confirm_private_virtual_interface +- [ ] confirm_public_virtual_interface +- [ ] create_bgp_peer +- [ ] create_connection +- [ ] create_direct_connect_gateway +- [ ] create_direct_connect_gateway_association +- [ ] create_interconnect +- [ ] create_lag +- [ ] create_private_virtual_interface +- [ ] create_public_virtual_interface +- [ ] delete_bgp_peer +- [ ] delete_connection +- [ ] delete_direct_connect_gateway +- [ ] delete_direct_connect_gateway_association +- [ ] delete_interconnect +- [ ] delete_lag +- [ ] delete_virtual_interface +- [ ] describe_connection_loa +- [ ] describe_connections +- [ ] describe_connections_on_interconnect +- [ ] describe_direct_connect_gateway_associations +- [ ] describe_direct_connect_gateway_attachments +- [ ] describe_direct_connect_gateways +- [ ] describe_hosted_connections +- [ ] describe_interconnect_loa +- [ ] describe_interconnects +- [ ] describe_lags +- [ ] describe_loa +- [ ] describe_locations +- [ ] describe_tags +- [ ] describe_virtual_gateways +- [ ] describe_virtual_interfaces +- [ ] disassociate_connection_from_lag +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_lag + +## discovery - 0% implemented +- [ ] associate_configuration_items_to_application +- [ ] create_application +- [ ] create_tags +- [ ] delete_applications +- [ ] delete_tags +- [ ] describe_agents +- [ ] describe_configurations +- [ ] describe_export_configurations +- [ ] describe_export_tasks +- [ ] describe_tags +- [ ] disassociate_configuration_items_from_application +- [ ] export_configurations +- [ ] get_discovery_summary +- [ ] list_configurations +- [ ] list_server_neighbors +- [ ] start_data_collection_by_agent_ids +- [ ] start_export_task +- [ ] stop_data_collection_by_agent_ids +- [ ] update_application + +## dms - 0% implemented +- [ ] add_tags_to_resource +- [ ] create_endpoint +- [ ] create_event_subscription +- [ ] create_replication_instance +- [ ] create_replication_subnet_group +- [ ] create_replication_task +- [ ] delete_certificate +- [ ] delete_endpoint +- [ ] delete_event_subscription +- [ ] delete_replication_instance +- [ ] delete_replication_subnet_group +- [ ] delete_replication_task +- [ ] describe_account_attributes +- [ ] describe_certificates +- [ ] describe_connections +- [ ] describe_endpoint_types +- [ ] describe_endpoints +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_orderable_replication_instances +- [ ] describe_refresh_schemas_status +- [ ] describe_replication_instance_task_logs +- [ ] describe_replication_instances +- [ ] describe_replication_subnet_groups +- [ ] describe_replication_task_assessment_results +- [ ] describe_replication_tasks +- [ ] describe_schemas +- [ ] describe_table_statistics +- [ ] import_certificate +- [ ] list_tags_for_resource +- [ ] modify_endpoint +- [ ] modify_event_subscription +- [ ] modify_replication_instance +- [ ] modify_replication_subnet_group +- [ ] modify_replication_task +- [ ] reboot_replication_instance +- [ ] refresh_schemas +- [ ] reload_tables +- [ ] remove_tags_from_resource +- [ ] start_replication_task +- [ ] start_replication_task_assessment +- [ ] stop_replication_task +- [ ] test_connection + +## ds - 0% implemented +- [ ] add_ip_routes +- [ ] add_tags_to_resource +- [ ] cancel_schema_extension +- [ ] connect_directory +- [ ] create_alias +- [ ] 
create_computer +- [ ] create_conditional_forwarder +- [ ] create_directory +- [ ] create_microsoft_ad +- [ ] create_snapshot +- [ ] create_trust +- [ ] delete_conditional_forwarder +- [ ] delete_directory +- [ ] delete_snapshot +- [ ] delete_trust +- [ ] deregister_event_topic +- [ ] describe_conditional_forwarders +- [ ] describe_directories +- [ ] describe_domain_controllers +- [ ] describe_event_topics +- [ ] describe_snapshots +- [ ] describe_trusts +- [ ] disable_radius +- [ ] disable_sso +- [ ] enable_radius +- [ ] enable_sso +- [ ] get_directory_limits +- [ ] get_snapshot_limits +- [ ] list_ip_routes +- [ ] list_schema_extensions +- [ ] list_tags_for_resource +- [ ] register_event_topic +- [ ] remove_ip_routes +- [ ] remove_tags_from_resource +- [ ] restore_from_snapshot +- [ ] start_schema_extension +- [ ] update_conditional_forwarder +- [ ] update_number_of_domain_controllers +- [ ] update_radius +- [ ] verify_trust + +## dynamodb - 22% implemented +- [ ] batch_get_item +- [ ] batch_write_item +- [ ] create_backup +- [ ] create_global_table +- [X] create_table +- [ ] delete_backup +- [X] delete_item +- [X] delete_table +- [ ] describe_backup +- [ ] describe_continuous_backups +- [ ] describe_global_table +- [ ] describe_limits +- [ ] describe_table +- [ ] describe_time_to_live +- [X] get_item +- [ ] list_backups +- [ ] list_global_tables +- [ ] list_tables +- [ ] list_tags_of_resource +- [X] put_item +- [X] query +- [ ] restore_table_from_backup +- [ ] restore_table_to_point_in_time +- [X] scan +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_continuous_backups +- [ ] update_global_table +- [ ] update_item +- [ ] update_table +- [ ] update_time_to_live + +## dynamodbstreams - 0% implemented +- [ ] describe_stream +- [ ] get_records +- [ ] get_shard_iterator +- [ ] list_streams + +## ec2 - 37% implemented +- [ ] accept_reserved_instances_exchange_quote +- [ ] accept_vpc_endpoint_connections +- [X] accept_vpc_peering_connection +- [X] allocate_address +- [ ] allocate_hosts +- [ ] assign_ipv6_addresses +- [ ] assign_private_ip_addresses +- [X] associate_address +- [X] associate_dhcp_options +- [ ] associate_iam_instance_profile +- [X] associate_route_table +- [ ] associate_subnet_cidr_block +- [X] associate_vpc_cidr_block +- [ ] attach_classic_link_vpc +- [X] attach_internet_gateway +- [X] attach_network_interface +- [X] attach_volume +- [X] attach_vpn_gateway +- [X] authorize_security_group_egress +- [X] authorize_security_group_ingress +- [ ] bundle_instance +- [ ] cancel_bundle_task +- [ ] cancel_conversion_task +- [ ] cancel_export_task +- [ ] cancel_import_task +- [ ] cancel_reserved_instances_listing +- [X] cancel_spot_fleet_requests +- [X] cancel_spot_instance_requests +- [ ] confirm_product_instance +- [ ] copy_fpga_image +- [X] copy_image +- [X] copy_snapshot +- [X] create_customer_gateway +- [ ] create_default_subnet +- [ ] create_default_vpc +- [X] create_dhcp_options +- [ ] create_egress_only_internet_gateway +- [ ] create_flow_logs +- [ ] create_fpga_image +- [X] create_image +- [ ] create_instance_export_task +- [X] create_internet_gateway +- [X] create_key_pair +- [ ] create_launch_template +- [ ] create_launch_template_version +- [X] create_nat_gateway +- [X] create_network_acl +- [X] create_network_acl_entry +- [X] create_network_interface +- [ ] create_network_interface_permission +- [ ] create_placement_group +- [ ] create_reserved_instances_listing +- [X] create_route +- [X] create_route_table +- [X] create_security_group +- [X] create_snapshot +- [ ] 
create_spot_datafeed_subscription +- [X] create_subnet +- [X] create_tags +- [X] create_volume +- [X] create_vpc +- [ ] create_vpc_endpoint +- [ ] create_vpc_endpoint_connection_notification +- [ ] create_vpc_endpoint_service_configuration +- [X] create_vpc_peering_connection +- [X] create_vpn_connection +- [ ] create_vpn_connection_route +- [X] create_vpn_gateway +- [X] delete_customer_gateway +- [ ] delete_dhcp_options +- [ ] delete_egress_only_internet_gateway +- [ ] delete_flow_logs +- [ ] delete_fpga_image +- [X] delete_internet_gateway +- [X] delete_key_pair +- [ ] delete_launch_template +- [ ] delete_launch_template_versions +- [X] delete_nat_gateway +- [X] delete_network_acl +- [X] delete_network_acl_entry +- [X] delete_network_interface +- [ ] delete_network_interface_permission +- [ ] delete_placement_group +- [X] delete_route +- [X] delete_route_table +- [X] delete_security_group +- [X] delete_snapshot +- [ ] delete_spot_datafeed_subscription +- [X] delete_subnet +- [X] delete_tags +- [X] delete_volume +- [X] delete_vpc +- [ ] delete_vpc_endpoint_connection_notifications +- [ ] delete_vpc_endpoint_service_configurations +- [ ] delete_vpc_endpoints +- [X] delete_vpc_peering_connection +- [X] delete_vpn_connection +- [ ] delete_vpn_connection_route +- [X] delete_vpn_gateway +- [X] deregister_image +- [ ] describe_account_attributes +- [X] describe_addresses +- [ ] describe_aggregate_id_format +- [X] describe_availability_zones +- [ ] describe_bundle_tasks +- [ ] describe_classic_link_instances +- [ ] describe_conversion_tasks +- [ ] describe_customer_gateways +- [X] describe_dhcp_options +- [ ] describe_egress_only_internet_gateways +- [ ] describe_elastic_gpus +- [ ] describe_export_tasks +- [ ] describe_flow_logs +- [ ] describe_fpga_image_attribute +- [ ] describe_fpga_images +- [ ] describe_host_reservation_offerings +- [ ] describe_host_reservations +- [ ] describe_hosts +- [ ] describe_iam_instance_profile_associations +- [ ] describe_id_format +- [ ] describe_identity_id_format +- [ ] describe_image_attribute +- [X] describe_images +- [ ] describe_import_image_tasks +- [ ] describe_import_snapshot_tasks +- [X] describe_instance_attribute +- [ ] describe_instance_credit_specifications +- [ ] describe_instance_status +- [ ] describe_instances +- [X] describe_internet_gateways +- [X] describe_key_pairs +- [ ] describe_launch_template_versions +- [ ] describe_launch_templates +- [ ] describe_moving_addresses +- [ ] describe_nat_gateways +- [ ] describe_network_acls +- [ ] describe_network_interface_attribute +- [ ] describe_network_interface_permissions +- [X] describe_network_interfaces +- [ ] describe_placement_groups +- [ ] describe_prefix_lists +- [ ] describe_principal_id_format +- [X] describe_regions +- [ ] describe_reserved_instances +- [ ] describe_reserved_instances_listings +- [ ] describe_reserved_instances_modifications +- [ ] describe_reserved_instances_offerings +- [ ] describe_route_tables +- [ ] describe_scheduled_instance_availability +- [ ] describe_scheduled_instances +- [ ] describe_security_group_references +- [X] describe_security_groups +- [ ] describe_snapshot_attribute +- [X] describe_snapshots +- [ ] describe_spot_datafeed_subscription +- [X] describe_spot_fleet_instances +- [ ] describe_spot_fleet_request_history +- [X] describe_spot_fleet_requests +- [X] describe_spot_instance_requests +- [ ] describe_spot_price_history +- [ ] describe_stale_security_groups +- [ ] describe_subnets +- [X] describe_tags +- [ ] describe_volume_attribute +- [ ] 
describe_volume_status +- [X] describe_volumes +- [ ] describe_volumes_modifications +- [X] describe_vpc_attribute +- [ ] describe_vpc_classic_link +- [ ] describe_vpc_classic_link_dns_support +- [ ] describe_vpc_endpoint_connection_notifications +- [ ] describe_vpc_endpoint_connections +- [ ] describe_vpc_endpoint_service_configurations +- [ ] describe_vpc_endpoint_service_permissions +- [ ] describe_vpc_endpoint_services +- [ ] describe_vpc_endpoints +- [ ] describe_vpc_peering_connections +- [ ] describe_vpcs +- [X] describe_vpn_connections +- [ ] describe_vpn_gateways +- [ ] detach_classic_link_vpc +- [X] detach_internet_gateway +- [X] detach_network_interface +- [X] detach_volume +- [X] detach_vpn_gateway +- [ ] disable_vgw_route_propagation +- [ ] disable_vpc_classic_link +- [ ] disable_vpc_classic_link_dns_support +- [X] disassociate_address +- [ ] disassociate_iam_instance_profile +- [X] disassociate_route_table +- [ ] disassociate_subnet_cidr_block +- [X] disassociate_vpc_cidr_block +- [ ] enable_vgw_route_propagation +- [ ] enable_volume_io +- [ ] enable_vpc_classic_link +- [ ] enable_vpc_classic_link_dns_support +- [ ] get_console_output +- [ ] get_console_screenshot +- [ ] get_host_reservation_purchase_preview +- [ ] get_launch_template_data +- [ ] get_password_data +- [ ] get_reserved_instances_exchange_quote +- [ ] import_image +- [ ] import_instance +- [X] import_key_pair +- [ ] import_snapshot +- [ ] import_volume +- [ ] modify_fpga_image_attribute +- [ ] modify_hosts +- [ ] modify_id_format +- [ ] modify_identity_id_format +- [ ] modify_image_attribute +- [X] modify_instance_attribute +- [ ] modify_instance_credit_specification +- [ ] modify_instance_placement +- [ ] modify_launch_template +- [X] modify_network_interface_attribute +- [ ] modify_reserved_instances +- [ ] modify_snapshot_attribute +- [X] modify_spot_fleet_request +- [X] modify_subnet_attribute +- [ ] modify_volume +- [ ] modify_volume_attribute +- [X] modify_vpc_attribute +- [ ] modify_vpc_endpoint +- [ ] modify_vpc_endpoint_connection_notification +- [ ] modify_vpc_endpoint_service_configuration +- [ ] modify_vpc_endpoint_service_permissions +- [ ] modify_vpc_peering_connection_options +- [ ] modify_vpc_tenancy +- [ ] monitor_instances +- [ ] move_address_to_vpc +- [ ] purchase_host_reservation +- [ ] purchase_reserved_instances_offering +- [ ] purchase_scheduled_instances +- [X] reboot_instances +- [ ] register_image +- [ ] reject_vpc_endpoint_connections +- [X] reject_vpc_peering_connection +- [X] release_address +- [ ] release_hosts +- [ ] replace_iam_instance_profile_association +- [X] replace_network_acl_association +- [X] replace_network_acl_entry +- [X] replace_route +- [X] replace_route_table_association +- [ ] report_instance_status +- [X] request_spot_fleet +- [X] request_spot_instances +- [ ] reset_fpga_image_attribute +- [ ] reset_image_attribute +- [ ] reset_instance_attribute +- [ ] reset_network_interface_attribute +- [ ] reset_snapshot_attribute +- [ ] restore_address_to_classic +- [X] revoke_security_group_egress +- [X] revoke_security_group_ingress +- [ ] run_instances +- [ ] run_scheduled_instances +- [X] start_instances +- [X] stop_instances +- [X] terminate_instances +- [ ] unassign_ipv6_addresses +- [ ] unassign_private_ip_addresses +- [ ] unmonitor_instances +- [ ] update_security_group_rule_descriptions_egress +- [ ] update_security_group_rule_descriptions_ingress + +## ecr - 31% implemented +- [ ] batch_check_layer_availability +- [ ] batch_delete_image +- [X] batch_get_image +- [ ] 
complete_layer_upload +- [X] create_repository +- [ ] delete_lifecycle_policy +- [X] delete_repository +- [ ] delete_repository_policy +- [X] describe_images +- [X] describe_repositories +- [ ] get_authorization_token +- [ ] get_download_url_for_layer +- [ ] get_lifecycle_policy +- [ ] get_lifecycle_policy_preview +- [ ] get_repository_policy +- [ ] initiate_layer_upload +- [X] list_images +- [X] put_image +- [ ] put_lifecycle_policy +- [ ] set_repository_policy +- [ ] start_lifecycle_policy_preview +- [ ] upload_layer_part + +## ecs - 87% implemented +- [X] create_cluster +- [X] create_service +- [X] delete_attributes +- [X] delete_cluster +- [X] delete_service +- [X] deregister_container_instance +- [X] deregister_task_definition +- [X] describe_clusters +- [X] describe_container_instances +- [X] describe_services +- [X] describe_task_definition +- [X] describe_tasks +- [ ] discover_poll_endpoint +- [X] list_attributes +- [X] list_clusters +- [X] list_container_instances +- [X] list_services +- [X] list_task_definition_families +- [X] list_task_definitions +- [X] list_tasks +- [X] put_attributes +- [X] register_container_instance +- [X] register_task_definition +- [X] run_task +- [X] start_task +- [X] stop_task +- [ ] submit_container_state_change +- [ ] submit_task_state_change +- [ ] update_container_agent +- [X] update_container_instances_state +- [X] update_service + +## efs - 0% implemented +- [ ] create_file_system +- [ ] create_mount_target +- [ ] create_tags +- [ ] delete_file_system +- [ ] delete_mount_target +- [ ] delete_tags +- [ ] describe_file_systems +- [ ] describe_mount_target_security_groups +- [ ] describe_mount_targets +- [ ] describe_tags +- [ ] modify_mount_target_security_groups + +## elasticache - 0% implemented +- [ ] add_tags_to_resource +- [ ] authorize_cache_security_group_ingress +- [ ] copy_snapshot +- [ ] create_cache_cluster +- [ ] create_cache_parameter_group +- [ ] create_cache_security_group +- [ ] create_cache_subnet_group +- [ ] create_replication_group +- [ ] create_snapshot +- [ ] delete_cache_cluster +- [ ] delete_cache_parameter_group +- [ ] delete_cache_security_group +- [ ] delete_cache_subnet_group +- [ ] delete_replication_group +- [ ] delete_snapshot +- [ ] describe_cache_clusters +- [ ] describe_cache_engine_versions +- [ ] describe_cache_parameter_groups +- [ ] describe_cache_parameters +- [ ] describe_cache_security_groups +- [ ] describe_cache_subnet_groups +- [ ] describe_engine_default_parameters +- [ ] describe_events +- [ ] describe_replication_groups +- [ ] describe_reserved_cache_nodes +- [ ] describe_reserved_cache_nodes_offerings +- [ ] describe_snapshots +- [ ] list_allowed_node_type_modifications +- [ ] list_tags_for_resource +- [ ] modify_cache_cluster +- [ ] modify_cache_parameter_group +- [ ] modify_cache_subnet_group +- [ ] modify_replication_group +- [ ] modify_replication_group_shard_configuration +- [ ] purchase_reserved_cache_nodes_offering +- [ ] reboot_cache_cluster +- [ ] remove_tags_from_resource +- [ ] reset_cache_parameter_group +- [ ] revoke_cache_security_group_ingress +- [ ] test_failover + +## elasticbeanstalk - 0% implemented +- [ ] abort_environment_update +- [ ] apply_environment_managed_action +- [ ] check_dns_availability +- [ ] compose_environments +- [ ] create_application +- [ ] create_application_version +- [ ] create_configuration_template +- [ ] create_environment +- [ ] create_platform_version +- [ ] create_storage_location +- [ ] delete_application +- [ ] delete_application_version +- [ ] 
delete_configuration_template +- [ ] delete_environment_configuration +- [ ] delete_platform_version +- [ ] describe_account_attributes +- [ ] describe_application_versions +- [ ] describe_applications +- [ ] describe_configuration_options +- [ ] describe_configuration_settings +- [ ] describe_environment_health +- [ ] describe_environment_managed_action_history +- [ ] describe_environment_managed_actions +- [ ] describe_environment_resources +- [ ] describe_environments +- [ ] describe_events +- [ ] describe_instances_health +- [ ] describe_platform_version +- [ ] list_available_solution_stacks +- [ ] list_platform_versions +- [ ] list_tags_for_resource +- [ ] rebuild_environment +- [ ] request_environment_info +- [ ] restart_app_server +- [ ] retrieve_environment_info +- [ ] swap_environment_cnames +- [ ] terminate_environment +- [ ] update_application +- [ ] update_application_resource_lifecycle +- [ ] update_application_version +- [ ] update_configuration_template +- [ ] update_environment +- [ ] update_tags_for_resource +- [ ] validate_configuration_settings + +## elastictranscoder - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] create_pipeline +- [ ] create_preset +- [ ] delete_pipeline +- [ ] delete_preset +- [ ] list_jobs_by_pipeline +- [ ] list_jobs_by_status +- [ ] list_pipelines +- [ ] list_presets +- [ ] read_job +- [ ] read_pipeline +- [ ] read_preset +- [ ] test_role +- [ ] update_pipeline +- [ ] update_pipeline_notifications +- [ ] update_pipeline_status + +## elb - 34% implemented +- [ ] add_tags +- [X] apply_security_groups_to_load_balancer +- [ ] attach_load_balancer_to_subnets +- [X] configure_health_check +- [X] create_app_cookie_stickiness_policy +- [X] create_lb_cookie_stickiness_policy +- [X] create_load_balancer +- [X] create_load_balancer_listeners +- [ ] create_load_balancer_policy +- [X] delete_load_balancer +- [X] delete_load_balancer_listeners +- [ ] delete_load_balancer_policy +- [ ] deregister_instances_from_load_balancer +- [ ] describe_account_limits +- [ ] describe_instance_health +- [ ] describe_load_balancer_attributes +- [ ] describe_load_balancer_policies +- [ ] describe_load_balancer_policy_types +- [X] describe_load_balancers +- [ ] describe_tags +- [ ] detach_load_balancer_from_subnets +- [ ] disable_availability_zones_for_load_balancer +- [ ] enable_availability_zones_for_load_balancer +- [ ] modify_load_balancer_attributes +- [ ] register_instances_with_load_balancer +- [ ] remove_tags +- [ ] set_load_balancer_listener_ssl_certificate +- [ ] set_load_balancer_policies_for_backend_server +- [X] set_load_balancer_policies_of_listener + +## elbv2 - 70% implemented +- [ ] add_listener_certificates +- [ ] add_tags +- [X] create_listener +- [X] create_load_balancer +- [X] create_rule +- [X] create_target_group +- [X] delete_listener +- [X] delete_load_balancer +- [X] delete_rule +- [X] delete_target_group +- [X] deregister_targets +- [ ] describe_account_limits +- [ ] describe_listener_certificates +- [X] describe_listeners +- [X] describe_load_balancer_attributes +- [X] describe_load_balancers +- [X] describe_rules +- [ ] describe_ssl_policies +- [ ] describe_tags +- [ ] describe_target_group_attributes +- [X] describe_target_groups +- [X] describe_target_health +- [X] modify_listener +- [X] modify_load_balancer_attributes +- [X] modify_rule +- [X] modify_target_group +- [ ] modify_target_group_attributes +- [X] register_targets +- [ ] remove_listener_certificates +- [ ] remove_tags +- [X] set_ip_address_type +- [X] set_rule_priorities +- 
[X] set_security_groups +- [X] set_subnets + +## emr - 55% implemented +- [ ] add_instance_fleet +- [X] add_instance_groups +- [X] add_job_flow_steps +- [X] add_tags +- [ ] cancel_steps +- [ ] create_security_configuration +- [ ] delete_security_configuration +- [ ] describe_cluster +- [X] describe_job_flows +- [ ] describe_security_configuration +- [X] describe_step +- [X] list_bootstrap_actions +- [X] list_clusters +- [ ] list_instance_fleets +- [X] list_instance_groups +- [ ] list_instances +- [ ] list_security_configurations +- [X] list_steps +- [ ] modify_instance_fleet +- [X] modify_instance_groups +- [ ] put_auto_scaling_policy +- [ ] remove_auto_scaling_policy +- [X] remove_tags +- [X] run_job_flow +- [X] set_termination_protection +- [X] set_visible_to_all_users +- [X] terminate_job_flows + +## es - 0% implemented +- [ ] add_tags +- [ ] create_elasticsearch_domain +- [ ] delete_elasticsearch_domain +- [ ] delete_elasticsearch_service_role +- [ ] describe_elasticsearch_domain +- [ ] describe_elasticsearch_domain_config +- [ ] describe_elasticsearch_domains +- [ ] describe_elasticsearch_instance_type_limits +- [ ] list_domain_names +- [ ] list_elasticsearch_instance_types +- [ ] list_elasticsearch_versions +- [ ] list_tags +- [ ] remove_tags +- [ ] update_elasticsearch_domain_config + +## events - 100% implemented +- [X] delete_rule +- [X] describe_event_bus +- [X] describe_rule +- [X] disable_rule +- [X] enable_rule +- [X] list_rule_names_by_target +- [X] list_rules +- [X] list_targets_by_rule +- [X] put_events +- [X] put_permission +- [X] put_rule +- [X] put_targets +- [X] remove_permission +- [X] remove_targets +- [X] test_event_pattern + +## firehose - 0% implemented +- [ ] create_delivery_stream +- [ ] delete_delivery_stream +- [ ] describe_delivery_stream +- [ ] list_delivery_streams +- [ ] put_record +- [ ] put_record_batch +- [ ] update_destination + +## fms - 0% implemented +- [ ] associate_admin_account +- [ ] delete_notification_channel +- [ ] delete_policy +- [ ] disassociate_admin_account +- [ ] get_admin_account +- [ ] get_compliance_detail +- [ ] get_notification_channel +- [ ] get_policy +- [ ] list_compliance_status +- [ ] list_policies +- [ ] put_notification_channel +- [ ] put_policy + +## gamelift - 0% implemented +- [ ] accept_match +- [ ] create_alias +- [ ] create_build +- [ ] create_fleet +- [ ] create_game_session +- [ ] create_game_session_queue +- [ ] create_matchmaking_configuration +- [ ] create_matchmaking_rule_set +- [ ] create_player_session +- [ ] create_player_sessions +- [ ] create_vpc_peering_authorization +- [ ] create_vpc_peering_connection +- [ ] delete_alias +- [ ] delete_build +- [ ] delete_fleet +- [ ] delete_game_session_queue +- [ ] delete_matchmaking_configuration +- [ ] delete_scaling_policy +- [ ] delete_vpc_peering_authorization +- [ ] delete_vpc_peering_connection +- [ ] describe_alias +- [ ] describe_build +- [ ] describe_ec2_instance_limits +- [ ] describe_fleet_attributes +- [ ] describe_fleet_capacity +- [ ] describe_fleet_events +- [ ] describe_fleet_port_settings +- [ ] describe_fleet_utilization +- [ ] describe_game_session_details +- [ ] describe_game_session_placement +- [ ] describe_game_session_queues +- [ ] describe_game_sessions +- [ ] describe_instances +- [ ] describe_matchmaking +- [ ] describe_matchmaking_configurations +- [ ] describe_matchmaking_rule_sets +- [ ] describe_player_sessions +- [ ] describe_runtime_configuration +- [ ] describe_scaling_policies +- [ ] describe_vpc_peering_authorizations +- [ ] 
describe_vpc_peering_connections +- [ ] get_game_session_log_url +- [ ] get_instance_access +- [ ] list_aliases +- [ ] list_builds +- [ ] list_fleets +- [ ] put_scaling_policy +- [ ] request_upload_credentials +- [ ] resolve_alias +- [ ] search_game_sessions +- [ ] start_game_session_placement +- [ ] start_match_backfill +- [ ] start_matchmaking +- [ ] stop_game_session_placement +- [ ] stop_matchmaking +- [ ] update_alias +- [ ] update_build +- [ ] update_fleet_attributes +- [ ] update_fleet_capacity +- [ ] update_fleet_port_settings +- [ ] update_game_session +- [ ] update_game_session_queue +- [ ] update_matchmaking_configuration +- [ ] update_runtime_configuration +- [ ] validate_matchmaking_rule_set + +## glacier - 12% implemented +- [ ] abort_multipart_upload +- [ ] abort_vault_lock +- [ ] add_tags_to_vault +- [ ] complete_multipart_upload +- [ ] complete_vault_lock +- [X] create_vault +- [ ] delete_archive +- [X] delete_vault +- [ ] delete_vault_access_policy +- [ ] delete_vault_notifications +- [ ] describe_job +- [ ] describe_vault +- [ ] get_data_retrieval_policy +- [ ] get_job_output +- [ ] get_vault_access_policy +- [ ] get_vault_lock +- [ ] get_vault_notifications +- [X] initiate_job +- [ ] initiate_multipart_upload +- [ ] initiate_vault_lock +- [X] list_jobs +- [ ] list_multipart_uploads +- [ ] list_parts +- [ ] list_provisioned_capacity +- [ ] list_tags_for_vault +- [ ] list_vaults +- [ ] purchase_provisioned_capacity +- [ ] remove_tags_from_vault +- [ ] set_data_retrieval_policy +- [ ] set_vault_access_policy +- [ ] set_vault_notifications +- [ ] upload_archive +- [ ] upload_multipart_part + +## glue - 0% implemented +- [ ] batch_create_partition +- [ ] batch_delete_connection +- [ ] batch_delete_partition +- [ ] batch_delete_table +- [ ] batch_delete_table_version +- [ ] batch_get_partition +- [ ] batch_stop_job_run +- [ ] create_classifier +- [ ] create_connection +- [ ] create_crawler +- [ ] create_database +- [ ] create_dev_endpoint +- [ ] create_job +- [ ] create_partition +- [ ] create_script +- [ ] create_table +- [ ] create_trigger +- [ ] create_user_defined_function +- [ ] delete_classifier +- [ ] delete_connection +- [ ] delete_crawler +- [ ] delete_database +- [ ] delete_dev_endpoint +- [ ] delete_job +- [ ] delete_partition +- [ ] delete_table +- [ ] delete_table_version +- [ ] delete_trigger +- [ ] delete_user_defined_function +- [ ] get_catalog_import_status +- [ ] get_classifier +- [ ] get_classifiers +- [ ] get_connection +- [ ] get_connections +- [ ] get_crawler +- [ ] get_crawler_metrics +- [ ] get_crawlers +- [ ] get_database +- [ ] get_databases +- [ ] get_dataflow_graph +- [ ] get_dev_endpoint +- [ ] get_dev_endpoints +- [ ] get_job +- [ ] get_job_run +- [ ] get_job_runs +- [ ] get_jobs +- [ ] get_mapping +- [ ] get_partition +- [ ] get_partitions +- [ ] get_plan +- [ ] get_table +- [ ] get_table_version +- [ ] get_table_versions +- [ ] get_tables +- [ ] get_trigger +- [ ] get_triggers +- [ ] get_user_defined_function +- [ ] get_user_defined_functions +- [ ] import_catalog_to_glue +- [ ] reset_job_bookmark +- [ ] start_crawler +- [ ] start_crawler_schedule +- [ ] start_job_run +- [ ] start_trigger +- [ ] stop_crawler +- [ ] stop_crawler_schedule +- [ ] stop_trigger +- [ ] update_classifier +- [ ] update_connection +- [ ] update_crawler +- [ ] update_crawler_schedule +- [ ] update_database +- [ ] update_dev_endpoint +- [ ] update_job +- [ ] update_partition +- [ ] update_table +- [ ] update_trigger +- [ ] update_user_defined_function + +## greengrass - 
0% implemented +- [ ] associate_role_to_group +- [ ] associate_service_role_to_account +- [ ] create_core_definition +- [ ] create_core_definition_version +- [ ] create_deployment +- [ ] create_device_definition +- [ ] create_device_definition_version +- [ ] create_function_definition +- [ ] create_function_definition_version +- [ ] create_group +- [ ] create_group_certificate_authority +- [ ] create_group_version +- [ ] create_logger_definition +- [ ] create_logger_definition_version +- [ ] create_resource_definition +- [ ] create_resource_definition_version +- [ ] create_software_update_job +- [ ] create_subscription_definition +- [ ] create_subscription_definition_version +- [ ] delete_core_definition +- [ ] delete_device_definition +- [ ] delete_function_definition +- [ ] delete_group +- [ ] delete_logger_definition +- [ ] delete_resource_definition +- [ ] delete_subscription_definition +- [ ] disassociate_role_from_group +- [ ] disassociate_service_role_from_account +- [ ] get_associated_role +- [ ] get_connectivity_info +- [ ] get_core_definition +- [ ] get_core_definition_version +- [ ] get_deployment_status +- [ ] get_device_definition +- [ ] get_device_definition_version +- [ ] get_function_definition +- [ ] get_function_definition_version +- [ ] get_group +- [ ] get_group_certificate_authority +- [ ] get_group_certificate_configuration +- [ ] get_group_version +- [ ] get_logger_definition +- [ ] get_logger_definition_version +- [ ] get_resource_definition +- [ ] get_resource_definition_version +- [ ] get_service_role_for_account +- [ ] get_subscription_definition +- [ ] get_subscription_definition_version +- [ ] list_core_definition_versions +- [ ] list_core_definitions +- [ ] list_deployments +- [ ] list_device_definition_versions +- [ ] list_device_definitions +- [ ] list_function_definition_versions +- [ ] list_function_definitions +- [ ] list_group_certificate_authorities +- [ ] list_group_versions +- [ ] list_groups +- [ ] list_logger_definition_versions +- [ ] list_logger_definitions +- [ ] list_resource_definition_versions +- [ ] list_resource_definitions +- [ ] list_subscription_definition_versions +- [ ] list_subscription_definitions +- [ ] reset_deployments +- [ ] update_connectivity_info +- [ ] update_core_definition +- [ ] update_device_definition +- [ ] update_function_definition +- [ ] update_group +- [ ] update_group_certificate_configuration +- [ ] update_logger_definition +- [ ] update_resource_definition +- [ ] update_subscription_definition + +## guardduty - 0% implemented +- [ ] accept_invitation +- [ ] archive_findings +- [ ] create_detector +- [ ] create_ip_set +- [ ] create_members +- [ ] create_sample_findings +- [ ] create_threat_intel_set +- [ ] decline_invitations +- [ ] delete_detector +- [ ] delete_invitations +- [ ] delete_ip_set +- [ ] delete_members +- [ ] delete_threat_intel_set +- [ ] disassociate_from_master_account +- [ ] disassociate_members +- [ ] get_detector +- [ ] get_findings +- [ ] get_findings_statistics +- [ ] get_invitations_count +- [ ] get_ip_set +- [ ] get_master_account +- [ ] get_members +- [ ] get_threat_intel_set +- [ ] invite_members +- [ ] list_detectors +- [ ] list_findings +- [ ] list_invitations +- [ ] list_ip_sets +- [ ] list_members +- [ ] list_threat_intel_sets +- [ ] start_monitoring_members +- [ ] stop_monitoring_members +- [ ] unarchive_findings +- [ ] update_detector +- [ ] update_findings_feedback +- [ ] update_ip_set +- [ ] update_threat_intel_set + +## health - 0% implemented +- [ ] describe_affected_entities +- 
[ ] describe_entity_aggregates +- [ ] describe_event_aggregates +- [ ] describe_event_details +- [ ] describe_event_types +- [ ] describe_events + +## iam - 48% implemented +- [ ] add_client_id_to_open_id_connect_provider +- [X] add_role_to_instance_profile +- [X] add_user_to_group +- [X] attach_group_policy +- [X] attach_role_policy +- [X] attach_user_policy +- [ ] change_password +- [X] create_access_key +- [X] create_account_alias +- [X] create_group +- [X] create_instance_profile +- [X] create_login_profile +- [ ] create_open_id_connect_provider +- [X] create_policy +- [X] create_policy_version +- [X] create_role +- [ ] create_saml_provider +- [ ] create_service_linked_role +- [ ] create_service_specific_credential +- [X] create_user +- [ ] create_virtual_mfa_device +- [X] deactivate_mfa_device +- [X] delete_access_key +- [X] delete_account_alias +- [ ] delete_account_password_policy +- [ ] delete_group +- [ ] delete_group_policy +- [ ] delete_instance_profile +- [X] delete_login_profile +- [ ] delete_open_id_connect_provider +- [ ] delete_policy +- [X] delete_policy_version +- [X] delete_role +- [X] delete_role_policy +- [ ] delete_saml_provider +- [X] delete_server_certificate +- [ ] delete_service_linked_role +- [ ] delete_service_specific_credential +- [ ] delete_signing_certificate +- [ ] delete_ssh_public_key +- [X] delete_user +- [X] delete_user_policy +- [ ] delete_virtual_mfa_device +- [X] detach_group_policy +- [X] detach_role_policy +- [X] detach_user_policy +- [X] enable_mfa_device +- [ ] generate_credential_report +- [ ] get_access_key_last_used +- [X] get_account_authorization_details +- [ ] get_account_password_policy +- [ ] get_account_summary +- [ ] get_context_keys_for_custom_policy +- [ ] get_context_keys_for_principal_policy +- [X] get_credential_report +- [X] get_group +- [X] get_group_policy +- [X] get_instance_profile +- [X] get_login_profile +- [ ] get_open_id_connect_provider +- [X] get_policy +- [X] get_policy_version +- [X] get_role +- [X] get_role_policy +- [ ] get_saml_provider +- [X] get_server_certificate +- [ ] get_service_linked_role_deletion_status +- [ ] get_ssh_public_key +- [X] get_user +- [X] get_user_policy +- [ ] list_access_keys +- [X] list_account_aliases +- [X] list_attached_group_policies +- [X] list_attached_role_policies +- [X] list_attached_user_policies +- [ ] list_entities_for_policy +- [X] list_group_policies +- [X] list_groups +- [ ] list_groups_for_user +- [ ] list_instance_profiles +- [ ] list_instance_profiles_for_role +- [X] list_mfa_devices +- [ ] list_open_id_connect_providers +- [X] list_policies +- [X] list_policy_versions +- [X] list_role_policies +- [ ] list_roles +- [ ] list_saml_providers +- [ ] list_server_certificates +- [ ] list_service_specific_credentials +- [ ] list_signing_certificates +- [ ] list_ssh_public_keys +- [X] list_user_policies +- [X] list_users +- [ ] list_virtual_mfa_devices +- [X] put_group_policy +- [X] put_role_policy +- [X] put_user_policy +- [ ] remove_client_id_from_open_id_connect_provider +- [X] remove_role_from_instance_profile +- [X] remove_user_from_group +- [ ] reset_service_specific_credential +- [ ] resync_mfa_device +- [ ] set_default_policy_version +- [ ] simulate_custom_policy +- [ ] simulate_principal_policy +- [X] update_access_key +- [ ] update_account_password_policy +- [ ] update_assume_role_policy +- [ ] update_group +- [X] update_login_profile +- [ ] update_open_id_connect_provider_thumbprint +- [ ] update_role +- [ ] update_role_description +- [ ] update_saml_provider +- [ ] 
update_server_certificate +- [ ] update_service_specific_credential +- [ ] update_signing_certificate +- [ ] update_ssh_public_key +- [ ] update_user +- [ ] upload_server_certificate +- [ ] upload_signing_certificate +- [ ] upload_ssh_public_key + +## importexport - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] get_shipping_label +- [ ] get_status +- [ ] list_jobs +- [ ] update_job + +## inspector - 0% implemented +- [ ] add_attributes_to_findings +- [ ] create_assessment_target +- [ ] create_assessment_template +- [ ] create_resource_group +- [ ] delete_assessment_run +- [ ] delete_assessment_target +- [ ] delete_assessment_template +- [ ] describe_assessment_runs +- [ ] describe_assessment_targets +- [ ] describe_assessment_templates +- [ ] describe_cross_account_access_role +- [ ] describe_findings +- [ ] describe_resource_groups +- [ ] describe_rules_packages +- [ ] get_assessment_report +- [ ] get_telemetry_metadata +- [ ] list_assessment_run_agents +- [ ] list_assessment_runs +- [ ] list_assessment_targets +- [ ] list_assessment_templates +- [ ] list_event_subscriptions +- [ ] list_findings +- [ ] list_rules_packages +- [ ] list_tags_for_resource +- [ ] preview_agents +- [ ] register_cross_account_access_role +- [ ] remove_attributes_from_findings +- [ ] set_tags_for_resource +- [ ] start_assessment_run +- [ ] stop_assessment_run +- [ ] subscribe_to_event +- [ ] unsubscribe_from_event +- [ ] update_assessment_target + +## iot - 31% implemented +- [ ] accept_certificate_transfer +- [X] add_thing_to_thing_group +- [ ] associate_targets_with_job +- [ ] attach_policy +- [X] attach_principal_policy +- [X] attach_thing_principal +- [ ] cancel_certificate_transfer +- [ ] cancel_job +- [ ] clear_default_authorizer +- [ ] create_authorizer +- [ ] create_certificate_from_csr +- [X] create_job +- [X] create_keys_and_certificate +- [ ] create_ota_update +- [X] create_policy +- [ ] create_policy_version +- [ ] create_role_alias +- [ ] create_stream +- [X] create_thing +- [X] create_thing_group +- [X] create_thing_type +- [ ] create_topic_rule +- [ ] delete_authorizer +- [ ] delete_ca_certificate +- [X] delete_certificate +- [ ] delete_ota_update +- [X] delete_policy +- [ ] delete_policy_version +- [ ] delete_registration_code +- [ ] delete_role_alias +- [ ] delete_stream +- [X] delete_thing +- [X] delete_thing_group +- [X] delete_thing_type +- [ ] delete_topic_rule +- [ ] delete_v2_logging_level +- [ ] deprecate_thing_type +- [ ] describe_authorizer +- [ ] describe_ca_certificate +- [X] describe_certificate +- [ ] describe_default_authorizer +- [ ] describe_endpoint +- [ ] describe_event_configurations +- [ ] describe_index +- [X] describe_job +- [ ] describe_job_execution +- [ ] describe_role_alias +- [ ] describe_stream +- [X] describe_thing +- [X] describe_thing_group +- [ ] describe_thing_registration_task +- [X] describe_thing_type +- [ ] detach_policy +- [X] detach_principal_policy +- [X] detach_thing_principal +- [ ] disable_topic_rule +- [ ] enable_topic_rule +- [ ] get_effective_policies +- [ ] get_indexing_configuration +- [X] get_job_document +- [ ] get_logging_options +- [ ] get_ota_update +- [X] get_policy +- [ ] get_policy_version +- [ ] get_registration_code +- [ ] get_topic_rule +- [ ] get_v2_logging_options +- [ ] list_attached_policies +- [ ] list_authorizers +- [ ] list_ca_certificates +- [X] list_certificates +- [ ] list_certificates_by_ca +- [ ] list_indices +- [ ] list_job_executions_for_job +- [ ] list_job_executions_for_thing +- [ ] list_jobs +- [ ] 
list_ota_updates +- [ ] list_outgoing_certificates +- [X] list_policies +- [X] list_policy_principals +- [ ] list_policy_versions +- [X] list_principal_policies +- [X] list_principal_things +- [ ] list_role_aliases +- [ ] list_streams +- [ ] list_targets_for_policy +- [X] list_thing_groups +- [X] list_thing_groups_for_thing +- [X] list_thing_principals +- [ ] list_thing_registration_task_reports +- [ ] list_thing_registration_tasks +- [X] list_thing_types +- [X] list_things +- [X] list_things_in_thing_group +- [ ] list_topic_rules +- [ ] list_v2_logging_levels +- [ ] register_ca_certificate +- [ ] register_certificate +- [ ] register_thing +- [ ] reject_certificate_transfer +- [X] remove_thing_from_thing_group +- [ ] replace_topic_rule +- [ ] search_index +- [ ] set_default_authorizer +- [ ] set_default_policy_version +- [ ] set_logging_options +- [ ] set_v2_logging_level +- [ ] set_v2_logging_options +- [ ] start_thing_registration_task +- [ ] stop_thing_registration_task +- [ ] test_authorization +- [ ] test_invoke_authorizer +- [ ] transfer_certificate +- [ ] update_authorizer +- [ ] update_ca_certificate +- [X] update_certificate +- [ ] update_event_configurations +- [ ] update_indexing_configuration +- [ ] update_role_alias +- [ ] update_stream +- [X] update_thing +- [X] update_thing_group +- [X] update_thing_groups_for_thing + +## iot-data - 0% implemented +- [ ] delete_thing_shadow +- [ ] get_thing_shadow +- [ ] publish +- [ ] update_thing_shadow + +## iot-jobs-data - 0% implemented +- [ ] describe_job_execution +- [ ] get_pending_job_executions +- [ ] start_next_pending_job_execution +- [ ] update_job_execution + +## kinesis - 56% implemented +- [X] add_tags_to_stream +- [X] create_stream +- [ ] decrease_stream_retention_period +- [X] delete_stream +- [ ] describe_limits +- [X] describe_stream +- [ ] describe_stream_summary +- [ ] disable_enhanced_monitoring +- [ ] enable_enhanced_monitoring +- [X] get_records +- [X] get_shard_iterator +- [ ] increase_stream_retention_period +- [ ] list_shards +- [X] list_streams +- [X] list_tags_for_stream +- [X] merge_shards +- [X] put_record +- [X] put_records +- [X] remove_tags_from_stream +- [X] split_shard +- [ ] start_stream_encryption +- [ ] stop_stream_encryption +- [ ] update_shard_count + +## kinesis-video-archived-media - 0% implemented +- [ ] get_media_for_fragment_list +- [ ] list_fragments + +## kinesis-video-media - 0% implemented +- [ ] get_media + +## kinesisanalytics - 0% implemented +- [ ] add_application_cloud_watch_logging_option +- [ ] add_application_input +- [ ] add_application_input_processing_configuration +- [ ] add_application_output +- [ ] add_application_reference_data_source +- [ ] create_application +- [ ] delete_application +- [ ] delete_application_cloud_watch_logging_option +- [ ] delete_application_input_processing_configuration +- [ ] delete_application_output +- [ ] delete_application_reference_data_source +- [ ] describe_application +- [ ] discover_input_schema +- [ ] list_applications +- [ ] start_application +- [ ] stop_application +- [ ] update_application + +## kinesisvideo - 0% implemented +- [ ] create_stream +- [ ] delete_stream +- [ ] describe_stream +- [ ] get_data_endpoint +- [ ] list_streams +- [ ] list_tags_for_stream +- [ ] tag_stream +- [ ] untag_stream +- [ ] update_data_retention +- [ ] update_stream + +## kms - 25% implemented +- [ ] cancel_key_deletion +- [ ] create_alias +- [ ] create_grant +- [X] create_key +- [ ] decrypt +- [X] delete_alias +- [ ] delete_imported_key_material +- [X] 
describe_key +- [ ] disable_key +- [X] disable_key_rotation +- [ ] enable_key +- [X] enable_key_rotation +- [ ] encrypt +- [ ] generate_data_key +- [ ] generate_data_key_without_plaintext +- [ ] generate_random +- [X] get_key_policy +- [X] get_key_rotation_status +- [ ] get_parameters_for_import +- [ ] import_key_material +- [ ] list_aliases +- [ ] list_grants +- [ ] list_key_policies +- [X] list_keys +- [ ] list_resource_tags +- [ ] list_retirable_grants +- [X] put_key_policy +- [ ] re_encrypt +- [ ] retire_grant +- [ ] revoke_grant +- [ ] schedule_key_deletion +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_alias +- [ ] update_key_description + +## lambda - 0% implemented +- [ ] add_permission +- [ ] create_alias +- [ ] create_event_source_mapping +- [ ] create_function +- [ ] delete_alias +- [ ] delete_event_source_mapping +- [ ] delete_function +- [ ] delete_function_concurrency +- [ ] get_account_settings +- [ ] get_alias +- [ ] get_event_source_mapping +- [ ] get_function +- [ ] get_function_configuration +- [ ] get_policy +- [ ] invoke +- [ ] invoke_async +- [ ] list_aliases +- [ ] list_event_source_mappings +- [ ] list_functions +- [ ] list_tags +- [ ] list_versions_by_function +- [ ] publish_version +- [ ] put_function_concurrency +- [ ] remove_permission +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_alias +- [ ] update_event_source_mapping +- [ ] update_function_code +- [ ] update_function_configuration + +## lex-models - 0% implemented +- [ ] create_bot_version +- [ ] create_intent_version +- [ ] create_slot_type_version +- [ ] delete_bot +- [ ] delete_bot_alias +- [ ] delete_bot_channel_association +- [ ] delete_bot_version +- [ ] delete_intent +- [ ] delete_intent_version +- [ ] delete_slot_type +- [ ] delete_slot_type_version +- [ ] delete_utterances +- [ ] get_bot +- [ ] get_bot_alias +- [ ] get_bot_aliases +- [ ] get_bot_channel_association +- [ ] get_bot_channel_associations +- [ ] get_bot_versions +- [ ] get_bots +- [ ] get_builtin_intent +- [ ] get_builtin_intents +- [ ] get_builtin_slot_types +- [ ] get_export +- [ ] get_import +- [ ] get_intent +- [ ] get_intent_versions +- [ ] get_intents +- [ ] get_slot_type +- [ ] get_slot_type_versions +- [ ] get_slot_types +- [ ] get_utterances_view +- [ ] put_bot +- [ ] put_bot_alias +- [ ] put_intent +- [ ] put_slot_type +- [ ] start_import + +## lex-runtime - 0% implemented +- [ ] post_content +- [ ] post_text + +## lightsail - 0% implemented +- [ ] allocate_static_ip +- [ ] attach_disk +- [ ] attach_instances_to_load_balancer +- [ ] attach_load_balancer_tls_certificate +- [ ] attach_static_ip +- [ ] close_instance_public_ports +- [ ] create_disk +- [ ] create_disk_from_snapshot +- [ ] create_disk_snapshot +- [ ] create_domain +- [ ] create_domain_entry +- [ ] create_instance_snapshot +- [ ] create_instances +- [ ] create_instances_from_snapshot +- [ ] create_key_pair +- [ ] create_load_balancer +- [ ] create_load_balancer_tls_certificate +- [ ] delete_disk +- [ ] delete_disk_snapshot +- [ ] delete_domain +- [ ] delete_domain_entry +- [ ] delete_instance +- [ ] delete_instance_snapshot +- [ ] delete_key_pair +- [ ] delete_load_balancer +- [ ] delete_load_balancer_tls_certificate +- [ ] detach_disk +- [ ] detach_instances_from_load_balancer +- [ ] detach_static_ip +- [ ] download_default_key_pair +- [ ] get_active_names +- [ ] get_blueprints +- [ ] get_bundles +- [ ] get_disk +- [ ] get_disk_snapshot +- [ ] get_disk_snapshots +- [ ] get_disks +- [ ] get_domain +- [ ] get_domains +- [ ] get_instance +- [ ] 
get_instance_access_details +- [ ] get_instance_metric_data +- [ ] get_instance_port_states +- [ ] get_instance_snapshot +- [ ] get_instance_snapshots +- [ ] get_instance_state +- [ ] get_instances +- [ ] get_key_pair +- [ ] get_key_pairs +- [ ] get_load_balancer +- [ ] get_load_balancer_metric_data +- [ ] get_load_balancer_tls_certificates +- [ ] get_load_balancers +- [ ] get_operation +- [ ] get_operations +- [ ] get_operations_for_resource +- [ ] get_regions +- [ ] get_static_ip +- [ ] get_static_ips +- [ ] import_key_pair +- [ ] is_vpc_peered +- [ ] open_instance_public_ports +- [ ] peer_vpc +- [ ] put_instance_public_ports +- [ ] reboot_instance +- [ ] release_static_ip +- [ ] start_instance +- [ ] stop_instance +- [ ] unpeer_vpc +- [ ] update_domain_entry +- [ ] update_load_balancer_attribute + +## logs - 27% implemented +- [ ] associate_kms_key +- [ ] cancel_export_task +- [ ] create_export_task +- [X] create_log_group +- [X] create_log_stream +- [ ] delete_destination +- [X] delete_log_group +- [X] delete_log_stream +- [ ] delete_metric_filter +- [ ] delete_resource_policy +- [ ] delete_retention_policy +- [ ] delete_subscription_filter +- [ ] describe_destinations +- [ ] describe_export_tasks +- [X] describe_log_groups +- [X] describe_log_streams +- [ ] describe_metric_filters +- [ ] describe_resource_policies +- [ ] describe_subscription_filters +- [ ] disassociate_kms_key +- [X] filter_log_events +- [X] get_log_events +- [ ] list_tags_log_group +- [ ] put_destination +- [ ] put_destination_policy +- [X] put_log_events +- [ ] put_metric_filter +- [ ] put_resource_policy +- [ ] put_retention_policy +- [ ] put_subscription_filter +- [ ] tag_log_group +- [ ] test_metric_filter +- [ ] untag_log_group + +## machinelearning - 0% implemented +- [ ] add_tags +- [ ] create_batch_prediction +- [ ] create_data_source_from_rds +- [ ] create_data_source_from_redshift +- [ ] create_data_source_from_s3 +- [ ] create_evaluation +- [ ] create_ml_model +- [ ] create_realtime_endpoint +- [ ] delete_batch_prediction +- [ ] delete_data_source +- [ ] delete_evaluation +- [ ] delete_ml_model +- [ ] delete_realtime_endpoint +- [ ] delete_tags +- [ ] describe_batch_predictions +- [ ] describe_data_sources +- [ ] describe_evaluations +- [ ] describe_ml_models +- [ ] describe_tags +- [ ] get_batch_prediction +- [ ] get_data_source +- [ ] get_evaluation +- [ ] get_ml_model +- [ ] predict +- [ ] update_batch_prediction +- [ ] update_data_source +- [ ] update_evaluation +- [ ] update_ml_model + +## marketplace-entitlement - 0% implemented +- [ ] get_entitlements + +## marketplacecommerceanalytics - 0% implemented +- [ ] generate_data_set +- [ ] start_support_data_export + +## mediaconvert - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] create_job_template +- [ ] create_preset +- [ ] create_queue +- [ ] delete_job_template +- [ ] delete_preset +- [ ] delete_queue +- [ ] describe_endpoints +- [ ] get_job +- [ ] get_job_template +- [ ] get_preset +- [ ] get_queue +- [ ] list_job_templates +- [ ] list_jobs +- [ ] list_presets +- [ ] list_queues +- [ ] update_job_template +- [ ] update_preset +- [ ] update_queue + +## medialive - 0% implemented +- [ ] create_channel +- [ ] create_input +- [ ] create_input_security_group +- [ ] delete_channel +- [ ] delete_input +- [ ] delete_input_security_group +- [ ] describe_channel +- [ ] describe_input +- [ ] describe_input_security_group +- [ ] list_channels +- [ ] list_input_security_groups +- [ ] list_inputs +- [ ] start_channel +- [ ] stop_channel +- [ ] 
update_channel +- [ ] update_input +- [ ] update_input_security_group + +## mediapackage - 0% implemented +- [ ] create_channel +- [ ] create_origin_endpoint +- [ ] delete_channel +- [ ] delete_origin_endpoint +- [ ] describe_channel +- [ ] describe_origin_endpoint +- [ ] list_channels +- [ ] list_origin_endpoints +- [ ] rotate_channel_credentials +- [ ] update_channel +- [ ] update_origin_endpoint + +## mediastore - 0% implemented +- [ ] create_container +- [ ] delete_container +- [ ] delete_container_policy +- [ ] delete_cors_policy +- [ ] describe_container +- [ ] get_container_policy +- [ ] get_cors_policy +- [ ] list_containers +- [ ] put_container_policy +- [ ] put_cors_policy + +## mediastore-data - 0% implemented +- [ ] delete_object +- [ ] describe_object +- [ ] get_object +- [ ] list_items +- [ ] put_object + +## meteringmarketplace - 0% implemented +- [ ] batch_meter_usage +- [ ] meter_usage +- [ ] resolve_customer + +## mgh - 0% implemented +- [ ] associate_created_artifact +- [ ] associate_discovered_resource +- [ ] create_progress_update_stream +- [ ] delete_progress_update_stream +- [ ] describe_application_state +- [ ] describe_migration_task +- [ ] disassociate_created_artifact +- [ ] disassociate_discovered_resource +- [ ] import_migration_task +- [ ] list_created_artifacts +- [ ] list_discovered_resources +- [ ] list_migration_tasks +- [ ] list_progress_update_streams +- [ ] notify_application_state +- [ ] notify_migration_task_state +- [ ] put_resource_attributes + +## mobile - 0% implemented +- [ ] create_project +- [ ] delete_project +- [ ] describe_bundle +- [ ] describe_project +- [ ] export_bundle +- [ ] export_project +- [ ] list_bundles +- [ ] list_projects +- [ ] update_project + +## mq - 0% implemented +- [ ] create_broker +- [ ] create_configuration +- [ ] create_user +- [ ] delete_broker +- [ ] delete_user +- [ ] describe_broker +- [ ] describe_configuration +- [ ] describe_configuration_revision +- [ ] describe_user +- [ ] list_brokers +- [ ] list_configuration_revisions +- [ ] list_configurations +- [ ] list_users +- [ ] reboot_broker +- [ ] update_broker +- [ ] update_configuration +- [ ] update_user + +## mturk - 0% implemented +- [ ] accept_qualification_request +- [ ] approve_assignment +- [ ] associate_qualification_with_worker +- [ ] create_additional_assignments_for_hit +- [ ] create_hit +- [ ] create_hit_type +- [ ] create_hit_with_hit_type +- [ ] create_qualification_type +- [ ] create_worker_block +- [ ] delete_hit +- [ ] delete_qualification_type +- [ ] delete_worker_block +- [ ] disassociate_qualification_from_worker +- [ ] get_account_balance +- [ ] get_assignment +- [ ] get_file_upload_url +- [ ] get_hit +- [ ] get_qualification_score +- [ ] get_qualification_type +- [ ] list_assignments_for_hit +- [ ] list_bonus_payments +- [ ] list_hits +- [ ] list_hits_for_qualification_type +- [ ] list_qualification_requests +- [ ] list_qualification_types +- [ ] list_review_policy_results_for_hit +- [ ] list_reviewable_hits +- [ ] list_worker_blocks +- [ ] list_workers_with_qualification_type +- [ ] notify_workers +- [ ] reject_assignment +- [ ] reject_qualification_request +- [ ] send_bonus +- [ ] send_test_event_notification +- [ ] update_expiration_for_hit +- [ ] update_hit_review_status +- [ ] update_hit_type_of_hit +- [ ] update_notification_settings +- [ ] update_qualification_type + +## opsworks - 12% implemented +- [ ] assign_instance +- [ ] assign_volume +- [ ] associate_elastic_ip +- [ ] attach_elastic_load_balancer +- [ ] clone_stack +- [X] 
create_app +- [ ] create_deployment +- [X] create_instance +- [X] create_layer +- [X] create_stack +- [ ] create_user_profile +- [ ] delete_app +- [ ] delete_instance +- [ ] delete_layer +- [ ] delete_stack +- [ ] delete_user_profile +- [ ] deregister_ecs_cluster +- [ ] deregister_elastic_ip +- [ ] deregister_instance +- [ ] deregister_rds_db_instance +- [ ] deregister_volume +- [ ] describe_agent_versions +- [X] describe_apps +- [ ] describe_commands +- [ ] describe_deployments +- [ ] describe_ecs_clusters +- [ ] describe_elastic_ips +- [ ] describe_elastic_load_balancers +- [X] describe_instances +- [X] describe_layers +- [ ] describe_load_based_auto_scaling +- [ ] describe_my_user_profile +- [ ] describe_operating_systems +- [ ] describe_permissions +- [ ] describe_raid_arrays +- [ ] describe_rds_db_instances +- [ ] describe_service_errors +- [ ] describe_stack_provisioning_parameters +- [ ] describe_stack_summary +- [X] describe_stacks +- [ ] describe_time_based_auto_scaling +- [ ] describe_user_profiles +- [ ] describe_volumes +- [ ] detach_elastic_load_balancer +- [ ] disassociate_elastic_ip +- [ ] get_hostname_suggestion +- [ ] grant_access +- [ ] list_tags +- [ ] reboot_instance +- [ ] register_ecs_cluster +- [ ] register_elastic_ip +- [ ] register_instance +- [ ] register_rds_db_instance +- [ ] register_volume +- [ ] set_load_based_auto_scaling +- [ ] set_permission +- [ ] set_time_based_auto_scaling +- [X] start_instance +- [ ] start_stack +- [ ] stop_instance +- [ ] stop_stack +- [ ] tag_resource +- [ ] unassign_instance +- [ ] unassign_volume +- [ ] untag_resource +- [ ] update_app +- [ ] update_elastic_ip +- [ ] update_instance +- [ ] update_layer +- [ ] update_my_user_profile +- [ ] update_rds_db_instance +- [ ] update_stack +- [ ] update_user_profile +- [ ] update_volume + +## opsworkscm - 0% implemented +- [ ] associate_node +- [ ] create_backup +- [ ] create_server +- [ ] delete_backup +- [ ] delete_server +- [ ] describe_account_attributes +- [ ] describe_backups +- [ ] describe_events +- [ ] describe_node_association_status +- [ ] describe_servers +- [ ] disassociate_node +- [ ] restore_server +- [ ] start_maintenance +- [ ] update_server +- [ ] update_server_engine_attributes + +## organizations - 30% implemented +- [ ] accept_handshake +- [ ] attach_policy +- [ ] cancel_handshake +- [X] create_account +- [X] create_organization +- [X] create_organizational_unit +- [ ] create_policy +- [ ] decline_handshake +- [ ] delete_organization +- [ ] delete_organizational_unit +- [ ] delete_policy +- [X] describe_account +- [ ] describe_create_account_status +- [ ] describe_handshake +- [X] describe_organization +- [X] describe_organizational_unit +- [ ] describe_policy +- [ ] detach_policy +- [ ] disable_aws_service_access +- [ ] disable_policy_type +- [ ] enable_all_features +- [ ] enable_aws_service_access +- [ ] enable_policy_type +- [ ] invite_account_to_organization +- [ ] leave_organization +- [X] list_accounts +- [X] list_accounts_for_parent +- [ ] list_aws_service_access_for_organization +- [X] list_children +- [ ] list_create_account_status +- [ ] list_handshakes_for_account +- [ ] list_handshakes_for_organization +- [X] list_organizational_units_for_parent +- [X] list_parents +- [ ] list_policies +- [ ] list_policies_for_target +- [X] list_roots +- [ ] list_targets_for_policy +- [X] move_account +- [ ] remove_account_from_organization +- [ ] update_organizational_unit +- [ ] update_policy + +## pinpoint - 0% implemented +- [ ] create_app +- [ ] create_campaign +- [ ] 
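The `organizations` backend above is newly wired up by this changeset (see the `moto/__init__.py` and `moto/backends.py` hunks further down). A minimal sketch of the checked endpoints, with illustrative names and the usual AWS response shapes assumed:

    import boto3
    from moto import mock_organizations

    @mock_organizations
    def test_create_organizational_unit():
        client = boto3.client("organizations", region_name="us-east-1")
        client.create_organization(FeatureSet="ALL")
        # list_roots, create_organizational_unit, and list_parents are checked above.
        root_id = client.list_roots()["Roots"][0]["Id"]
        ou = client.create_organizational_unit(ParentId=root_id, Name="Engineering")
        parents = client.list_parents(ChildId=ou["OrganizationalUnit"]["Id"])
        assert parents["Parents"][0]["Id"] == root_id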
create_export_job +- [ ] create_import_job +- [ ] create_segment +- [ ] delete_adm_channel +- [ ] delete_apns_channel +- [ ] delete_apns_sandbox_channel +- [ ] delete_apns_voip_channel +- [ ] delete_apns_voip_sandbox_channel +- [ ] delete_app +- [ ] delete_baidu_channel +- [ ] delete_campaign +- [ ] delete_email_channel +- [ ] delete_endpoint +- [ ] delete_event_stream +- [ ] delete_gcm_channel +- [ ] delete_segment +- [ ] delete_sms_channel +- [ ] get_adm_channel +- [ ] get_apns_channel +- [ ] get_apns_sandbox_channel +- [ ] get_apns_voip_channel +- [ ] get_apns_voip_sandbox_channel +- [ ] get_app +- [ ] get_application_settings +- [ ] get_apps +- [ ] get_baidu_channel +- [ ] get_campaign +- [ ] get_campaign_activities +- [ ] get_campaign_version +- [ ] get_campaign_versions +- [ ] get_campaigns +- [ ] get_email_channel +- [ ] get_endpoint +- [ ] get_event_stream +- [ ] get_export_job +- [ ] get_export_jobs +- [ ] get_gcm_channel +- [ ] get_import_job +- [ ] get_import_jobs +- [ ] get_segment +- [ ] get_segment_export_jobs +- [ ] get_segment_import_jobs +- [ ] get_segment_version +- [ ] get_segment_versions +- [ ] get_segments +- [ ] get_sms_channel +- [ ] put_event_stream +- [ ] send_messages +- [ ] send_users_messages +- [ ] update_adm_channel +- [ ] update_apns_channel +- [ ] update_apns_sandbox_channel +- [ ] update_apns_voip_channel +- [ ] update_apns_voip_sandbox_channel +- [ ] update_application_settings +- [ ] update_baidu_channel +- [ ] update_campaign +- [ ] update_email_channel +- [ ] update_endpoint +- [ ] update_endpoints_batch +- [ ] update_gcm_channel +- [ ] update_segment +- [ ] update_sms_channel + +## polly - 83% implemented +- [X] delete_lexicon +- [X] describe_voices +- [X] get_lexicon +- [X] list_lexicons +- [X] put_lexicon +- [ ] synthesize_speech + +## pricing - 0% implemented +- [ ] describe_services +- [ ] get_attribute_values +- [ ] get_products + +## rds - 0% implemented +- [ ] add_role_to_db_cluster +- [ ] add_source_identifier_to_subscription +- [ ] add_tags_to_resource +- [ ] apply_pending_maintenance_action +- [ ] authorize_db_security_group_ingress +- [ ] copy_db_cluster_parameter_group +- [ ] copy_db_cluster_snapshot +- [ ] copy_db_parameter_group +- [ ] copy_db_snapshot +- [ ] copy_option_group +- [ ] create_db_cluster +- [ ] create_db_cluster_parameter_group +- [ ] create_db_cluster_snapshot +- [ ] create_db_instance +- [ ] create_db_instance_read_replica +- [ ] create_db_parameter_group +- [ ] create_db_security_group +- [ ] create_db_snapshot +- [ ] create_db_subnet_group +- [ ] create_event_subscription +- [ ] create_option_group +- [ ] delete_db_cluster +- [ ] delete_db_cluster_parameter_group +- [ ] delete_db_cluster_snapshot +- [ ] delete_db_instance +- [ ] delete_db_parameter_group +- [ ] delete_db_security_group +- [ ] delete_db_snapshot +- [ ] delete_db_subnet_group +- [ ] delete_event_subscription +- [ ] delete_option_group +- [ ] describe_account_attributes +- [ ] describe_certificates +- [ ] describe_db_cluster_parameter_groups +- [ ] describe_db_cluster_parameters +- [ ] describe_db_cluster_snapshot_attributes +- [ ] describe_db_cluster_snapshots +- [ ] describe_db_clusters +- [ ] describe_db_engine_versions +- [ ] describe_db_instances +- [ ] describe_db_log_files +- [ ] describe_db_parameter_groups +- [ ] describe_db_parameters +- [ ] describe_db_security_groups +- [ ] describe_db_snapshot_attributes +- [ ] describe_db_snapshots +- [ ] describe_db_subnet_groups +- [ ] describe_engine_default_cluster_parameters +- [ ] 
describe_engine_default_parameters +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_option_group_options +- [ ] describe_option_groups +- [ ] describe_orderable_db_instance_options +- [ ] describe_pending_maintenance_actions +- [ ] describe_reserved_db_instances +- [ ] describe_reserved_db_instances_offerings +- [ ] describe_source_regions +- [ ] describe_valid_db_instance_modifications +- [ ] download_db_log_file_portion +- [ ] failover_db_cluster +- [ ] list_tags_for_resource +- [ ] modify_db_cluster +- [ ] modify_db_cluster_parameter_group +- [ ] modify_db_cluster_snapshot_attribute +- [ ] modify_db_instance +- [ ] modify_db_parameter_group +- [ ] modify_db_snapshot +- [ ] modify_db_snapshot_attribute +- [ ] modify_db_subnet_group +- [ ] modify_event_subscription +- [ ] modify_option_group +- [ ] promote_read_replica +- [ ] promote_read_replica_db_cluster +- [ ] purchase_reserved_db_instances_offering +- [ ] reboot_db_instance +- [ ] remove_role_from_db_cluster +- [ ] remove_source_identifier_from_subscription +- [ ] remove_tags_from_resource +- [ ] reset_db_cluster_parameter_group +- [ ] reset_db_parameter_group +- [ ] restore_db_cluster_from_s3 +- [ ] restore_db_cluster_from_snapshot +- [ ] restore_db_cluster_to_point_in_time +- [ ] restore_db_instance_from_db_snapshot +- [ ] restore_db_instance_from_s3 +- [ ] restore_db_instance_to_point_in_time +- [ ] revoke_db_security_group_ingress +- [ ] start_db_instance +- [ ] stop_db_instance + +## redshift - 41% implemented +- [ ] authorize_cluster_security_group_ingress +- [ ] authorize_snapshot_access +- [ ] copy_cluster_snapshot +- [X] create_cluster +- [X] create_cluster_parameter_group +- [X] create_cluster_security_group +- [X] create_cluster_snapshot +- [X] create_cluster_subnet_group +- [ ] create_event_subscription +- [ ] create_hsm_client_certificate +- [ ] create_hsm_configuration +- [X] create_snapshot_copy_grant +- [X] create_tags +- [X] delete_cluster +- [X] delete_cluster_parameter_group +- [X] delete_cluster_security_group +- [X] delete_cluster_snapshot +- [X] delete_cluster_subnet_group +- [ ] delete_event_subscription +- [ ] delete_hsm_client_certificate +- [ ] delete_hsm_configuration +- [X] delete_snapshot_copy_grant +- [X] delete_tags +- [X] describe_cluster_parameter_groups +- [ ] describe_cluster_parameters +- [X] describe_cluster_security_groups +- [X] describe_cluster_snapshots +- [X] describe_cluster_subnet_groups +- [ ] describe_cluster_versions +- [X] describe_clusters +- [ ] describe_default_cluster_parameters +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_hsm_client_certificates +- [ ] describe_hsm_configurations +- [ ] describe_logging_status +- [ ] describe_orderable_cluster_options +- [ ] describe_reserved_node_offerings +- [ ] describe_reserved_nodes +- [ ] describe_resize +- [X] describe_snapshot_copy_grants +- [ ] describe_table_restore_status +- [X] describe_tags +- [ ] disable_logging +- [X] disable_snapshot_copy +- [ ] enable_logging +- [X] enable_snapshot_copy +- [ ] get_cluster_credentials +- [X] modify_cluster +- [ ] modify_cluster_iam_roles +- [ ] modify_cluster_parameter_group +- [ ] modify_cluster_subnet_group +- [ ] modify_event_subscription +- [X] modify_snapshot_copy_retention_period +- [ ] purchase_reserved_node_offering +- [ ] reboot_cluster +- [ ] reset_cluster_parameter_group +- [X] restore_from_cluster_snapshot +- [ ] restore_table_from_cluster_snapshot +- [ ] 
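For the `redshift` section above, the checked create/describe pair is enough for a round trip. A sketch, assuming the mock fills in sensible defaults for fields not supplied:

    import boto3
    from moto import mock_redshift

    @mock_redshift
    def test_create_and_describe_cluster():
        client = boto3.client("redshift", region_name="us-east-1")
        # Only the required create_cluster parameters are passed here.
        client.create_cluster(
            ClusterIdentifier="warehouse",
            NodeType="dc1.large",
            MasterUsername="admin",
            MasterUserPassword="Password1",
        )
        clusters = client.describe_clusters()["Clusters"]
        assert clusters[0]["ClusterIdentifier"] == "warehouse"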
revoke_cluster_security_group_ingress +- [ ] revoke_snapshot_access +- [ ] rotate_encryption_key + +## rekognition - 0% implemented +- [ ] compare_faces +- [ ] create_collection +- [ ] create_stream_processor +- [ ] delete_collection +- [ ] delete_faces +- [ ] delete_stream_processor +- [ ] describe_stream_processor +- [ ] detect_faces +- [ ] detect_labels +- [ ] detect_moderation_labels +- [ ] detect_text +- [ ] get_celebrity_info +- [ ] get_celebrity_recognition +- [ ] get_content_moderation +- [ ] get_face_detection +- [ ] get_face_search +- [ ] get_label_detection +- [ ] get_person_tracking +- [ ] index_faces +- [ ] list_collections +- [ ] list_faces +- [ ] list_stream_processors +- [ ] recognize_celebrities +- [ ] search_faces +- [ ] search_faces_by_image +- [ ] start_celebrity_recognition +- [ ] start_content_moderation +- [ ] start_face_detection +- [ ] start_face_search +- [ ] start_label_detection +- [ ] start_person_tracking +- [ ] start_stream_processor +- [ ] stop_stream_processor + +## resource-groups - 0% implemented +- [ ] create_group +- [ ] delete_group +- [ ] get_group +- [ ] get_group_query +- [ ] get_tags +- [ ] list_group_resources +- [ ] list_groups +- [ ] search_resources +- [ ] tag +- [ ] untag +- [ ] update_group +- [ ] update_group_query + +## resourcegroupstaggingapi - 60% implemented +- [X] get_resources +- [X] get_tag_keys +- [X] get_tag_values +- [ ] tag_resources +- [ ] untag_resources + +## route53 - 12% implemented +- [ ] associate_vpc_with_hosted_zone +- [ ] change_resource_record_sets +- [X] change_tags_for_resource +- [X] create_health_check +- [X] create_hosted_zone +- [ ] create_query_logging_config +- [ ] create_reusable_delegation_set +- [ ] create_traffic_policy +- [ ] create_traffic_policy_instance +- [ ] create_traffic_policy_version +- [ ] create_vpc_association_authorization +- [X] delete_health_check +- [X] delete_hosted_zone +- [ ] delete_query_logging_config +- [ ] delete_reusable_delegation_set +- [ ] delete_traffic_policy +- [ ] delete_traffic_policy_instance +- [ ] delete_vpc_association_authorization +- [ ] disassociate_vpc_from_hosted_zone +- [ ] get_account_limit +- [ ] get_change +- [ ] get_checker_ip_ranges +- [ ] get_geo_location +- [ ] get_health_check +- [ ] get_health_check_count +- [ ] get_health_check_last_failure_reason +- [ ] get_health_check_status +- [X] get_hosted_zone +- [ ] get_hosted_zone_count +- [ ] get_hosted_zone_limit +- [ ] get_query_logging_config +- [ ] get_reusable_delegation_set +- [ ] get_reusable_delegation_set_limit +- [ ] get_traffic_policy +- [ ] get_traffic_policy_instance +- [ ] get_traffic_policy_instance_count +- [ ] list_geo_locations +- [ ] list_health_checks +- [ ] list_hosted_zones +- [ ] list_hosted_zones_by_name +- [ ] list_query_logging_configs +- [ ] list_resource_record_sets +- [ ] list_reusable_delegation_sets +- [X] list_tags_for_resource +- [ ] list_tags_for_resources +- [ ] list_traffic_policies +- [ ] list_traffic_policy_instances +- [ ] list_traffic_policy_instances_by_hosted_zone +- [ ] list_traffic_policy_instances_by_policy +- [ ] list_traffic_policy_versions +- [ ] list_vpc_association_authorizations +- [ ] test_dns_answer +- [ ] update_health_check +- [ ] update_hosted_zone_comment +- [ ] update_traffic_policy_comment +- [ ] update_traffic_policy_instance + +## route53domains - 0% implemented +- [ ] check_domain_availability +- [ ] check_domain_transferability +- [ ] delete_tags_for_domain +- [ ] disable_domain_auto_renew +- [ ] disable_domain_transfer_lock +- [ ] 
enable_domain_auto_renew +- [ ] enable_domain_transfer_lock +- [ ] get_contact_reachability_status +- [ ] get_domain_detail +- [ ] get_domain_suggestions +- [ ] get_operation_detail +- [ ] list_domains +- [ ] list_operations +- [ ] list_tags_for_domain +- [ ] register_domain +- [ ] renew_domain +- [ ] resend_contact_reachability_email +- [ ] retrieve_domain_auth_code +- [ ] transfer_domain +- [ ] update_domain_contact +- [ ] update_domain_contact_privacy +- [ ] update_domain_nameservers +- [ ] update_tags_for_domain +- [ ] view_billing + +## s3 - 15% implemented +- [ ] abort_multipart_upload +- [ ] complete_multipart_upload +- [ ] copy_object +- [X] create_bucket +- [ ] create_multipart_upload +- [X] delete_bucket +- [ ] delete_bucket_analytics_configuration +- [X] delete_bucket_cors +- [ ] delete_bucket_encryption +- [ ] delete_bucket_inventory_configuration +- [ ] delete_bucket_lifecycle +- [ ] delete_bucket_metrics_configuration +- [X] delete_bucket_policy +- [ ] delete_bucket_replication +- [X] delete_bucket_tagging +- [ ] delete_bucket_website +- [ ] delete_object +- [ ] delete_object_tagging +- [ ] delete_objects +- [ ] get_bucket_accelerate_configuration +- [X] get_bucket_acl +- [ ] get_bucket_analytics_configuration +- [ ] get_bucket_cors +- [ ] get_bucket_encryption +- [ ] get_bucket_inventory_configuration +- [ ] get_bucket_lifecycle +- [ ] get_bucket_lifecycle_configuration +- [ ] get_bucket_location +- [ ] get_bucket_logging +- [ ] get_bucket_metrics_configuration +- [ ] get_bucket_notification +- [ ] get_bucket_notification_configuration +- [X] get_bucket_policy +- [ ] get_bucket_replication +- [ ] get_bucket_request_payment +- [ ] get_bucket_tagging +- [X] get_bucket_versioning +- [ ] get_bucket_website +- [ ] get_object +- [ ] get_object_acl +- [ ] get_object_tagging +- [ ] get_object_torrent +- [ ] head_bucket +- [ ] head_object +- [ ] list_bucket_analytics_configurations +- [ ] list_bucket_inventory_configurations +- [ ] list_bucket_metrics_configurations +- [ ] list_buckets +- [ ] list_multipart_uploads +- [ ] list_object_versions +- [ ] list_objects +- [ ] list_objects_v2 +- [ ] list_parts +- [ ] put_bucket_accelerate_configuration +- [ ] put_bucket_acl +- [ ] put_bucket_analytics_configuration +- [X] put_bucket_cors +- [ ] put_bucket_encryption +- [ ] put_bucket_inventory_configuration +- [ ] put_bucket_lifecycle +- [ ] put_bucket_lifecycle_configuration +- [X] put_bucket_logging +- [ ] put_bucket_metrics_configuration +- [ ] put_bucket_notification +- [X] put_bucket_notification_configuration +- [ ] put_bucket_policy +- [ ] put_bucket_replication +- [ ] put_bucket_request_payment +- [X] put_bucket_tagging +- [ ] put_bucket_versioning +- [ ] put_bucket_website +- [ ] put_object +- [ ] put_object_acl +- [ ] put_object_tagging +- [ ] restore_object +- [ ] select_object_content +- [ ] upload_part +- [ ] upload_part_copy + +## sagemaker - 0% implemented +- [ ] add_tags +- [ ] create_endpoint +- [ ] create_endpoint_config +- [ ] create_model +- [ ] create_notebook_instance +- [ ] create_notebook_instance_lifecycle_config +- [ ] create_presigned_notebook_instance_url +- [ ] create_training_job +- [ ] delete_endpoint +- [ ] delete_endpoint_config +- [ ] delete_model +- [ ] delete_notebook_instance +- [ ] delete_notebook_instance_lifecycle_config +- [ ] delete_tags +- [ ] describe_endpoint +- [ ] describe_endpoint_config +- [ ] describe_model +- [ ] describe_notebook_instance +- [ ] describe_notebook_instance_lifecycle_config +- [ ] describe_training_job +- [ ] 
list_endpoint_configs +- [ ] list_endpoints +- [ ] list_models +- [ ] list_notebook_instance_lifecycle_configs +- [ ] list_notebook_instances +- [ ] list_tags +- [ ] list_training_jobs +- [ ] start_notebook_instance +- [ ] stop_notebook_instance +- [ ] stop_training_job +- [ ] update_endpoint +- [ ] update_endpoint_weights_and_capacities +- [ ] update_notebook_instance +- [ ] update_notebook_instance_lifecycle_config + +## sagemaker-runtime - 0% implemented +- [ ] invoke_endpoint + +## sdb - 0% implemented +- [ ] batch_delete_attributes +- [ ] batch_put_attributes +- [ ] create_domain +- [ ] delete_attributes +- [ ] delete_domain +- [ ] domain_metadata +- [ ] get_attributes +- [ ] list_domains +- [ ] put_attributes +- [ ] select + +## secretsmanager - 33% implemented +- [ ] cancel_rotate_secret +- [X] create_secret +- [ ] delete_secret +- [X] describe_secret +- [X] get_random_password +- [X] get_secret_value +- [ ] list_secret_version_ids +- [ ] list_secrets +- [ ] put_secret_value +- [ ] restore_secret +- [X] rotate_secret +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_secret +- [ ] update_secret_version_stage + +## serverlessrepo - 0% implemented +- [ ] create_application +- [ ] create_application_version +- [ ] create_cloud_formation_change_set +- [ ] delete_application +- [ ] get_application +- [ ] get_application_policy +- [ ] list_application_versions +- [ ] list_applications +- [ ] put_application_policy +- [ ] update_application + +## servicecatalog - 0% implemented +- [ ] accept_portfolio_share +- [ ] associate_principal_with_portfolio +- [ ] associate_product_with_portfolio +- [ ] associate_tag_option_with_resource +- [ ] copy_product +- [ ] create_constraint +- [ ] create_portfolio +- [ ] create_portfolio_share +- [ ] create_product +- [ ] create_provisioned_product_plan +- [ ] create_provisioning_artifact +- [ ] create_tag_option +- [ ] delete_constraint +- [ ] delete_portfolio +- [ ] delete_portfolio_share +- [ ] delete_product +- [ ] delete_provisioned_product_plan +- [ ] delete_provisioning_artifact +- [ ] delete_tag_option +- [ ] describe_constraint +- [ ] describe_copy_product_status +- [ ] describe_portfolio +- [ ] describe_product +- [ ] describe_product_as_admin +- [ ] describe_product_view +- [ ] describe_provisioned_product +- [ ] describe_provisioned_product_plan +- [ ] describe_provisioning_artifact +- [ ] describe_provisioning_parameters +- [ ] describe_record +- [ ] describe_tag_option +- [ ] disassociate_principal_from_portfolio +- [ ] disassociate_product_from_portfolio +- [ ] disassociate_tag_option_from_resource +- [ ] execute_provisioned_product_plan +- [ ] list_accepted_portfolio_shares +- [ ] list_constraints_for_portfolio +- [ ] list_launch_paths +- [ ] list_portfolio_access +- [ ] list_portfolios +- [ ] list_portfolios_for_product +- [ ] list_principals_for_portfolio +- [ ] list_provisioned_product_plans +- [ ] list_provisioning_artifacts +- [ ] list_record_history +- [ ] list_resources_for_tag_option +- [ ] list_tag_options +- [ ] provision_product +- [ ] reject_portfolio_share +- [ ] scan_provisioned_products +- [ ] search_products +- [ ] search_products_as_admin +- [ ] search_provisioned_products +- [ ] terminate_provisioned_product +- [ ] update_constraint +- [ ] update_portfolio +- [ ] update_product +- [ ] update_provisioned_product +- [ ] update_provisioning_artifact +- [ ] update_tag_option + +## servicediscovery - 0% implemented +- [ ] create_private_dns_namespace +- [ ] create_public_dns_namespace +- [ ] create_service +- [ ] 
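The `secretsmanager` entries above cover enough for a store-and-read test. A minimal sketch; the secret name and value are made up:

    import boto3
    from moto import mock_secretsmanager

    @mock_secretsmanager
    def test_secret_roundtrip():
        client = boto3.client("secretsmanager", region_name="us-east-1")
        # create_secret and get_secret_value are both checked above;
        # describe_secret would be exercised the same way.
        client.create_secret(Name="db-password", SecretString="hunter2")
        value = client.get_secret_value(SecretId="db-password")
        assert value["SecretString"] == "hunter2"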
delete_namespace +- [ ] delete_service +- [ ] deregister_instance +- [ ] get_instance +- [ ] get_instances_health_status +- [ ] get_namespace +- [ ] get_operation +- [ ] get_service +- [ ] list_instances +- [ ] list_namespaces +- [ ] list_operations +- [ ] list_services +- [ ] register_instance +- [ ] update_instance_custom_health_status +- [ ] update_service + +## ses - 11% implemented +- [ ] clone_receipt_rule_set +- [ ] create_configuration_set +- [ ] create_configuration_set_event_destination +- [ ] create_configuration_set_tracking_options +- [ ] create_custom_verification_email_template +- [ ] create_receipt_filter +- [ ] create_receipt_rule +- [ ] create_receipt_rule_set +- [ ] create_template +- [ ] delete_configuration_set +- [ ] delete_configuration_set_event_destination +- [ ] delete_configuration_set_tracking_options +- [ ] delete_custom_verification_email_template +- [X] delete_identity +- [ ] delete_identity_policy +- [ ] delete_receipt_filter +- [ ] delete_receipt_rule +- [ ] delete_receipt_rule_set +- [ ] delete_template +- [ ] delete_verified_email_address +- [ ] describe_active_receipt_rule_set +- [ ] describe_configuration_set +- [ ] describe_receipt_rule +- [ ] describe_receipt_rule_set +- [ ] get_account_sending_enabled +- [ ] get_custom_verification_email_template +- [ ] get_identity_dkim_attributes +- [ ] get_identity_mail_from_domain_attributes +- [ ] get_identity_notification_attributes +- [ ] get_identity_policies +- [ ] get_identity_verification_attributes +- [X] get_send_quota +- [ ] get_send_statistics +- [ ] get_template +- [ ] list_configuration_sets +- [ ] list_custom_verification_email_templates +- [X] list_identities +- [ ] list_identity_policies +- [ ] list_receipt_filters +- [ ] list_receipt_rule_sets +- [ ] list_templates +- [X] list_verified_email_addresses +- [ ] put_identity_policy +- [ ] reorder_receipt_rule_set +- [ ] send_bounce +- [ ] send_bulk_templated_email +- [ ] send_custom_verification_email +- [X] send_email +- [X] send_raw_email +- [ ] send_templated_email +- [ ] set_active_receipt_rule_set +- [ ] set_identity_dkim_enabled +- [ ] set_identity_feedback_forwarding_enabled +- [ ] set_identity_headers_in_notifications_enabled +- [ ] set_identity_mail_from_domain +- [ ] set_identity_notification_topic +- [ ] set_receipt_rule_position +- [ ] test_render_template +- [ ] update_account_sending_enabled +- [ ] update_configuration_set_event_destination +- [ ] update_configuration_set_reputation_metrics_enabled +- [ ] update_configuration_set_sending_enabled +- [ ] update_configuration_set_tracking_options +- [ ] update_custom_verification_email_template +- [ ] update_receipt_rule +- [ ] update_template +- [ ] verify_domain_dkim +- [ ] verify_domain_identity +- [X] verify_email_address +- [X] verify_email_identity + +## shield - 0% implemented +- [ ] create_protection +- [ ] create_subscription +- [ ] delete_protection +- [ ] delete_subscription +- [ ] describe_attack +- [ ] describe_protection +- [ ] describe_subscription +- [ ] get_subscription_state +- [ ] list_attacks +- [ ] list_protections + +## sms - 0% implemented +- [ ] create_replication_job +- [ ] delete_replication_job +- [ ] delete_server_catalog +- [ ] disassociate_connector +- [ ] get_connectors +- [ ] get_replication_jobs +- [ ] get_replication_runs +- [ ] get_servers +- [ ] import_server_catalog +- [ ] start_on_demand_replication_run +- [ ] update_replication_job + +## snowball - 0% implemented +- [ ] cancel_cluster +- [ ] cancel_job +- [ ] create_address +- [ ] create_cluster +- [ 
] create_job +- [ ] describe_address +- [ ] describe_addresses +- [ ] describe_cluster +- [ ] describe_job +- [ ] get_job_manifest +- [ ] get_job_unlock_code +- [ ] get_snowball_usage +- [ ] list_cluster_jobs +- [ ] list_clusters +- [ ] list_jobs +- [ ] update_cluster +- [ ] update_job + +## sns - 53% implemented +- [ ] add_permission +- [ ] check_if_phone_number_is_opted_out +- [ ] confirm_subscription +- [X] create_platform_application +- [X] create_platform_endpoint +- [X] create_topic +- [X] delete_endpoint +- [X] delete_platform_application +- [X] delete_topic +- [ ] get_endpoint_attributes +- [ ] get_platform_application_attributes +- [ ] get_sms_attributes +- [X] get_subscription_attributes +- [ ] get_topic_attributes +- [X] list_endpoints_by_platform_application +- [ ] list_phone_numbers_opted_out +- [X] list_platform_applications +- [X] list_subscriptions +- [ ] list_subscriptions_by_topic +- [X] list_topics +- [ ] opt_in_phone_number +- [X] publish +- [ ] remove_permission +- [X] set_endpoint_attributes +- [ ] set_platform_application_attributes +- [ ] set_sms_attributes +- [X] set_subscription_attributes +- [ ] set_topic_attributes +- [X] subscribe +- [X] unsubscribe + +## sqs - 65% implemented +- [X] add_permission +- [X] change_message_visibility +- [ ] change_message_visibility_batch +- [X] create_queue +- [X] delete_message +- [ ] delete_message_batch +- [X] delete_queue +- [ ] get_queue_attributes +- [ ] get_queue_url +- [X] list_dead_letter_source_queues +- [ ] list_queue_tags +- [X] list_queues +- [X] purge_queue +- [ ] receive_message +- [X] remove_permission +- [X] send_message +- [ ] send_message_batch +- [X] set_queue_attributes +- [X] tag_queue +- [X] untag_queue + +## ssm - 11% implemented +- [X] add_tags_to_resource +- [ ] cancel_command +- [ ] create_activation +- [ ] create_association +- [ ] create_association_batch +- [ ] create_document +- [ ] create_maintenance_window +- [ ] create_patch_baseline +- [ ] create_resource_data_sync +- [ ] delete_activation +- [ ] delete_association +- [ ] delete_document +- [ ] delete_maintenance_window +- [X] delete_parameter +- [X] delete_parameters +- [ ] delete_patch_baseline +- [ ] delete_resource_data_sync +- [ ] deregister_managed_instance +- [ ] deregister_patch_baseline_for_patch_group +- [ ] deregister_target_from_maintenance_window +- [ ] deregister_task_from_maintenance_window +- [ ] describe_activations +- [ ] describe_association +- [ ] describe_automation_executions +- [ ] describe_automation_step_executions +- [ ] describe_available_patches +- [ ] describe_document +- [ ] describe_document_permission +- [ ] describe_effective_instance_associations +- [ ] describe_effective_patches_for_patch_baseline +- [ ] describe_instance_associations_status +- [ ] describe_instance_information +- [ ] describe_instance_patch_states +- [ ] describe_instance_patch_states_for_patch_group +- [ ] describe_instance_patches +- [ ] describe_maintenance_window_execution_task_invocations +- [ ] describe_maintenance_window_execution_tasks +- [ ] describe_maintenance_window_executions +- [ ] describe_maintenance_window_targets +- [ ] describe_maintenance_window_tasks +- [ ] describe_maintenance_windows +- [ ] describe_parameters +- [ ] describe_patch_baselines +- [ ] describe_patch_group_state +- [ ] describe_patch_groups +- [ ] get_automation_execution +- [ ] get_command_invocation +- [ ] get_default_patch_baseline +- [ ] get_deployable_patch_snapshot_for_instance +- [ ] get_document +- [ ] get_inventory +- [ ] get_inventory_schema +- [ ] 
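Against the `sqs` list above, the core send path looks like the following sketch; the queue name is illustrative. Note that receive_message is unchecked in this coverage list, so the example stops at the send side:

    import boto3
    from moto import mock_sqs

    @mock_sqs
    def test_send_message():
        client = boto3.client("sqs", region_name="us-east-1")
        # create_queue, send_message, and purge_queue are all checked above.
        queue_url = client.create_queue(QueueName="jobs")["QueueUrl"]
        client.send_message(QueueUrl=queue_url, MessageBody="hello")
        client.purge_queue(QueueUrl=queue_url)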
get_maintenance_window +- [ ] get_maintenance_window_execution +- [ ] get_maintenance_window_execution_task +- [ ] get_maintenance_window_execution_task_invocation +- [ ] get_maintenance_window_task +- [X] get_parameter +- [ ] get_parameter_history +- [X] get_parameters +- [X] get_parameters_by_path +- [ ] get_patch_baseline +- [ ] get_patch_baseline_for_patch_group +- [ ] list_association_versions +- [ ] list_associations +- [ ] list_command_invocations +- [X] list_commands +- [ ] list_compliance_items +- [ ] list_compliance_summaries +- [ ] list_document_versions +- [ ] list_documents +- [ ] list_inventory_entries +- [ ] list_resource_compliance_summaries +- [ ] list_resource_data_sync +- [X] list_tags_for_resource +- [ ] modify_document_permission +- [ ] put_compliance_items +- [ ] put_inventory +- [X] put_parameter +- [ ] register_default_patch_baseline +- [ ] register_patch_baseline_for_patch_group +- [ ] register_target_with_maintenance_window +- [ ] register_task_with_maintenance_window +- [X] remove_tags_from_resource +- [ ] send_automation_signal +- [X] send_command +- [ ] start_automation_execution +- [ ] stop_automation_execution +- [ ] update_association +- [ ] update_association_status +- [ ] update_document +- [ ] update_document_default_version +- [ ] update_maintenance_window +- [ ] update_maintenance_window_target +- [ ] update_maintenance_window_task +- [ ] update_managed_instance_role +- [ ] update_patch_baseline + +## stepfunctions - 0% implemented +- [ ] create_activity +- [ ] create_state_machine +- [ ] delete_activity +- [ ] delete_state_machine +- [ ] describe_activity +- [ ] describe_execution +- [ ] describe_state_machine +- [ ] describe_state_machine_for_execution +- [ ] get_activity_task +- [ ] get_execution_history +- [ ] list_activities +- [ ] list_executions +- [ ] list_state_machines +- [ ] send_task_failure +- [ ] send_task_heartbeat +- [ ] send_task_success +- [ ] start_execution +- [ ] stop_execution +- [ ] update_state_machine + +## storagegateway - 0% implemented +- [ ] activate_gateway +- [ ] add_cache +- [ ] add_tags_to_resource +- [ ] add_upload_buffer +- [ ] add_working_storage +- [ ] cancel_archival +- [ ] cancel_retrieval +- [ ] create_cached_iscsi_volume +- [ ] create_nfs_file_share +- [ ] create_snapshot +- [ ] create_snapshot_from_volume_recovery_point +- [ ] create_stored_iscsi_volume +- [ ] create_tape_with_barcode +- [ ] create_tapes +- [ ] delete_bandwidth_rate_limit +- [ ] delete_chap_credentials +- [ ] delete_file_share +- [ ] delete_gateway +- [ ] delete_snapshot_schedule +- [ ] delete_tape +- [ ] delete_tape_archive +- [ ] delete_volume +- [ ] describe_bandwidth_rate_limit +- [ ] describe_cache +- [ ] describe_cached_iscsi_volumes +- [ ] describe_chap_credentials +- [ ] describe_gateway_information +- [ ] describe_maintenance_start_time +- [ ] describe_nfs_file_shares +- [ ] describe_snapshot_schedule +- [ ] describe_stored_iscsi_volumes +- [ ] describe_tape_archives +- [ ] describe_tape_recovery_points +- [ ] describe_tapes +- [ ] describe_upload_buffer +- [ ] describe_vtl_devices +- [ ] describe_working_storage +- [ ] disable_gateway +- [ ] list_file_shares +- [ ] list_gateways +- [ ] list_local_disks +- [ ] list_tags_for_resource +- [ ] list_tapes +- [ ] list_volume_initiators +- [ ] list_volume_recovery_points +- [ ] list_volumes +- [ ] notify_when_uploaded +- [ ] refresh_cache +- [ ] remove_tags_from_resource +- [ ] reset_cache +- [ ] retrieve_tape_archive +- [ ] retrieve_tape_recovery_point +- [ ] set_local_console_password +- [ ] 
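The `ssm` parameter-store endpoints checked above (put_parameter, get_parameter, get_parameters_by_path) support a simple round trip. A sketch with an illustrative parameter name:

    import boto3
    from moto import mock_ssm

    @mock_ssm
    def test_parameter_store_roundtrip():
        client = boto3.client("ssm", region_name="us-east-1")
        client.put_parameter(Name="/app/db/host", Value="localhost", Type="String")
        param = client.get_parameter(Name="/app/db/host")
        assert param["Parameter"]["Value"] == "localhost"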
shutdown_gateway +- [ ] start_gateway +- [ ] update_bandwidth_rate_limit +- [ ] update_chap_credentials +- [ ] update_gateway_information +- [ ] update_gateway_software_now +- [ ] update_maintenance_start_time +- [ ] update_nfs_file_share +- [ ] update_snapshot_schedule +- [ ] update_vtl_device_type + +## sts - 42% implemented +- [X] assume_role +- [ ] assume_role_with_saml +- [ ] assume_role_with_web_identity +- [ ] decode_authorization_message +- [ ] get_caller_identity +- [X] get_federation_token +- [X] get_session_token + +## support - 0% implemented +- [ ] add_attachments_to_set +- [ ] add_communication_to_case +- [ ] create_case +- [ ] describe_attachment +- [ ] describe_cases +- [ ] describe_communications +- [ ] describe_services +- [ ] describe_severity_levels +- [ ] describe_trusted_advisor_check_refresh_statuses +- [ ] describe_trusted_advisor_check_result +- [ ] describe_trusted_advisor_check_summaries +- [ ] describe_trusted_advisor_checks +- [ ] refresh_trusted_advisor_check +- [ ] resolve_case + +## swf - 58% implemented +- [ ] count_closed_workflow_executions +- [ ] count_open_workflow_executions +- [X] count_pending_activity_tasks +- [X] count_pending_decision_tasks +- [ ] deprecate_activity_type +- [X] deprecate_domain +- [ ] deprecate_workflow_type +- [ ] describe_activity_type +- [X] describe_domain +- [X] describe_workflow_execution +- [ ] describe_workflow_type +- [ ] get_workflow_execution_history +- [ ] list_activity_types +- [X] list_closed_workflow_executions +- [X] list_domains +- [X] list_open_workflow_executions +- [ ] list_workflow_types +- [X] poll_for_activity_task +- [X] poll_for_decision_task +- [X] record_activity_task_heartbeat +- [ ] register_activity_type +- [X] register_domain +- [ ] register_workflow_type +- [ ] request_cancel_workflow_execution +- [ ] respond_activity_task_canceled +- [X] respond_activity_task_completed +- [X] respond_activity_task_failed +- [X] respond_decision_task_completed +- [X] signal_workflow_execution +- [X] start_workflow_execution +- [X] terminate_workflow_execution + +## transcribe - 0% implemented +- [ ] create_vocabulary +- [ ] delete_vocabulary +- [ ] get_transcription_job +- [ ] get_vocabulary +- [ ] list_transcription_jobs +- [ ] list_vocabularies +- [ ] start_transcription_job +- [ ] update_vocabulary + +## translate - 0% implemented +- [ ] translate_text + +## waf - 0% implemented +- [ ] create_byte_match_set +- [ ] create_geo_match_set +- [ ] create_ip_set +- [ ] create_rate_based_rule +- [ ] create_regex_match_set +- [ ] create_regex_pattern_set +- [ ] create_rule +- [ ] create_rule_group +- [ ] create_size_constraint_set +- [ ] create_sql_injection_match_set +- [ ] create_web_acl +- [ ] create_xss_match_set +- [ ] delete_byte_match_set +- [ ] delete_geo_match_set +- [ ] delete_ip_set +- [ ] delete_permission_policy +- [ ] delete_rate_based_rule +- [ ] delete_regex_match_set +- [ ] delete_regex_pattern_set +- [ ] delete_rule +- [ ] delete_rule_group +- [ ] delete_size_constraint_set +- [ ] delete_sql_injection_match_set +- [ ] delete_web_acl +- [ ] delete_xss_match_set +- [ ] get_byte_match_set +- [ ] get_change_token +- [ ] get_change_token_status +- [ ] get_geo_match_set +- [ ] get_ip_set +- [ ] get_permission_policy +- [ ] get_rate_based_rule +- [ ] get_rate_based_rule_managed_keys +- [ ] get_regex_match_set +- [ ] get_regex_pattern_set +- [ ] get_rule +- [ ] get_rule_group +- [ ] get_sampled_requests +- [ ] get_size_constraint_set +- [ ] get_sql_injection_match_set +- [ ] get_web_acl +- [ ] get_xss_match_set 
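For the `sts` section above, assume_role is among the checked endpoints. A sketch in which the role ARN is a placeholder; the mock is assumed to hand back fake credentials without validating that the role exists:

    import boto3
    from moto import mock_sts

    @mock_sts
    def test_assume_role():
        client = boto3.client("sts", region_name="us-east-1")
        resp = client.assume_role(
            RoleArn="arn:aws:iam::123456789012:role/example",
            RoleSessionName="example-session",
        )
        # The mocked response is assumed to follow the real shape:
        # temporary credentials under the "Credentials" key.
        assert "AccessKeyId" in resp["Credentials"]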
+- [ ] list_activated_rules_in_rule_group +- [ ] list_byte_match_sets +- [ ] list_geo_match_sets +- [ ] list_ip_sets +- [ ] list_rate_based_rules +- [ ] list_regex_match_sets +- [ ] list_regex_pattern_sets +- [ ] list_rule_groups +- [ ] list_rules +- [ ] list_size_constraint_sets +- [ ] list_sql_injection_match_sets +- [ ] list_subscribed_rule_groups +- [ ] list_web_acls +- [ ] list_xss_match_sets +- [ ] put_permission_policy +- [ ] update_byte_match_set +- [ ] update_geo_match_set +- [ ] update_ip_set +- [ ] update_rate_based_rule +- [ ] update_regex_match_set +- [ ] update_regex_pattern_set +- [ ] update_rule +- [ ] update_rule_group +- [ ] update_size_constraint_set +- [ ] update_sql_injection_match_set +- [ ] update_web_acl +- [ ] update_xss_match_set + +## waf-regional - 0% implemented +- [ ] associate_web_acl +- [ ] create_byte_match_set +- [ ] create_geo_match_set +- [ ] create_ip_set +- [ ] create_rate_based_rule +- [ ] create_regex_match_set +- [ ] create_regex_pattern_set +- [ ] create_rule +- [ ] create_rule_group +- [ ] create_size_constraint_set +- [ ] create_sql_injection_match_set +- [ ] create_web_acl +- [ ] create_xss_match_set +- [ ] delete_byte_match_set +- [ ] delete_geo_match_set +- [ ] delete_ip_set +- [ ] delete_permission_policy +- [ ] delete_rate_based_rule +- [ ] delete_regex_match_set +- [ ] delete_regex_pattern_set +- [ ] delete_rule +- [ ] delete_rule_group +- [ ] delete_size_constraint_set +- [ ] delete_sql_injection_match_set +- [ ] delete_web_acl +- [ ] delete_xss_match_set +- [ ] disassociate_web_acl +- [ ] get_byte_match_set +- [ ] get_change_token +- [ ] get_change_token_status +- [ ] get_geo_match_set +- [ ] get_ip_set +- [ ] get_permission_policy +- [ ] get_rate_based_rule +- [ ] get_rate_based_rule_managed_keys +- [ ] get_regex_match_set +- [ ] get_regex_pattern_set +- [ ] get_rule +- [ ] get_rule_group +- [ ] get_sampled_requests +- [ ] get_size_constraint_set +- [ ] get_sql_injection_match_set +- [ ] get_web_acl +- [ ] get_web_acl_for_resource +- [ ] get_xss_match_set +- [ ] list_activated_rules_in_rule_group +- [ ] list_byte_match_sets +- [ ] list_geo_match_sets +- [ ] list_ip_sets +- [ ] list_rate_based_rules +- [ ] list_regex_match_sets +- [ ] list_regex_pattern_sets +- [ ] list_resources_for_web_acl +- [ ] list_rule_groups +- [ ] list_rules +- [ ] list_size_constraint_sets +- [ ] list_sql_injection_match_sets +- [ ] list_subscribed_rule_groups +- [ ] list_web_acls +- [ ] list_xss_match_sets +- [ ] put_permission_policy +- [ ] update_byte_match_set +- [ ] update_geo_match_set +- [ ] update_ip_set +- [ ] update_rate_based_rule +- [ ] update_regex_match_set +- [ ] update_regex_pattern_set +- [ ] update_rule +- [ ] update_rule_group +- [ ] update_size_constraint_set +- [ ] update_sql_injection_match_set +- [ ] update_web_acl +- [ ] update_xss_match_set + +## workdocs - 0% implemented +- [ ] abort_document_version_upload +- [ ] activate_user +- [ ] add_resource_permissions +- [ ] create_comment +- [ ] create_custom_metadata +- [ ] create_folder +- [ ] create_labels +- [ ] create_notification_subscription +- [ ] create_user +- [ ] deactivate_user +- [ ] delete_comment +- [ ] delete_custom_metadata +- [ ] delete_document +- [ ] delete_folder +- [ ] delete_folder_contents +- [ ] delete_labels +- [ ] delete_notification_subscription +- [ ] delete_user +- [ ] describe_activities +- [ ] describe_comments +- [ ] describe_document_versions +- [ ] describe_folder_contents +- [ ] describe_groups +- [ ] describe_notification_subscriptions +- [ ] 
describe_resource_permissions +- [ ] describe_root_folders +- [ ] describe_users +- [ ] get_current_user +- [ ] get_document +- [ ] get_document_path +- [ ] get_document_version +- [ ] get_folder +- [ ] get_folder_path +- [ ] initiate_document_version_upload +- [ ] remove_all_resource_permissions +- [ ] remove_resource_permission +- [ ] update_document +- [ ] update_document_version +- [ ] update_folder +- [ ] update_user + +## workmail - 0% implemented +- [ ] associate_delegate_to_resource +- [ ] associate_member_to_group +- [ ] create_alias +- [ ] create_group +- [ ] create_resource +- [ ] create_user +- [ ] delete_alias +- [ ] delete_group +- [ ] delete_mailbox_permissions +- [ ] delete_resource +- [ ] delete_user +- [ ] deregister_from_work_mail +- [ ] describe_group +- [ ] describe_organization +- [ ] describe_resource +- [ ] describe_user +- [ ] disassociate_delegate_from_resource +- [ ] disassociate_member_from_group +- [ ] list_aliases +- [ ] list_group_members +- [ ] list_groups +- [ ] list_mailbox_permissions +- [ ] list_organizations +- [ ] list_resource_delegates +- [ ] list_resources +- [ ] list_users +- [ ] put_mailbox_permissions +- [ ] register_to_work_mail +- [ ] reset_password +- [ ] update_primary_email_address +- [ ] update_resource + +## workspaces - 0% implemented +- [ ] create_tags +- [ ] create_workspaces +- [ ] delete_tags +- [ ] describe_tags +- [ ] describe_workspace_bundles +- [ ] describe_workspace_directories +- [ ] describe_workspaces +- [ ] describe_workspaces_connection_status +- [ ] modify_workspace_properties +- [ ] reboot_workspaces +- [ ] rebuild_workspaces +- [ ] start_workspaces +- [ ] stop_workspaces +- [ ] terminate_workspaces + +## xray - 0% implemented +- [ ] batch_get_traces +- [ ] get_service_graph +- [ ] get_trace_graph +- [ ] get_trace_summaries +- [ ] put_telemetry_records +- [ ] put_trace_segments diff --git a/README.md b/README.md index 8618b4042..791226d6b 100644 --- a/README.md +++ b/README.md @@ -112,6 +112,8 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. 
|------------------------------------------------------------------------------| | KMS | @mock_kms | basic endpoints done | |------------------------------------------------------------------------------| +| Organizations | @mock_organizations | some core endpoints done | +|------------------------------------------------------------------------------| | Polly | @mock_polly | all endpoints done | |------------------------------------------------------------------------------| | RDS | @mock_rds | core endpoints done | diff --git a/docs/index.rst b/docs/index.rst index 321342401..66e12e4bd 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -34,11 +34,11 @@ Currently implemented Services: | - DynamoDB2 | - @mock_dynamodb2 | - core endpoints + partial indexes| +-----------------------+---------------------+-----------------------------------+ | EC2 | @mock_ec2 | core endpoints done | -| - AMI | | core endpoints done | -| - EBS | | core endpoints done | -| - Instances | | all endpoints done | -| - Security Groups | | core endpoints done | -| - Tags | | all endpoints done | +| - AMI | | - core endpoints done | +| - EBS | | - core endpoints done | +| - Instances | | - all endpoints done | +| - Security Groups | | - core endpoints done | +| - Tags | | - all endpoints done | +-----------------------+---------------------+-----------------------------------+ | ECS | @mock_ecs | basic endpoints done | +-----------------------+---------------------+-----------------------------------+ diff --git a/moto/__init__.py b/moto/__init__.py index b7b653200..6992c535e 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -3,7 +3,7 @@ import logging # logging.getLogger('boto').setLevel(logging.CRITICAL) __title__ = 'moto' -__version__ = '1.3.5' +__version__ = '1.3.6' from .acm import mock_acm # flake8: noqa from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa @@ -28,6 +28,7 @@ from .glue import mock_glue # flake8: noqa from .iam import mock_iam, mock_iam_deprecated # flake8: noqa from .kinesis import mock_kinesis, mock_kinesis_deprecated # flake8: noqa from .kms import mock_kms, mock_kms_deprecated # flake8: noqa +from .organizations import mock_organizations # flake8: noqa from .opsworks import mock_opsworks, mock_opsworks_deprecated # flake8: noqa from .polly import mock_polly # flake8: noqa from .rds import mock_rds, mock_rds_deprecated # flake8: noqa diff --git a/moto/backends.py b/moto/backends.py index 8d707373f..d95424385 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -27,6 +27,7 @@ from moto.kinesis import kinesis_backends from moto.kms import kms_backends from moto.logs import logs_backends from moto.opsworks import opsworks_backends +from moto.organizations import organizations_backends from moto.polly import polly_backends from moto.rds2 import rds2_backends from moto.redshift import redshift_backends @@ -74,6 +75,7 @@ BACKENDS = { 'kinesis': kinesis_backends, 'kms': kms_backends, 'opsworks': opsworks_backends, + 'organizations': organizations_backends, 'polly': polly_backends, 'redshift': redshift_backends, 'rds': rds2_backends, diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index c4059a06b..35b05d101 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -387,6 +387,7 @@ class ResourceMap(collections.Mapping): "AWS::StackName": stack_name, "AWS::URLSuffix": "amazonaws.com", "AWS::NoValue": None, + "AWS::Partition": "aws", } def __getitem__(self, key): diff --git a/moto/cognitoidentity/responses.py
b/moto/cognitoidentity/responses.py index ea54b2cff..e7b428329 100644 --- a/moto/cognitoidentity/responses.py +++ b/moto/cognitoidentity/responses.py @@ -3,6 +3,7 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse from .models import cognitoidentity_backends +from .utils import get_random_identity_id class CognitoIdentityResponse(BaseResponse): @@ -31,4 +32,6 @@ class CognitoIdentityResponse(BaseResponse): return cognitoidentity_backends[self.region].get_credentials_for_identity(self._get_param('IdentityId')) def get_open_id_token_for_developer_identity(self): - return cognitoidentity_backends[self.region].get_open_id_token_for_developer_identity(self._get_param('IdentityId')) + return cognitoidentity_backends[self.region].get_open_id_token_for_developer_identity( + self._get_param('IdentityId') or get_random_identity_id(self.region) + ) diff --git a/moto/cognitoidentity/utils.py b/moto/cognitoidentity/utils.py index 359631763..6143d5121 100644 --- a/moto/cognitoidentity/utils.py +++ b/moto/cognitoidentity/utils.py @@ -2,4 +2,4 @@ from moto.core.utils import get_random_hex def get_random_identity_id(region): - return "{0}:{0}".format(region, get_random_hex(length=19)) + return "{0}:{1}".format(region, get_random_hex(length=19)) diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py index 52a73f89f..10da0c6ff 100644 --- a/moto/cognitoidp/models.py +++ b/moto/cognitoidp/models.py @@ -24,7 +24,7 @@ class CognitoIdpUserPool(BaseModel): def __init__(self, region, name, extended_config): self.region = region - self.id = str(uuid.uuid4()) + self.id = "{}_{}".format(self.region, str(uuid.uuid4().hex)) self.name = name self.status = None self.extended_config = extended_config or {} @@ -84,7 +84,11 @@ class CognitoIdpUserPool(BaseModel): return refresh_token def create_access_token(self, client_id, username): - access_token, expires_in = self.create_jwt(client_id, username) + extra_data = self.get_user_extra_data_by_client_id( + client_id, username + ) + access_token, expires_in = self.create_jwt(client_id, username, + extra_data=extra_data) self.access_tokens[access_token] = (client_id, username) return access_token, expires_in @@ -97,6 +101,21 @@ class CognitoIdpUserPool(BaseModel): id_token, _ = self.create_id_token(client_id, username) return access_token, id_token, expires_in + def get_user_extra_data_by_client_id(self, client_id, username): + extra_data = {} + current_client = self.clients.get(client_id, None) + if current_client: + for readable_field in current_client.get_readable_fields(): + attribute = list(filter( + lambda f: f['Name'] == readable_field, + self.users.get(username).attributes + )) + if len(attribute) > 0: + extra_data.update({ + attribute[0]['Name']: attribute[0]['Value'] + }) + return extra_data + class CognitoIdpUserPoolDomain(BaseModel): @@ -138,6 +157,9 @@ class CognitoIdpUserPoolClient(BaseModel): return user_pool_client_json + def get_readable_fields(self): + return self.extended_config.get('ReadAttributes', []) + class CognitoIdpIdentityProvider(BaseModel): diff --git a/moto/core/models.py b/moto/core/models.py index 92dc2a980..adc06a9c0 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -89,6 +89,17 @@ class BaseMockAWS(object): if inspect.ismethod(attr_value) and attr_value.__self__ is klass: continue + # Check if this is a staticmethod. 
If so, skip patching + for cls in inspect.getmro(klass): + if attr_value.__name__ not in cls.__dict__: + continue + bound_attr_value = cls.__dict__[attr_value.__name__] + if not isinstance(bound_attr_value, staticmethod): + break + else: + # It is a staticmethod, skip patching + continue + try: setattr(klass, attr, self(attr_value, reset=False)) except TypeError: diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index b327c7a4b..63ad20df6 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -154,7 +154,7 @@ class Item(BaseModel): # If not exists, changes value to a default if needed, else its the same as it was if value.startswith('if_not_exists'): # Function signature - match = re.match(r'.*if_not_exists\((?P.+),\s*(?P.+)\).*', value) + match = re.match(r'.*if_not_exists\s*\((?P.+),\s*(?P.+)\).*', value) if not match: raise TypeError @@ -162,12 +162,13 @@ class Item(BaseModel): # If it already exists, get its value so we dont overwrite it if path in self.attrs: - value = self.attrs[path].cast_value + value = self.attrs[path] - if value in expression_attribute_values: - value = DynamoType(expression_attribute_values[value]) - else: - value = DynamoType({"S": value}) + if type(value) != DynamoType: + if value in expression_attribute_values: + value = DynamoType(expression_attribute_values[value]) + else: + value = DynamoType({"S": value}) if '.' not in key: self.attrs[key] = value diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 3c7e7ffc2..e2f1ef1cc 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -20,6 +20,17 @@ def has_empty_keys_or_values(_dict): ) +def get_empty_str_error(): + er = 'com.amazonaws.dynamodb.v20111205#ValidationException' + return (400, + {'server': 'amazon.com'}, + dynamo_json_dump({'__type': er, + 'message': ('One or more parameter values were ' + 'invalid: An AttributeValue may not ' + 'contain an empty string')} + )) + + class DynamoHandler(BaseResponse): def get_endpoint_name(self, headers): @@ -174,14 +185,7 @@ class DynamoHandler(BaseResponse): item = self.body['Item'] if has_empty_keys_or_values(item): - er = 'com.amazonaws.dynamodb.v20111205#ValidationException' - return (400, - {'server': 'amazon.com'}, - dynamo_json_dump({'__type': er, - 'message': ('One or more parameter values were ' - 'invalid: An AttributeValue may not ' - 'contain an empty string')} - )) + return get_empty_str_error() overwrite = 'Expected' not in self.body if not overwrite: @@ -200,9 +204,9 @@ class DynamoHandler(BaseResponse): if cond_items: expected = {} overwrite = False - exists_re = re.compile('^attribute_exists\((.*)\)$') + exists_re = re.compile('^attribute_exists\s*\((.*)\)$') not_exists_re = re.compile( - '^attribute_not_exists\((.*)\)$') + '^attribute_not_exists\s*\((.*)\)$') for cond in cond_items: exists_m = exists_re.match(cond) @@ -523,6 +527,7 @@ class DynamoHandler(BaseResponse): return dynamo_json_dump(item_dict) def update_item(self): + name = self.body['TableName'] key = self.body['Key'] update_expression = self.body.get('UpdateExpression') @@ -533,6 +538,9 @@ class DynamoHandler(BaseResponse): 'ExpressionAttributeValues', {}) existing_item = self.dynamodb_backend.get_item(name, key) + if has_empty_keys_or_values(expression_attribute_values): + return get_empty_str_error() + if 'Expected' in self.body: expected = self.body['Expected'] else: @@ -548,9 +556,9 @@ class DynamoHandler(BaseResponse): if cond_items: expected = {} - exists_re = re.compile('^attribute_exists\((.*)\)$') + 
exists_re = re.compile('^attribute_exists\s*\((.*)\)$') not_exists_re = re.compile( - '^attribute_not_exists\((.*)\)$') + '^attribute_not_exists\s*\((.*)\)$') for cond in cond_items: exists_m = exists_re.match(cond) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 4e26f0f65..b94cac479 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -13,6 +13,7 @@ from pkg_resources import resource_filename import boto.ec2 from collections import defaultdict +import weakref from datetime import datetime from boto.ec2.instance import Instance as BotoInstance, Reservation from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType @@ -2115,10 +2116,20 @@ class VPC(TaggedEC2Resource): class VPCBackend(object): + __refs__ = defaultdict(list) + def __init__(self): self.vpcs = {} + self.__refs__[self.__class__].append(weakref.ref(self)) super(VPCBackend, self).__init__() + @classmethod + def get_instances(cls): + for inst_ref in cls.__refs__[cls]: + inst = inst_ref() + if inst is not None: + yield inst + def create_vpc(self, cidr_block, instance_tenancy='default', amazon_provided_ipv6_cidr_block=False): vpc_id = random_vpc_id() vpc = VPC(self, vpc_id, cidr_block, len(self.vpcs) == 0, instance_tenancy, amazon_provided_ipv6_cidr_block) @@ -2142,6 +2153,13 @@ class VPCBackend(object): raise InvalidVPCIdError(vpc_id) return self.vpcs.get(vpc_id) + # get vpc by vpc id and aws region + def get_cross_vpc(self, vpc_id, peer_region): + for vpcs in self.get_instances(): + if vpcs.region_name == peer_region: + match_vpc = vpcs.get_vpc(vpc_id) + return match_vpc + def get_all_vpcs(self, vpc_ids=None, filters=None): matches = self.vpcs.values() if vpc_ids: diff --git a/moto/ec2/responses/vpc_peering_connections.py b/moto/ec2/responses/vpc_peering_connections.py index 1bccce4f6..49d752893 100644 --- a/moto/ec2/responses/vpc_peering_connections.py +++ b/moto/ec2/responses/vpc_peering_connections.py @@ -5,8 +5,12 @@ from moto.core.responses import BaseResponse class VPCPeeringConnections(BaseResponse): def create_vpc_peering_connection(self): + peer_region = self._get_param('PeerRegion') + if peer_region == self.region or peer_region is None: + peer_vpc = self.ec2_backend.get_vpc(self._get_param('PeerVpcId')) + else: + peer_vpc = self.ec2_backend.get_cross_vpc(self._get_param('PeerVpcId'), peer_region) vpc = self.ec2_backend.get_vpc(self._get_param('VpcId')) - peer_vpc = self.ec2_backend.get_vpc(self._get_param('PeerVpcId')) vpc_pcx = self.ec2_backend.create_vpc_peering_connection(vpc, peer_vpc) template = self.response_template( CREATE_VPC_PEERING_CONNECTION_RESPONSE) @@ -41,26 +45,31 @@ class VPCPeeringConnections(BaseResponse): CREATE_VPC_PEERING_CONNECTION_RESPONSE = """ - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE - - {{ vpc_pcx.id }} + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + {{ vpc_pcx.id }} - 777788889999 - {{ vpc_pcx.vpc.id }} - {{ vpc_pcx.vpc.cidr_block }} + 777788889999 + {{ vpc_pcx.vpc.id }} + {{ vpc_pcx.vpc.cidr_block }} + + false + false + false + 123456789012 {{ vpc_pcx.peer_vpc.id }} - initiating-request - Initiating request to {accepter ID}. 
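
The get_cross_vpc() lookup above is what lets a peering request reference a VPC in another mocked region; a minimal sketch of the resulting behavior, assuming a botocore recent enough to model PeerRegion (region names and CIDRs are illustrative):

import boto3
from moto import mock_ec2

@mock_ec2
def test_cross_region_vpc_peering():
    ec2_east = boto3.client("ec2", region_name="us-east-1")
    ec2_west = boto3.client("ec2", region_name="us-west-2")

    vpc = ec2_east.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]
    peer = ec2_west.create_vpc(CidrBlock="10.1.0.0/16")["Vpc"]

    # A PeerRegion different from the request region routes the lookup
    # through get_cross_vpc() instead of the local backend.
    pcx = ec2_east.create_vpc_peering_connection(
        VpcId=vpc["VpcId"],
        PeerVpcId=peer["VpcId"],
        PeerRegion="us-west-2",
    )["VpcPeeringConnection"]
    assert pcx["VpcPeeringConnectionId"].startswith("pcx-")
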
+ initiating-request + Initiating Request to {accepter ID} 2014-02-18T14:37:25.000Z - + """ diff --git a/moto/ecs/models.py b/moto/ecs/models.py index 55fb4d4d9..d00853843 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -179,7 +179,7 @@ class Task(BaseObject): class Service(BaseObject): - def __init__(self, cluster, service_name, task_definition, desired_count, load_balancers=None): + def __init__(self, cluster, service_name, task_definition, desired_count, load_balancers=None, scheduling_strategy=None): self.cluster_arn = cluster.arn self.arn = 'arn:aws:ecs:us-east-1:012345678910:service/{0}'.format( service_name) @@ -202,6 +202,7 @@ class Service(BaseObject): } ] self.load_balancers = load_balancers if load_balancers is not None else [] + self.scheduling_strategy = scheduling_strategy if scheduling_strategy is not None else 'REPLICA' self.pending_count = 0 @property @@ -214,6 +215,7 @@ class Service(BaseObject): del response_object['name'], response_object['arn'] response_object['serviceName'] = self.name response_object['serviceArn'] = self.arn + response_object['schedulingStrategy'] = self.scheduling_strategy for deployment in response_object['deployments']: if isinstance(deployment['createdAt'], datetime): @@ -655,7 +657,7 @@ class EC2ContainerServiceBackend(BaseBackend): raise Exception("Could not find task {} on cluster {}".format( task_str, cluster_name)) - def create_service(self, cluster_str, service_name, task_definition_str, desired_count, load_balancers=None): + def create_service(self, cluster_str, service_name, task_definition_str, desired_count, load_balancers=None, scheduling_strategy=None): cluster_name = cluster_str.split('/')[-1] if cluster_name in self.clusters: cluster = self.clusters[cluster_name] @@ -665,7 +667,7 @@ class EC2ContainerServiceBackend(BaseBackend): desired_count = desired_count if desired_count is not None else 0 service = Service(cluster, service_name, - task_definition, desired_count, load_balancers) + task_definition, desired_count, load_balancers, scheduling_strategy) cluster_service_pair = '{0}:{1}'.format(cluster_name, service_name) self.services[cluster_service_pair] = service diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py index 9455d7a28..e0bfefc02 100644 --- a/moto/ecs/responses.py +++ b/moto/ecs/responses.py @@ -154,8 +154,9 @@ class EC2ContainerServiceResponse(BaseResponse): task_definition_str = self._get_param('taskDefinition') desired_count = self._get_int_param('desiredCount') load_balancers = self._get_param('loadBalancers') + scheduling_strategy = self._get_param('schedulingStrategy') service = self.ecs_backend.create_service( - cluster_str, service_name, task_definition_str, desired_count, load_balancers) + cluster_str, service_name, task_definition_str, desired_count, load_balancers, scheduling_strategy) return json.dumps({ 'service': service.response_object }) diff --git a/moto/elb/responses.py b/moto/elb/responses.py index 40d6ec2f9..b512f56e9 100644 --- a/moto/elb/responses.py +++ b/moto/elb/responses.py @@ -259,12 +259,22 @@ class ELBResponse(BaseResponse): def describe_instance_health(self): load_balancer_name = self._get_param('LoadBalancerName') - instance_ids = [list(param.values())[0] for param in self._get_list_prefix('Instances.member')] - if len(instance_ids) == 0: - instance_ids = self.elb_backend.get_load_balancer( - load_balancer_name).instance_ids + provided_instance_ids = [ + list(param.values())[0] + for param in self._get_list_prefix('Instances.member') + ] + registered_instances_id = 
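
A sketch of the new schedulingStrategy pass-through from the response layer down to Service, assuming a botocore that already models the parameter; cluster and task names are illustrative. Omitting the parameter still yields the REPLICA default.

import boto3
from moto import mock_ecs

@mock_ecs
def test_scheduling_strategy_roundtrip():
    client = boto3.client("ecs", region_name="us-east-1")
    client.create_cluster(clusterName="demo")
    client.register_task_definition(
        family="web",
        containerDefinitions=[{"name": "web", "image": "nginx", "memory": 128}],
    )
    service = client.create_service(
        cluster="demo",
        serviceName="web-svc",
        taskDefinition="web",
        desiredCount=1,
        schedulingStrategy="DAEMON",
    )["service"]
    assert service["schedulingStrategy"] == "DAEMON"
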
self.elb_backend.get_load_balancer( + load_balancer_name).instance_ids + if len(provided_instance_ids) == 0: + provided_instance_ids = registered_instances_id template = self.response_template(DESCRIBE_INSTANCE_HEALTH_TEMPLATE) - return template.render(instance_ids=instance_ids) + instances = [] + for instance_id in provided_instance_ids: + state = "InService" \ + if instance_id in registered_instances_id\ + else "Unknown" + instances.append({"InstanceId": instance_id, "State": state}) + return template.render(instances=instances) def add_tags(self): @@ -689,11 +699,11 @@ SET_LOAD_BALANCER_POLICIES_FOR_BACKEND_SERVER_TEMPLATE = """ - {% for instance_id in instance_ids %} + {% for instance in instances %} N/A - {{ instance_id }} - InService + {{ instance['InstanceId'] }} + {{ instance['State'] }} N/A {% endfor %} diff --git a/moto/glue/exceptions.py b/moto/glue/exceptions.py index 62ea1525c..8972adb35 100644 --- a/moto/glue/exceptions.py +++ b/moto/glue/exceptions.py @@ -6,19 +6,56 @@ class GlueClientError(JsonRESTError): code = 400 -class DatabaseAlreadyExistsException(GlueClientError): - def __init__(self): - self.code = 400 - super(DatabaseAlreadyExistsException, self).__init__( - 'DatabaseAlreadyExistsException', - 'Database already exists.' +class AlreadyExistsException(GlueClientError): + def __init__(self, typ): + super(GlueClientError, self).__init__( + 'AlreadyExistsException', + '%s already exists.' % (typ), ) -class TableAlreadyExistsException(GlueClientError): +class DatabaseAlreadyExistsException(AlreadyExistsException): def __init__(self): - self.code = 400 - super(TableAlreadyExistsException, self).__init__( - 'TableAlreadyExistsException', - 'Table already exists.' + super(DatabaseAlreadyExistsException, self).__init__('Database') + + +class TableAlreadyExistsException(AlreadyExistsException): + def __init__(self): + super(TableAlreadyExistsException, self).__init__('Table') + + +class PartitionAlreadyExistsException(AlreadyExistsException): + def __init__(self): + super(PartitionAlreadyExistsException, self).__init__('Partition') + + +class EntityNotFoundException(GlueClientError): + def __init__(self, msg): + super(GlueClientError, self).__init__( + 'EntityNotFoundException', + msg, ) + + +class DatabaseNotFoundException(EntityNotFoundException): + def __init__(self, db): + super(DatabaseNotFoundException, self).__init__( + 'Database %s not found.' % db, + ) + + +class TableNotFoundException(EntityNotFoundException): + def __init__(self, tbl): + super(TableNotFoundException, self).__init__( + 'Table %s not found.' 
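
The rewritten describe_instance_health now distinguishes registered instances from unknown ones; a minimal sketch (load balancer name and instance id are illustrative):

import boto3
from moto import mock_elb

@mock_elb
def test_unregistered_instance_is_unknown():
    client = boto3.client("elb", region_name="us-east-1")
    client.create_load_balancer(
        LoadBalancerName="demo-lb",
        AvailabilityZones=["us-east-1a"],
        Listeners=[{"Protocol": "HTTP", "LoadBalancerPort": 80, "InstancePort": 80}],
    )
    # An instance that was never registered now renders "Unknown" in the
    # template instead of a hard-coded "InService".
    states = client.describe_instance_health(
        LoadBalancerName="demo-lb",
        Instances=[{"InstanceId": "i-0123456789abcdef0"}],
    )["InstanceStates"]
    assert states[0]["State"] == "Unknown"
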
% tbl, + ) + + +class PartitionNotFoundException(EntityNotFoundException): + def __init__(self): + super(PartitionNotFoundException, self).__init__("Cannot find partition.") + + +class VersionNotFoundException(EntityNotFoundException): + def __init__(self): + super(VersionNotFoundException, self).__init__("Version not found.") diff --git a/moto/glue/models.py b/moto/glue/models.py index 09b7d60ed..bcf2ec4bf 100644 --- a/moto/glue/models.py +++ b/moto/glue/models.py @@ -1,8 +1,19 @@ from __future__ import unicode_literals +import time + from moto.core import BaseBackend, BaseModel from moto.compat import OrderedDict -from.exceptions import DatabaseAlreadyExistsException, TableAlreadyExistsException +from.exceptions import ( + JsonRESTError, + DatabaseAlreadyExistsException, + DatabaseNotFoundException, + TableAlreadyExistsException, + TableNotFoundException, + PartitionAlreadyExistsException, + PartitionNotFoundException, + VersionNotFoundException, +) class GlueBackend(BaseBackend): @@ -19,7 +30,10 @@ class GlueBackend(BaseBackend): return database def get_database(self, database_name): - return self.databases[database_name] + try: + return self.databases[database_name] + except KeyError: + raise DatabaseNotFoundException(database_name) def create_table(self, database_name, table_name, table_input): database = self.get_database(database_name) @@ -33,7 +47,10 @@ class GlueBackend(BaseBackend): def get_table(self, database_name, table_name): database = self.get_database(database_name) - return database.tables[table_name] + try: + return database.tables[table_name] + except KeyError: + raise TableNotFoundException(table_name) def get_tables(self, database_name): database = self.get_database(database_name) @@ -52,9 +69,84 @@ class FakeTable(BaseModel): def __init__(self, database_name, table_name, table_input): self.database_name = database_name self.name = table_name - self.table_input = table_input - self.storage_descriptor = self.table_input.get('StorageDescriptor', {}) - self.partition_keys = self.table_input.get('PartitionKeys', []) + self.partitions = OrderedDict() + self.versions = [] + self.update(table_input) + + def update(self, table_input): + self.versions.append(table_input) + + def get_version(self, ver): + try: + if not isinstance(ver, int): + # "1" goes to [0] + ver = int(ver) - 1 + except ValueError as e: + raise JsonRESTError("InvalidInputException", str(e)) + + try: + return self.versions[ver] + except IndexError: + raise VersionNotFoundException() + + def as_dict(self, version=-1): + obj = { + 'DatabaseName': self.database_name, + 'Name': self.name, + } + obj.update(self.get_version(version)) + return obj + + def create_partition(self, partiton_input): + partition = FakePartition(self.database_name, self.name, partiton_input) + key = str(partition.values) + if key in self.partitions: + raise PartitionAlreadyExistsException() + self.partitions[str(partition.values)] = partition + + def get_partitions(self): + return [p for str_part_values, p in self.partitions.items()] + + def get_partition(self, values): + try: + return self.partitions[str(values)] + except KeyError: + raise PartitionNotFoundException() + + def update_partition(self, old_values, partiton_input): + partition = FakePartition(self.database_name, self.name, partiton_input) + key = str(partition.values) + if old_values == partiton_input['Values']: + # Altering a partition in place. 
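
The version bookkeeping above keeps every TableInput and exposes 1-based VersionIds over a 0-based list; a hypothetical stand-in distilled from FakeTable:

class VersionedTable(object):
    """Hypothetical miniature of FakeTable's version handling."""

    def __init__(self, first_input):
        self.versions = [first_input]

    def update(self, table_input):
        self.versions.append(table_input)

    def get_version(self, ver):
        # Public VersionIds are 1-based strings: "1" maps to versions[0].
        return self.versions[int(ver) - 1]

table = VersionedTable({"Name": "events", "Retention": 1})
table.update({"Name": "events", "Retention": 7})
assert table.get_version("1") == {"Name": "events", "Retention": 1}
assert table.get_version("2") == {"Name": "events", "Retention": 7}
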
Don't remove it so the order of + # returned partitions doesn't change + if key not in self.partitions: + raise PartitionNotFoundException() + else: + removed = self.partitions.pop(str(old_values), None) + if removed is None: + raise PartitionNotFoundException() + if key in self.partitions: + # Trying to update to overwrite a partition that exists + raise PartitionAlreadyExistsException() + self.partitions[key] = partition + + +class FakePartition(BaseModel): + def __init__(self, database_name, table_name, partiton_input): + self.creation_time = time.time() + self.database_name = database_name + self.table_name = table_name + self.partition_input = partiton_input + self.values = self.partition_input.get('Values', []) + + def as_dict(self): + obj = { + 'DatabaseName': self.database_name, + 'TableName': self.table_name, + 'CreationTime': self.creation_time, + } + obj.update(self.partition_input) + return obj glue_backend = GlueBackend() diff --git a/moto/glue/responses.py b/moto/glue/responses.py index bb64c40d4..84cc6f901 100644 --- a/moto/glue/responses.py +++ b/moto/glue/responses.py @@ -37,27 +37,94 @@ class GlueResponse(BaseResponse): database_name = self.parameters.get('DatabaseName') table_name = self.parameters.get('Name') table = self.glue_backend.get_table(database_name, table_name) + + return json.dumps({'Table': table.as_dict()}) + + def update_table(self): + database_name = self.parameters.get('DatabaseName') + table_input = self.parameters.get('TableInput') + table_name = table_input.get('Name') + table = self.glue_backend.get_table(database_name, table_name) + table.update(table_input) + return "" + + def get_table_versions(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + table = self.glue_backend.get_table(database_name, table_name) + return json.dumps({ - 'Table': { - 'DatabaseName': table.database_name, - 'Name': table.name, - 'PartitionKeys': table.partition_keys, - 'StorageDescriptor': table.storage_descriptor - } + "TableVersions": [ + { + "Table": table.as_dict(version=n), + "VersionId": str(n + 1), + } for n in range(len(table.versions)) + ], + }) + + def get_table_version(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + table = self.glue_backend.get_table(database_name, table_name) + ver_id = self.parameters.get('VersionId') + + return json.dumps({ + "TableVersion": { + "Table": table.as_dict(version=ver_id), + "VersionId": ver_id, + }, }) def get_tables(self): database_name = self.parameters.get('DatabaseName') tables = self.glue_backend.get_tables(database_name) - return json.dumps( - { - 'TableList': [ - { - 'DatabaseName': table.database_name, - 'Name': table.name, - 'PartitionKeys': table.partition_keys, - 'StorageDescriptor': table.storage_descriptor - } for table in tables - ] - } - ) + return json.dumps({ + 'TableList': [ + table.as_dict() for table in tables + ] + }) + + def get_partitions(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + if 'Expression' in self.parameters: + raise NotImplementedError("Expression filtering in get_partitions is not implemented in moto") + table = self.glue_backend.get_table(database_name, table_name) + + return json.dumps({ + 'Partitions': [ + p.as_dict() for p in table.get_partitions() + ] + }) + + def get_partition(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + values = 
self.parameters.get('PartitionValues') + + table = self.glue_backend.get_table(database_name, table_name) + + p = table.get_partition(values) + + return json.dumps({'Partition': p.as_dict()}) + + def create_partition(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + part_input = self.parameters.get('PartitionInput') + + table = self.glue_backend.get_table(database_name, table_name) + table.create_partition(part_input) + + return "" + + def update_partition(self): + database_name = self.parameters.get('DatabaseName') + table_name = self.parameters.get('TableName') + part_input = self.parameters.get('PartitionInput') + part_to_update = self.parameters.get('PartitionValueList') + + table = self.glue_backend.get_table(database_name, table_name) + table.update_partition(part_to_update, part_input) + + return "" diff --git a/moto/iam/models.py b/moto/iam/models.py index 697be7988..4d884fa2f 100644 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -37,7 +37,6 @@ class Policy(BaseModel): description=None, document=None, path=None): - self.document = document or {} self.name = name self.attachment_count = 0 @@ -45,7 +44,7 @@ class Policy(BaseModel): self.id = random_policy_id() self.path = path or '/' self.default_version_id = default_version_id or 'v1' - self.versions = [] + self.versions = [PolicyVersion(self.arn, document, True)] self.create_datetime = datetime.now(pytz.utc) self.update_datetime = datetime.now(pytz.utc) @@ -72,11 +71,11 @@ class ManagedPolicy(Policy): def attach_to(self, obj): self.attachment_count += 1 - obj.managed_policies[self.name] = self + obj.managed_policies[self.arn] = self def detach_from(self, obj): self.attachment_count -= 1 - del obj.managed_policies[self.name] + del obj.managed_policies[self.arn] @property def arn(self): @@ -477,11 +476,13 @@ class IAMBackend(BaseBackend): document=policy_document, path=path, ) - self.managed_policies[policy.name] = policy + self.managed_policies[policy.arn] = policy return policy - def get_policy(self, policy_name): - return self.managed_policies.get(policy_name) + def get_policy(self, policy_arn): + if policy_arn not in self.managed_policies: + raise IAMNotFoundException("Policy {0} not found".format(policy_arn)) + return self.managed_policies.get(policy_arn) def list_attached_role_policies(self, role_name, marker=None, max_items=100, path_prefix='/'): policies = self.get_role(role_name).managed_policies.values() @@ -575,21 +576,18 @@ class IAMBackend(BaseBackend): return role.policies.keys() def create_policy_version(self, policy_arn, policy_document, set_as_default): - policy_name = policy_arn.split(':')[-1] - policy_name = policy_name.split('/')[1] - policy = self.get_policy(policy_name) + policy = self.get_policy(policy_arn) if not policy: raise IAMNotFoundException("Policy not found") version = PolicyVersion(policy_arn, policy_document, set_as_default) policy.versions.append(version) + version.version_id = 'v{0}'.format(len(policy.versions)) if set_as_default: policy.default_version_id = version.version_id return version def get_policy_version(self, policy_arn, version_id): - policy_name = policy_arn.split(':')[-1] - policy_name = policy_name.split('/')[1] - policy = self.get_policy(policy_name) + policy = self.get_policy(policy_arn) if not policy: raise IAMNotFoundException("Policy not found") for version in policy.versions: @@ -598,19 +596,18 @@ class IAMBackend(BaseBackend): raise IAMNotFoundException("Policy version not found") def list_policy_versions(self, 
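
With managed policies now keyed by ARN and a v1 version seeded at creation, the externally visible behavior looks like this sketch (policy name and document are illustrative):

import boto3
from moto import mock_iam

@mock_iam
def test_policy_by_arn():
    client = boto3.client("iam", region_name="us-east-1")
    doc = '{"Version": "2012-10-17", "Statement": []}'
    arn = client.create_policy(PolicyName="demo", PolicyDocument=doc)["Policy"]["Arn"]

    # get_policy resolves the ARN directly instead of slicing a name out
    # of it, and the creation document already counts as version v1.
    assert client.get_policy(PolicyArn=arn)["Policy"]["DefaultVersionId"] == "v1"

    version = client.create_policy_version(
        PolicyArn=arn, PolicyDocument=doc, SetAsDefault=True
    )["PolicyVersion"]
    assert version["VersionId"] == "v2"   # numbered v{len(versions)}
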
policy_arn): - policy_name = policy_arn.split(':')[-1] - policy_name = policy_name.split('/')[1] - policy = self.get_policy(policy_name) + policy = self.get_policy(policy_arn) if not policy: raise IAMNotFoundException("Policy not found") return policy.versions def delete_policy_version(self, policy_arn, version_id): - policy_name = policy_arn.split(':')[-1] - policy_name = policy_name.split('/')[1] - policy = self.get_policy(policy_name) + policy = self.get_policy(policy_arn) if not policy: raise IAMNotFoundException("Policy not found") + if version_id == policy.default_version_id: + raise IAMConflictException( + "Cannot delete the default version of a policy") for i, v in enumerate(policy.versions): if v.version_id == version_id: del policy.versions[i] diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 9c1241c36..9e8d21396 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -58,6 +58,12 @@ class IamResponse(BaseResponse): template = self.response_template(CREATE_POLICY_TEMPLATE) return template.render(policy=policy) + def get_policy(self): + policy_arn = self._get_param('PolicyArn') + policy = iam_backend.get_policy(policy_arn) + template = self.response_template(GET_POLICY_TEMPLATE) + return template.render(policy=policy) + def list_attached_role_policies(self): marker = self._get_param('Marker') max_items = self._get_int_param('MaxItems', 100) @@ -601,6 +607,25 @@ CREATE_POLICY_TEMPLATE = """ """ +GET_POLICY_TEMPLATE = """ + + + {{ policy.name }} + {{ policy.description }} + {{ policy.default_version_id }} + {{ policy.id }} + {{ policy.path }} + {{ policy.arn }} + {{ policy.attachment_count }} + {{ policy.create_datetime.isoformat() }} + {{ policy.update_datetime.isoformat() }} + + + + 684f0917-3d22-11e4-a4a0-cffb9EXAMPLE + +""" + LIST_ATTACHED_ROLE_POLICIES_TEMPLATE = """ {% if marker is none %} diff --git a/moto/kms/models.py b/moto/kms/models.py index 89ebf0082..bb39d1b24 100644 --- a/moto/kms/models.py +++ b/moto/kms/models.py @@ -2,8 +2,10 @@ from __future__ import unicode_literals import boto.kms from moto.core import BaseBackend, BaseModel +from moto.core.utils import iso_8601_datetime_without_milliseconds from .utils import generate_key_id from collections import defaultdict +from datetime import datetime, timedelta class Key(BaseModel): @@ -12,11 +14,13 @@ class Key(BaseModel): self.id = generate_key_id() self.policy = policy self.key_usage = key_usage + self.key_state = "Enabled" self.description = description self.enabled = True self.region = region self.account_id = "0123456789012" self.key_rotation_status = False + self.deletion_date = None @property def physical_resource_id(self): @@ -27,7 +31,7 @@ class Key(BaseModel): return "arn:aws:kms:{0}:{1}:key/{2}".format(self.region, self.account_id, self.id) def to_dict(self): - return { + key_dict = { "KeyMetadata": { "AWSAccountId": self.account_id, "Arn": self.arn, @@ -36,8 +40,12 @@ class Key(BaseModel): "Enabled": self.enabled, "KeyId": self.id, "KeyUsage": self.key_usage, + "KeyState": self.key_state, } } + if self.key_state == 'PendingDeletion': + key_dict['KeyMetadata']['DeletionDate'] = iso_8601_datetime_without_milliseconds(self.deletion_date) + return key_dict def delete(self, region_name): kms_backends[region_name].delete_key(self.id) @@ -138,6 +146,29 @@ class KmsBackend(BaseBackend): def get_key_policy(self, key_id): return self.keys[self.get_key_id(key_id)].policy + def disable_key(self, key_id): + if key_id in self.keys: + self.keys[key_id].enabled = False + self.keys[key_id].key_state 
= 'Disabled' + + def enable_key(self, key_id): + if key_id in self.keys: + self.keys[key_id].enabled = True + self.keys[key_id].key_state = 'Enabled' + + def cancel_key_deletion(self, key_id): + if key_id in self.keys: + self.keys[key_id].key_state = 'Disabled' + self.keys[key_id].deletion_date = None + + def schedule_key_deletion(self, key_id, pending_window_in_days): + if key_id in self.keys: + if 7 <= pending_window_in_days <= 30: + self.keys[key_id].enabled = False + self.keys[key_id].key_state = 'PendingDeletion' + self.keys[key_id].deletion_date = datetime.now() + timedelta(days=pending_window_in_days) + return iso_8601_datetime_without_milliseconds(self.keys[key_id].deletion_date) + kms_backends = {} for region in boto.kms.regions(): diff --git a/moto/kms/responses.py b/moto/kms/responses.py index 0f544e954..5883f51ec 100644 --- a/moto/kms/responses.py +++ b/moto/kms/responses.py @@ -233,6 +233,56 @@ class KmsResponse(BaseResponse): value = self.parameters.get("CiphertextBlob") return json.dumps({"Plaintext": base64.b64decode(value).decode("utf-8")}) + def disable_key(self): + key_id = self.parameters.get('KeyId') + _assert_valid_key_id(self.kms_backend.get_key_id(key_id)) + try: + self.kms_backend.disable_key(key_id) + except KeyError: + raise JSONResponseError(404, 'Not Found', body={ + 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), + '__type': 'NotFoundException'}) + return json.dumps(None) + + def enable_key(self): + key_id = self.parameters.get('KeyId') + _assert_valid_key_id(self.kms_backend.get_key_id(key_id)) + try: + self.kms_backend.enable_key(key_id) + except KeyError: + raise JSONResponseError(404, 'Not Found', body={ + 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), + '__type': 'NotFoundException'}) + return json.dumps(None) + + def cancel_key_deletion(self): + key_id = self.parameters.get('KeyId') + _assert_valid_key_id(self.kms_backend.get_key_id(key_id)) + try: + self.kms_backend.cancel_key_deletion(key_id) + except KeyError: + raise JSONResponseError(404, 'Not Found', body={ + 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), + '__type': 'NotFoundException'}) + return json.dumps({'KeyId': key_id}) + + def schedule_key_deletion(self): + key_id = self.parameters.get('KeyId') + if self.parameters.get('PendingWindowInDays') is None: + pending_window_in_days = 30 + else: + pending_window_in_days = self.parameters.get('PendingWindowInDays') + _assert_valid_key_id(self.kms_backend.get_key_id(key_id)) + try: + return json.dumps({ + 'KeyId': key_id, + 'DeletionDate': self.kms_backend.schedule_key_deletion(key_id, pending_window_in_days) + }) + except KeyError: + raise JSONResponseError(404, 'Not Found', body={ + 'message': "Key 'arn:aws:kms:{region}:012345678912:key/{key_id}' does not exist".format(region=self.region, key_id=key_id), + '__type': 'NotFoundException'}) + def _assert_valid_key_id(key_id): if not re.match(r'^[A-F0-9]{8}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{4}-[A-F0-9]{12}$', key_id, re.IGNORECASE): diff --git a/moto/logs/exceptions.py b/moto/logs/exceptions.py index cc83452ea..bb02eced3 100644 --- a/moto/logs/exceptions.py +++ b/moto/logs/exceptions.py @@ -29,5 +29,5 @@ class ResourceAlreadyExistsException(LogsClientError): self.code = 400 super(ResourceAlreadyExistsException, self).__init__( 'ResourceAlreadyExistsException', - 'The specified 
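
Taken together, the model and response changes give keys a small state machine; a sketch of the boto3-visible behavior (the deletion window must fall within 7-30 days and defaults to 30):

import boto3
from moto import mock_kms

@mock_kms
def test_key_deletion_lifecycle():
    client = boto3.client("kms", region_name="us-east-1")
    key_id = client.create_key(Description="demo")["KeyMetadata"]["KeyId"]

    resp = client.schedule_key_deletion(KeyId=key_id, PendingWindowInDays=7)
    assert "DeletionDate" in resp

    meta = client.describe_key(KeyId=key_id)["KeyMetadata"]
    assert meta["KeyState"] == "PendingDeletion"
    assert meta["Enabled"] is False

    # Cancelling lands on Disabled, not Enabled, matching the real service.
    client.cancel_key_deletion(KeyId=key_id)
    assert client.describe_key(KeyId=key_id)["KeyMetadata"]["KeyState"] == "Disabled"
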
resource already exists.' + 'The specified log group already exists' ) diff --git a/moto/logs/models.py b/moto/logs/models.py index 3e1c7b955..ca1fdc4ad 100644 --- a/moto/logs/models.py +++ b/moto/logs/models.py @@ -19,7 +19,7 @@ class LogEvent: def to_filter_dict(self): return { - "eventId": self.eventId, + "eventId": str(self.eventId), "ingestionTime": self.ingestionTime, # "logStreamName": "message": self.message, @@ -86,7 +86,7 @@ class LogStream: self.events += [LogEvent(self.lastIngestionTime, log_event) for log_event in log_events] self.uploadSequenceToken += 1 - return self.uploadSequenceToken + return '{:056d}'.format(self.uploadSequenceToken) def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head): def filter_func(event): diff --git a/moto/organizations/__init__.py b/moto/organizations/__init__.py new file mode 100644 index 000000000..372782dd3 --- /dev/null +++ b/moto/organizations/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import organizations_backend +from ..core.models import base_decorator + +organizations_backends = {"global": organizations_backend} +mock_organizations = base_decorator(organizations_backends) diff --git a/moto/organizations/models.py b/moto/organizations/models.py new file mode 100644 index 000000000..9d5fe3886 --- /dev/null +++ b/moto/organizations/models.py @@ -0,0 +1,296 @@ +from __future__ import unicode_literals + +import datetime +import re + +from moto.core import BaseBackend, BaseModel +from moto.core.exceptions import RESTError +from moto.core.utils import unix_time +from moto.organizations import utils + + +class FakeOrganization(BaseModel): + + def __init__(self, feature_set): + self.id = utils.make_random_org_id() + self.root_id = utils.make_random_root_id() + self.feature_set = feature_set + self.master_account_id = utils.MASTER_ACCOUNT_ID + self.master_account_email = utils.MASTER_ACCOUNT_EMAIL + self.available_policy_types = [{ + 'Type': 'SERVICE_CONTROL_POLICY', + 'Status': 'ENABLED' + }] + + @property + def arn(self): + return utils.ORGANIZATION_ARN_FORMAT.format(self.master_account_id, self.id) + + @property + def master_account_arn(self): + return utils.MASTER_ACCOUNT_ARN_FORMAT.format(self.master_account_id, self.id) + + def describe(self): + return { + 'Organization': { + 'Id': self.id, + 'Arn': self.arn, + 'FeatureSet': self.feature_set, + 'MasterAccountArn': self.master_account_arn, + 'MasterAccountId': self.master_account_id, + 'MasterAccountEmail': self.master_account_email, + 'AvailablePolicyTypes': self.available_policy_types, + } + } + + +class FakeAccount(BaseModel): + + def __init__(self, organization, **kwargs): + self.organization_id = organization.id + self.master_account_id = organization.master_account_id + self.create_account_status_id = utils.make_random_create_account_status_id() + self.id = utils.make_random_account_id() + self.name = kwargs['AccountName'] + self.email = kwargs['Email'] + self.create_time = datetime.datetime.utcnow() + self.status = 'ACTIVE' + self.joined_method = 'CREATED' + self.parent_id = organization.root_id + + @property + def arn(self): + return utils.ACCOUNT_ARN_FORMAT.format( + self.master_account_id, + self.organization_id, + self.id + ) + + @property + def create_account_status(self): + return { + 'CreateAccountStatus': { + 'Id': self.create_account_status_id, + 'AccountName': self.name, + 'State': 'SUCCEEDED', + 'RequestedTimestamp': unix_time(self.create_time), + 'CompletedTimestamp': 
unix_time(self.create_time), + 'AccountId': self.id, + } + } + + def describe(self): + return { + 'Account': { + 'Id': self.id, + 'Arn': self.arn, + 'Email': self.email, + 'Name': self.name, + 'Status': self.status, + 'JoinedMethod': self.joined_method, + 'JoinedTimestamp': unix_time(self.create_time), + } + } + + +class FakeOrganizationalUnit(BaseModel): + + def __init__(self, organization, **kwargs): + self.type = 'ORGANIZATIONAL_UNIT' + self.organization_id = organization.id + self.master_account_id = organization.master_account_id + self.id = utils.make_random_ou_id(organization.root_id) + self.name = kwargs.get('Name') + self.parent_id = kwargs.get('ParentId') + self._arn_format = utils.OU_ARN_FORMAT + + @property + def arn(self): + return self._arn_format.format( + self.master_account_id, + self.organization_id, + self.id + ) + + def describe(self): + return { + 'OrganizationalUnit': { + 'Id': self.id, + 'Arn': self.arn, + 'Name': self.name, + } + } + + +class FakeRoot(FakeOrganizationalUnit): + + def __init__(self, organization, **kwargs): + super(FakeRoot, self).__init__(organization, **kwargs) + self.type = 'ROOT' + self.id = organization.root_id + self.name = 'Root' + self.policy_types = [{ + 'Type': 'SERVICE_CONTROL_POLICY', + 'Status': 'ENABLED' + }] + self._arn_format = utils.ROOT_ARN_FORMAT + + def describe(self): + return { + 'Id': self.id, + 'Arn': self.arn, + 'Name': self.name, + 'PolicyTypes': self.policy_types + } + + +class OrganizationsBackend(BaseBackend): + + def __init__(self): + self.org = None + self.accounts = [] + self.ou = [] + + def create_organization(self, **kwargs): + self.org = FakeOrganization(kwargs['FeatureSet']) + self.ou.append(FakeRoot(self.org)) + return self.org.describe() + + def describe_organization(self): + if not self.org: + raise RESTError( + 'AWSOrganizationsNotInUseException', + "Your account is not a member of an organization." + ) + return self.org.describe() + + def list_roots(self): + return dict( + Roots=[ou.describe() for ou in self.ou if isinstance(ou, FakeRoot)] + ) + + def create_organizational_unit(self, **kwargs): + new_ou = FakeOrganizationalUnit(self.org, **kwargs) + self.ou.append(new_ou) + return new_ou.describe() + + def get_organizational_unit_by_id(self, ou_id): + ou = next((ou for ou in self.ou if ou.id == ou_id), None) + if ou is None: + raise RESTError( + 'OrganizationalUnitNotFoundException', + "You specified an organizational unit that doesn't exist." + ) + return ou + + def validate_parent_id(self, parent_id): + try: + self.get_organizational_unit_by_id(parent_id) + except RESTError: + raise RESTError( + 'ParentNotFoundException', + "You specified parent that doesn't exist." 
+ ) + return parent_id + + def describe_organizational_unit(self, **kwargs): + ou = self.get_organizational_unit_by_id(kwargs['OrganizationalUnitId']) + return ou.describe() + + def list_organizational_units_for_parent(self, **kwargs): + parent_id = self.validate_parent_id(kwargs['ParentId']) + return dict( + OrganizationalUnits=[ + { + 'Id': ou.id, + 'Arn': ou.arn, + 'Name': ou.name, + } + for ou in self.ou + if ou.parent_id == parent_id + ] + ) + + def create_account(self, **kwargs): + new_account = FakeAccount(self.org, **kwargs) + self.accounts.append(new_account) + return new_account.create_account_status + + def get_account_by_id(self, account_id): + account = next(( + account for account in self.accounts + if account.id == account_id + ), None) + if account is None: + raise RESTError( + 'AccountNotFoundException', + "You specified an account that doesn't exist." + ) + return account + + def describe_account(self, **kwargs): + account = self.get_account_by_id(kwargs['AccountId']) + return account.describe() + + def list_accounts(self): + return dict( + Accounts=[account.describe()['Account'] for account in self.accounts] + ) + + def list_accounts_for_parent(self, **kwargs): + parent_id = self.validate_parent_id(kwargs['ParentId']) + return dict( + Accounts=[ + account.describe()['Account'] + for account in self.accounts + if account.parent_id == parent_id + ] + ) + + def move_account(self, **kwargs): + new_parent_id = self.validate_parent_id(kwargs['DestinationParentId']) + self.validate_parent_id(kwargs['SourceParentId']) + account = self.get_account_by_id(kwargs['AccountId']) + index = self.accounts.index(account) + self.accounts[index].parent_id = new_parent_id + + def list_parents(self, **kwargs): + if re.compile(r'[0-9]{12}').match(kwargs['ChildId']): + child_object = self.get_account_by_id(kwargs['ChildId']) + else: + child_object = self.get_organizational_unit_by_id(kwargs['ChildId']) + return dict( + Parents=[ + { + 'Id': ou.id, + 'Type': ou.type, + } + for ou in self.ou + if ou.id == child_object.parent_id + ] + ) + + def list_children(self, **kwargs): + parent_id = self.validate_parent_id(kwargs['ParentId']) + if kwargs['ChildType'] == 'ACCOUNT': + obj_list = self.accounts + elif kwargs['ChildType'] == 'ORGANIZATIONAL_UNIT': + obj_list = self.ou + else: + raise RESTError( + 'InvalidInputException', + 'You specified an invalid value.' 
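
list_parents() and list_children() above tell accounts and OUs apart purely by ID shape; a distilled stand-in of that dispatch (the backend's pattern is start-anchored via re.match):

import re

ACCOUNT_RE = re.compile(r'[0-9]{12}')

def child_kind(child_id):
    # Twelve leading digits means an account; anything else is treated
    # as an organizational unit (or root) id.
    if ACCOUNT_RE.match(child_id):
        return 'ACCOUNT'
    return 'ORGANIZATIONAL_UNIT'

assert child_kind('488633172133') == 'ACCOUNT'
assert child_kind('ou-g8sd-5oe3bjaw') == 'ORGANIZATIONAL_UNIT'
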
+ ) + return dict( + Children=[ + { + 'Id': obj.id, + 'Type': kwargs['ChildType'], + } + for obj in obj_list + if obj.parent_id == parent_id + ] + ) + + +organizations_backend = OrganizationsBackend() diff --git a/moto/organizations/responses.py b/moto/organizations/responses.py new file mode 100644 index 000000000..966c3fbf3 --- /dev/null +++ b/moto/organizations/responses.py @@ -0,0 +1,87 @@ +from __future__ import unicode_literals +import json + +from moto.core.responses import BaseResponse +from .models import organizations_backend + + +class OrganizationsResponse(BaseResponse): + + @property + def organizations_backend(self): + return organizations_backend + + @property + def request_params(self): + try: + return json.loads(self.body) + except ValueError: + return {} + + def _get_param(self, param, default=None): + return self.request_params.get(param, default) + + def create_organization(self): + return json.dumps( + self.organizations_backend.create_organization(**self.request_params) + ) + + def describe_organization(self): + return json.dumps( + self.organizations_backend.describe_organization() + ) + + def list_roots(self): + return json.dumps( + self.organizations_backend.list_roots() + ) + + def create_organizational_unit(self): + return json.dumps( + self.organizations_backend.create_organizational_unit(**self.request_params) + ) + + def describe_organizational_unit(self): + return json.dumps( + self.organizations_backend.describe_organizational_unit(**self.request_params) + ) + + def list_organizational_units_for_parent(self): + return json.dumps( + self.organizations_backend.list_organizational_units_for_parent(**self.request_params) + ) + + def list_parents(self): + return json.dumps( + self.organizations_backend.list_parents(**self.request_params) + ) + + def create_account(self): + return json.dumps( + self.organizations_backend.create_account(**self.request_params) + ) + + def describe_account(self): + return json.dumps( + self.organizations_backend.describe_account(**self.request_params) + ) + + def list_accounts(self): + return json.dumps( + self.organizations_backend.list_accounts() + ) + + def list_accounts_for_parent(self): + return json.dumps( + self.organizations_backend.list_accounts_for_parent(**self.request_params) + ) + + def move_account(self): + return json.dumps( + self.organizations_backend.move_account(**self.request_params) + ) + + def list_children(self): + return json.dumps( + self.organizations_backend.list_children(**self.request_params) + ) diff --git a/moto/organizations/urls.py b/moto/organizations/urls.py new file mode 100644 index 000000000..7911f5b53 --- /dev/null +++ b/moto/organizations/urls.py @@ -0,0 +1,10 @@ +from __future__ import unicode_literals +from .responses import OrganizationsResponse + +url_bases = [ + "https?://organizations.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': OrganizationsResponse.dispatch, +} diff --git a/moto/organizations/utils.py b/moto/organizations/utils.py new file mode 100644 index 000000000..007afa6ed --- /dev/null +++ b/moto/organizations/utils.py @@ -0,0 +1,59 @@ +from __future__ import unicode_literals + +import random +import string + +MASTER_ACCOUNT_ID = '123456789012' +MASTER_ACCOUNT_EMAIL = 'fakeorg@moto-example.com' +ORGANIZATION_ARN_FORMAT = 'arn:aws:organizations::{0}:organization/{1}' +MASTER_ACCOUNT_ARN_FORMAT = 'arn:aws:organizations::{0}:account/{1}/{0}' +ACCOUNT_ARN_FORMAT = 'arn:aws:organizations::{0}:account/{1}/{2}' +ROOT_ARN_FORMAT = 'arn:aws:organizations::{0}:root/{1}/{2}' 
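
End to end, the new endpoint supports a minimal organization lifecycle; a sketch using the decorator this patch defines (account name and email are illustrative):

import boto3
from moto.organizations import mock_organizations

@mock_organizations
def test_minimal_org_flow():
    client = boto3.client("organizations", region_name="us-east-1")
    client.create_organization(FeatureSet="ALL")
    root_id = client.list_roots()["Roots"][0]["Id"]

    status = client.create_account(
        AccountName="dev", Email="dev@moto-example.com"
    )["CreateAccountStatus"]
    assert status["State"] == "SUCCEEDED"    # accounts are created synchronously

    accounts = client.list_accounts_for_parent(ParentId=root_id)["Accounts"]
    assert [a["Id"] for a in accounts] == [status["AccountId"]]
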
+OU_ARN_FORMAT = 'arn:aws:organizations::{0}:ou/{1}/{2}' + +CHARSET = string.ascii_lowercase + string.digits +ORG_ID_SIZE = 10 +ROOT_ID_SIZE = 4 +ACCOUNT_ID_SIZE = 12 +OU_ID_SUFFIX_SIZE = 8 +CREATE_ACCOUNT_STATUS_ID_SIZE = 8 + + +def make_random_org_id(): + # The regex pattern for an organization ID string requires "o-" + # followed by from 10 to 32 lower-case letters or digits. + # e.g. 'o-vipjnq5z86' + return 'o-' + ''.join(random.choice(CHARSET) for x in range(ORG_ID_SIZE)) + + +def make_random_root_id(): + # The regex pattern for a root ID string requires "r-" followed by + # from 4 to 32 lower-case letters or digits. + # e.g. 'r-3zwx' + return 'r-' + ''.join(random.choice(CHARSET) for x in range(ROOT_ID_SIZE)) + + +def make_random_ou_id(root_id): + # The regex pattern for an organizational unit ID string requires "ou-" + # followed by from 4 to 32 lower-case letters or digits (the ID of the root + # that contains the OU) followed by a second "-" dash and from 8 to 32 + # additional lower-case letters or digits. + # e.g. ou-g8sd-5oe3bjaw + return '-'.join([ + 'ou', + root_id.partition('-')[2], + ''.join(random.choice(CHARSET) for x in range(OU_ID_SUFFIX_SIZE)), + ]) + + +def make_random_account_id(): + # The regex pattern for an account ID string requires exactly 12 digits. + # e.g. '488633172133' + return ''.join([random.choice(string.digits) for n in range(ACCOUNT_ID_SIZE)]) + + +def make_random_create_account_status_id(): + # The regex pattern for an create account request ID string requires + # "car-" followed by from 8 to 32 lower-case letters or digits. + # e.g. 'car-35gxzwrp' + return 'car-' + ''.join(random.choice(CHARSET) for x in range(CREATE_ACCOUNT_STATUS_ID_SIZE)) diff --git a/moto/packages/httpretty/core.py b/moto/packages/httpretty/core.py index e0f3a7e69..8ad9168a5 100644 --- a/moto/packages/httpretty/core.py +++ b/moto/packages/httpretty/core.py @@ -85,6 +85,7 @@ old_socksocket = None old_ssl_wrap_socket = None old_sslwrap_simple = None old_sslsocket = None +old_sslcontext_wrap_socket = None if PY3: # pragma: no cover basestring = (bytes, str) @@ -100,6 +101,10 @@ try: # pragma: no cover if not PY3: old_sslwrap_simple = ssl.sslwrap_simple old_sslsocket = ssl.SSLSocket + try: + old_sslcontext_wrap_socket = ssl.SSLContext.wrap_socket + except AttributeError: + pass except ImportError: # pragma: no cover ssl = None @@ -281,7 +286,7 @@ class fakesock(object): return { 'notAfter': shift.strftime('%b %d %H:%M:%S GMT'), 'subjectAltName': ( - ('DNS', '*%s' % self._host), + ('DNS', '*.%s' % self._host), ('DNS', self._host), ('DNS', '*'), ), @@ -772,7 +777,7 @@ class URIMatcher(object): def __init__(self, uri, entries, match_querystring=False): self._match_querystring = match_querystring - if type(uri).__name__ == 'SRE_Pattern': + if type(uri).__name__ in ('SRE_Pattern', 'Pattern'): self.regex = uri result = urlsplit(uri.pattern) if result.scheme == 'https': @@ -1012,6 +1017,10 @@ class httpretty(HttpBaseClass): if ssl: ssl.wrap_socket = old_ssl_wrap_socket ssl.SSLSocket = old_sslsocket + try: + ssl.SSLContext.wrap_socket = old_sslcontext_wrap_socket + except AttributeError: + pass ssl.__dict__['wrap_socket'] = old_ssl_wrap_socket ssl.__dict__['SSLSocket'] = old_sslsocket @@ -1058,6 +1067,14 @@ class httpretty(HttpBaseClass): ssl.wrap_socket = fake_wrap_socket ssl.SSLSocket = FakeSSLSocket + try: + def fake_sslcontext_wrap_socket(cls, *args, **kwargs): + return fake_wrap_socket(*args, **kwargs) + + ssl.SSLContext.wrap_socket = fake_sslcontext_wrap_socket + except AttributeError: 
+ pass + ssl.__dict__['wrap_socket'] = fake_wrap_socket ssl.__dict__['SSLSocket'] = FakeSSLSocket diff --git a/moto/rds/models.py b/moto/rds/models.py index 77deff09d..feecefe0c 100644 --- a/moto/rds/models.py +++ b/moto/rds/models.py @@ -48,6 +48,10 @@ class Database(BaseModel): if self.publicly_accessible is None: self.publicly_accessible = True + self.copy_tags_to_snapshot = kwargs.get("copy_tags_to_snapshot") + if self.copy_tags_to_snapshot is None: + self.copy_tags_to_snapshot = False + self.backup_retention_period = kwargs.get("backup_retention_period") if self.backup_retention_period is None: self.backup_retention_period = 1 @@ -137,6 +141,7 @@ class Database(BaseModel): "multi_az": properties.get("MultiAZ"), "port": properties.get('Port', 3306), "publicly_accessible": properties.get("PubliclyAccessible"), + "copy_tags_to_snapshot": properties.get("CopyTagsToSnapshot"), "region": region_name, "security_groups": security_groups, "storage_encrypted": properties.get("StorageEncrypted"), @@ -217,6 +222,7 @@ class Database(BaseModel): {% endif %} {{ database.publicly_accessible }} + {{ database.copy_tags_to_snapshot }} {{ database.auto_minor_version_upgrade }} {{ database.allocated_storage }} {{ database.storage_encrypted }} diff --git a/moto/rds2/models.py b/moto/rds2/models.py index 3fc4b6d65..fee004f76 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -73,6 +73,9 @@ class Database(BaseModel): self.publicly_accessible = kwargs.get("publicly_accessible") if self.publicly_accessible is None: self.publicly_accessible = True + self.copy_tags_to_snapshot = kwargs.get("copy_tags_to_snapshot") + if self.copy_tags_to_snapshot is None: + self.copy_tags_to_snapshot = False self.backup_retention_period = kwargs.get("backup_retention_period") if self.backup_retention_period is None: self.backup_retention_period = 1 @@ -208,6 +211,7 @@ class Database(BaseModel): {% endif %} {{ database.publicly_accessible }} + {{ database.copy_tags_to_snapshot }} {{ database.auto_minor_version_upgrade }} {{ database.allocated_storage }} {{ database.storage_encrypted }} @@ -304,6 +308,7 @@ class Database(BaseModel): "db_parameter_group_name": properties.get('DBParameterGroupName'), "port": properties.get('Port', 3306), "publicly_accessible": properties.get("PubliclyAccessible"), + "copy_tags_to_snapshot": properties.get("CopyTagsToSnapshot"), "region": region_name, "security_groups": security_groups, "storage_encrypted": properties.get("StorageEncrypted"), @@ -362,6 +367,7 @@ class Database(BaseModel): "PreferredBackupWindow": "{{ database.preferred_backup_window }}", "PreferredMaintenanceWindow": "{{ database.preferred_maintenance_window }}", "PubliclyAccessible": "{{ database.publicly_accessible }}", + "CopyTagsToSnapshot": "{{ database.copy_tags_to_snapshot }}", "AllocatedStorage": "{{ database.allocated_storage }}", "Endpoint": { "Address": "{{ database.address }}", @@ -411,10 +417,10 @@ class Database(BaseModel): class Snapshot(BaseModel): - def __init__(self, database, snapshot_id, tags=None): + def __init__(self, database, snapshot_id, tags): self.database = database self.snapshot_id = snapshot_id - self.tags = tags or [] + self.tags = tags self.created_at = iso_8601_datetime_with_milliseconds(datetime.datetime.now()) @property @@ -456,6 +462,20 @@ class Snapshot(BaseModel): """) return template.render(snapshot=self, database=self.database) + def get_tags(self): + return self.tags + + def add_tags(self, tags): + new_keys = [tag_set['Key'] for tag_set in tags] + self.tags = [tag_set for tag_set in 
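
Snapshot.add_tags above implements upsert-by-key semantics; a hypothetical stand-in showing the intended behavior:

def add_tags(existing, new):
    # Tags sharing a Key are replaced; everything else is appended,
    # mirroring AddTagsToResource on a real DB snapshot.
    new_keys = [tag["Key"] for tag in new]
    return [tag for tag in existing if tag["Key"] not in new_keys] + new

merged = add_tags(
    [{"Key": "env", "Value": "dev"}],
    [{"Key": "env", "Value": "prod"}, {"Key": "team", "Value": "db"}],
)
assert merged == [{"Key": "env", "Value": "prod"}, {"Key": "team", "Value": "db"}]
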
self.tags if tag_set[ + 'Key'] not in new_keys] + self.tags.extend(tags) + return self.tags + + def remove_tags(self, tag_keys): + self.tags = [tag_set for tag_set in self.tags if tag_set[ + 'Key'] not in tag_keys] + class SecurityGroup(BaseModel): @@ -691,6 +711,10 @@ class RDS2Backend(BaseBackend): raise DBSnapshotAlreadyExistsError(db_snapshot_identifier) if len(self.snapshots) >= int(os.environ.get('MOTO_RDS_SNAPSHOT_LIMIT', '100')): raise SnapshotQuotaExceededError() + if tags is None: + tags = list() + if database.copy_tags_to_snapshot and not tags: + tags = database.get_tags() snapshot = Snapshot(database, db_snapshot_identifier, tags) self.snapshots[db_snapshot_identifier] = snapshot return snapshot @@ -787,13 +811,13 @@ class RDS2Backend(BaseBackend): def delete_database(self, db_instance_identifier, db_snapshot_name=None): if db_instance_identifier in self.databases: + if db_snapshot_name: + self.create_snapshot(db_instance_identifier, db_snapshot_name) database = self.databases.pop(db_instance_identifier) if database.is_replica: primary = self.find_db_from_id(database.source_db_identifier) primary.remove_replica(database) database.status = 'deleting' - if db_snapshot_name: - self.snapshots[db_snapshot_name] = Snapshot(database, db_snapshot_name) return database else: raise DBInstanceNotFoundError(db_instance_identifier) @@ -1028,8 +1052,8 @@ class RDS2Backend(BaseBackend): if resource_name in self.security_groups: return self.security_groups[resource_name].get_tags() elif resource_type == 'snapshot': # DB Snapshot - # TODO: Complete call to tags on resource type DB Snapshot - return [] + if resource_name in self.snapshots: + return self.snapshots[resource_name].get_tags() elif resource_type == 'subgrp': # DB subnet group if resource_name in self.subnet_groups: return self.subnet_groups[resource_name].get_tags() @@ -1059,7 +1083,8 @@ class RDS2Backend(BaseBackend): if resource_name in self.security_groups: return self.security_groups[resource_name].remove_tags(tag_keys) elif resource_type == 'snapshot': # DB Snapshot - return None + if resource_name in self.snapshots: + return self.snapshots[resource_name].remove_tags(tag_keys) elif resource_type == 'subgrp': # DB subnet group if resource_name in self.subnet_groups: return self.subnet_groups[resource_name].remove_tags(tag_keys) @@ -1088,7 +1113,8 @@ class RDS2Backend(BaseBackend): if resource_name in self.security_groups: return self.security_groups[resource_name].add_tags(tags) elif resource_type == 'snapshot': # DB Snapshot - return [] + if resource_name in self.snapshots: + return self.snapshots[resource_name].add_tags(tags) elif resource_type == 'subgrp': # DB subnet group if resource_name in self.subnet_groups: return self.subnet_groups[resource_name].add_tags(tags) diff --git a/moto/rds2/responses.py b/moto/rds2/responses.py index eddb0042b..66d4e0c52 100644 --- a/moto/rds2/responses.py +++ b/moto/rds2/responses.py @@ -19,6 +19,7 @@ class RDS2Response(BaseResponse): "allocated_storage": self._get_int_param('AllocatedStorage'), "availability_zone": self._get_param("AvailabilityZone"), "backup_retention_period": self._get_param("BackupRetentionPeriod"), + "copy_tags_to_snapshot": self._get_param("CopyTagsToSnapshot"), "db_instance_class": self._get_param('DBInstanceClass'), "db_instance_identifier": self._get_param('DBInstanceIdentifier'), "db_name": self._get_param("DBName"), @@ -159,7 +160,7 @@ class RDS2Response(BaseResponse): def create_db_snapshot(self): db_instance_identifier = self._get_param('DBInstanceIdentifier') 
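
A sketch of what the CopyTagsToSnapshot plumbing buys at the API level, assuming the usual moto ARN shape for snapshots (identifiers, account id, and tag values are illustrative):

import boto3
from moto import mock_rds2

@mock_rds2
def test_snapshot_inherits_instance_tags():
    client = boto3.client("rds", region_name="us-east-1")
    client.create_db_instance(
        DBInstanceIdentifier="db-1",
        DBInstanceClass="db.t2.micro",
        Engine="postgres",
        MasterUsername="admin",
        MasterUserPassword="password",
        AllocatedStorage=20,
        CopyTagsToSnapshot=True,
        Tags=[{"Key": "env", "Value": "dev"}],
    )
    # No explicit snapshot tags, so the instance tags are copied over.
    client.create_db_snapshot(
        DBInstanceIdentifier="db-1", DBSnapshotIdentifier="snap-1"
    )
    arn = "arn:aws:rds:us-east-1:123456789012:snapshot:snap-1"
    tags = client.list_tags_for_resource(ResourceName=arn)["TagList"]
    assert tags == [{"Key": "env", "Value": "dev"}]
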
db_snapshot_identifier = self._get_param('DBSnapshotIdentifier') - tags = self._get_param('Tags', []) + tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) snapshot = self.backend.create_snapshot(db_instance_identifier, db_snapshot_identifier, tags) template = self.response_template(CREATE_SNAPSHOT_TEMPLATE) return template.render(snapshot=snapshot) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index 4eafcfc79..70cbb95cb 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -78,6 +78,7 @@ class Cluster(TaggableResourceMixin, BaseModel): super(Cluster, self).__init__(region_name, tags) self.redshift_backend = redshift_backend self.cluster_identifier = cluster_identifier + self.create_time = iso_8601_datetime_with_milliseconds(datetime.datetime.now()) self.status = 'available' self.node_type = node_type self.master_username = master_username @@ -237,6 +238,7 @@ class Cluster(TaggableResourceMixin, BaseModel): "Address": self.endpoint, "Port": self.port }, + 'ClusterCreateTime': self.create_time, "PendingModifiedValues": [], "Tags": self.tags, "IamRoles": [{ diff --git a/moto/s3/models.py b/moto/s3/models.py index cf5628141..bb4d7848c 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -27,8 +27,14 @@ class FakeDeleteMarker(BaseModel): def __init__(self, key): self.key = key + self.name = key.name + self.last_modified = datetime.datetime.utcnow() self._version_id = key.version_id + 1 + @property + def last_modified_ISO8601(self): + return iso_8601_datetime_with_milliseconds(self.last_modified) + @property def version_id(self): return self._version_id @@ -335,8 +341,9 @@ class LifecycleAndFilter(BaseModel): class LifecycleRule(BaseModel): def __init__(self, id=None, prefix=None, lc_filter=None, status=None, expiration_days=None, - expiration_date=None, transition_days=None, expired_object_delete_marker=None, - transition_date=None, storage_class=None): + expiration_date=None, transition_days=None, transition_date=None, storage_class=None, + expired_object_delete_marker=None, nve_noncurrent_days=None, nvt_noncurrent_days=None, + nvt_storage_class=None, aimu_days=None): self.id = id self.prefix = prefix self.filter = lc_filter @@ -345,8 +352,12 @@ class LifecycleRule(BaseModel): self.expiration_date = expiration_date self.transition_days = transition_days self.transition_date = transition_date - self.expired_object_delete_marker = expired_object_delete_marker self.storage_class = storage_class + self.expired_object_delete_marker = expired_object_delete_marker + self.nve_noncurrent_days = nve_noncurrent_days + self.nvt_noncurrent_days = nvt_noncurrent_days + self.nvt_storage_class = nvt_storage_class + self.aimu_days = aimu_days class CorsRule(BaseModel): @@ -408,9 +419,32 @@ class FakeBucket(BaseModel): def set_lifecycle(self, rules): self.rules = [] for rule in rules: + # Extract and validate actions from Lifecycle rule expiration = rule.get('Expiration') transition = rule.get('Transition') + nve_noncurrent_days = None + if rule.get('NoncurrentVersionExpiration') is not None: + if rule["NoncurrentVersionExpiration"].get('NoncurrentDays') is None: + raise MalformedXML() + nve_noncurrent_days = rule["NoncurrentVersionExpiration"]["NoncurrentDays"] + + nvt_noncurrent_days = None + nvt_storage_class = None + if rule.get('NoncurrentVersionTransition') is not None: + if rule["NoncurrentVersionTransition"].get('NoncurrentDays') is None: + raise MalformedXML() + if rule["NoncurrentVersionTransition"].get('StorageClass') is None: + raise 
MalformedXML() + nvt_noncurrent_days = rule["NoncurrentVersionTransition"]["NoncurrentDays"] + nvt_storage_class = rule["NoncurrentVersionTransition"]["StorageClass"] + + aimu_days = None + if rule.get('AbortIncompleteMultipartUpload') is not None: + if rule["AbortIncompleteMultipartUpload"].get('DaysAfterInitiation') is None: + raise MalformedXML() + aimu_days = rule["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] + eodm = None if expiration and expiration.get("ExpiredObjectDeleteMarker") is not None: # This cannot be set if Date or Days is set: @@ -453,11 +487,14 @@ class FakeBucket(BaseModel): status=rule['Status'], expiration_days=expiration.get('Days') if expiration else None, expiration_date=expiration.get('Date') if expiration else None, - expired_object_delete_marker=eodm, transition_days=transition.get('Days') if transition else None, transition_date=transition.get('Date') if transition else None, - storage_class=transition[ - 'StorageClass'] if transition else None, + storage_class=transition.get('StorageClass') if transition else None, + expired_object_delete_marker=eodm, + nve_noncurrent_days=nve_noncurrent_days, + nvt_noncurrent_days=nvt_noncurrent_days, + nvt_storage_class=nvt_storage_class, + aimu_days=aimu_days, )) def delete_lifecycle(self): @@ -630,10 +667,7 @@ class S3Backend(BaseBackend): latest_versions = {} for version in versions: - if isinstance(version, FakeDeleteMarker): - name = version.key.name - else: - name = version.name + name = version.name version_id = version.version_id maximum_version_per_key[name] = max( version_id, diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 5e7cf0fe5..962025cb1 100755 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -1228,6 +1228,22 @@ S3_BUCKET_LIFECYCLE_CONFIGURATION = """ {% endif %} {% endif %} + {% if rule.nvt_noncurrent_days and rule.nvt_storage_class %} + + {{ rule.nvt_noncurrent_days }} + {{ rule.nvt_storage_class }} + + {% endif %} + {% if rule.nve_noncurrent_days %} + + {{ rule.nve_noncurrent_days }} + + {% endif %} + {% if rule.aimu_days %} + + {{ rule.aimu_days }} + + {% endif %} {% endfor %} @@ -1273,10 +1289,10 @@ S3_BUCKET_GET_VERSIONS = """ {% endfor %} {% for marker in delete_marker_list %} - {{ marker.key.name }} + {{ marker.name }} {{ marker.version_id }} - {% if latest_versions[marker.key.name] == marker.version_id %}true{% else %}false{% endif %} - {{ marker.key.last_modified_ISO8601 }} + {% if latest_versions[marker.name] == marker.version_id %}true{% else %}false{% endif %} + {{ marker.last_modified_ISO8601 }} 75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a webfile @@ -1433,7 +1449,7 @@ S3_MULTIPART_LIST_RESPONSE = """ STANDARD 1 - {{ count }} + {{ count }} {{ count }} false {% for part in parts %} diff --git a/moto/secretsmanager/models.py b/moto/secretsmanager/models.py index c60feb530..1404a0ec8 100644 --- a/moto/secretsmanager/models.py +++ b/moto/secretsmanager/models.py @@ -36,6 +36,7 @@ class SecretsManagerBackend(BaseBackend): self.rotation_enabled = False self.rotation_lambda_arn = '' self.auto_rotate_after_days = 0 + self.version_id = '' def reset(self): region_name = self.region @@ -105,6 +106,56 @@ class SecretsManagerBackend(BaseBackend): return response + def rotate_secret(self, secret_id, client_request_token=None, + rotation_lambda_arn=None, rotation_rules=None): + + rotation_days = 'AutomaticallyAfterDays' + + if not self._is_valid_identifier(secret_id): + raise ResourceNotFoundException + + if client_request_token: + token_length = 
len(client_request_token) + if token_length < 32 or token_length > 64: + msg = ( + 'ClientRequestToken ' + 'must be 32-64 characters long.' + ) + raise InvalidParameterException(msg) + + if rotation_lambda_arn: + if len(rotation_lambda_arn) > 2048: + msg = ( + 'RotationLambdaARN ' + 'must <= 2048 characters long.' + ) + raise InvalidParameterException(msg) + + if rotation_rules: + if rotation_days in rotation_rules: + rotation_period = rotation_rules[rotation_days] + if rotation_period < 1 or rotation_period > 1000: + msg = ( + 'RotationRules.AutomaticallyAfterDays ' + 'must be within 1-1000.' + ) + raise InvalidParameterException(msg) + + self.version_id = client_request_token or '' + self.rotation_lambda_arn = rotation_lambda_arn or '' + if rotation_rules: + self.auto_rotate_after_days = rotation_rules.get(rotation_days, 0) + if self.auto_rotate_after_days > 0: + self.rotation_enabled = True + + response = json.dumps({ + "ARN": secret_arn(self.region, self.secret_id), + "Name": self.name, + "VersionId": self.version_id + }) + + return response + def get_random_password(self, password_length, exclude_characters, exclude_numbers, exclude_punctuation, exclude_uppercase, diff --git a/moto/secretsmanager/responses.py b/moto/secretsmanager/responses.py index c50c6a6e1..b8b6872a8 100644 --- a/moto/secretsmanager/responses.py +++ b/moto/secretsmanager/responses.py @@ -50,3 +50,15 @@ class SecretsManagerResponse(BaseResponse): return secretsmanager_backends[self.region].describe_secret( secret_id=secret_id ) + + def rotate_secret(self): + client_request_token = self._get_param('ClientRequestToken') + rotation_lambda_arn = self._get_param('RotationLambdaARN') + rotation_rules = self._get_param('RotationRules') + secret_id = self._get_param('SecretId') + return secretsmanager_backends[self.region].rotate_secret( + secret_id=secret_id, + client_request_token=client_request_token, + rotation_lambda_arn=rotation_lambda_arn, + rotation_rules=rotation_rules + ) diff --git a/moto/server.py b/moto/server.py index aad47757a..ba2470478 100644 --- a/moto/server.py +++ b/moto/server.py @@ -34,6 +34,9 @@ class DomainDispatcherApplication(object): self.service = service def get_backend_for_host(self, host): + if host == 'moto_api': + return host + if self.service: return self.service diff --git a/moto/ses/models.py b/moto/ses/models.py index 3dced60f2..71fe9d9a1 100644 --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -49,7 +49,8 @@ class SESBackend(BaseBackend): self.sent_messages = [] self.sent_message_count = 0 - def _is_verified_address(self, address): + def _is_verified_address(self, source): + _, address = parseaddr(source) if address in self.addresses: return True user, host = address.split('@', 1) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index b8db356e9..f3262a988 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -385,10 +385,22 @@ class SQSBackend(BaseBackend): def create_queue(self, name, **kwargs): queue = self.queues.get(name) if queue: - # Queue already exist. 
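
The parseaddr() change above lets SES verification accept RFC 2822 sources with display names, not only bare addresses; the relevant stdlib behavior in two lines:

from email.utils import parseaddr

# parseaddr splits an RFC 2822 source into (display name, address);
# bare addresses pass through unchanged.
assert parseaddr("Jane Doe <jane@example.com>")[1] == "jane@example.com"
assert parseaddr("jane@example.com")[1] == "jane@example.com"
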
If attributes don't match, throw error - for key, value in kwargs.items(): - if getattr(queue, camelcase_to_underscores(key)) != value: - raise QueueAlreadyExists("The specified queue already exists.") + try: + kwargs.pop('region') + except KeyError: + pass + + new_queue = Queue(name, region=self.region_name, **kwargs) + + queue_attributes = queue.attributes + new_queue_attributes = new_queue.attributes + + for key in ['CreatedTimestamp', 'LastModifiedTimestamp']: + queue_attributes.pop(key) + new_queue_attributes.pop(key) + + if queue_attributes != new_queue_attributes: + raise QueueAlreadyExists("The specified queue already exists.") else: try: kwargs.pop('region') diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index c489d7118..b4f64b14e 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -336,7 +336,7 @@ class SQSResponse(BaseResponse): try: wait_time = int(self.querystring.get("WaitTimeSeconds")[0]) except TypeError: - wait_time = queue.receive_message_wait_time_seconds + wait_time = int(queue.receive_message_wait_time_seconds) if wait_time < 0 or wait_time > 20: return self._error( diff --git a/moto/ssm/models.py b/moto/ssm/models.py index 656a14839..f16a7d981 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -5,10 +5,12 @@ from collections import defaultdict from moto.core import BaseBackend, BaseModel from moto.core.exceptions import RESTError from moto.ec2 import ec2_backends +from moto.cloudformation import cloudformation_backends import datetime import time import uuid +import itertools class Parameter(BaseModel): @@ -67,7 +69,7 @@ class Command(BaseModel): instance_ids=None, max_concurrency='', max_errors='', notification_config=None, output_s3_bucket_name='', output_s3_key_prefix='', output_s3_region='', parameters=None, - service_role_arn='', targets=None): + service_role_arn='', targets=None, backend_region='us-east-1'): if instance_ids is None: instance_ids = [] @@ -88,9 +90,9 @@ class Command(BaseModel): self.status = 'Success' self.status_details = 'Details placeholder' - now = datetime.datetime.now() - self.requested_date_time = now.isoformat() - expires_after = now + datetime.timedelta(0, timeout_seconds) + self.requested_date_time = datetime.datetime.now() + self.requested_date_time_iso = self.requested_date_time.isoformat() + expires_after = self.requested_date_time + datetime.timedelta(0, timeout_seconds) self.expires_after = expires_after.isoformat() self.comment = comment @@ -105,6 +107,32 @@ class Command(BaseModel): self.parameters = parameters self.service_role_arn = service_role_arn self.targets = targets + self.backend_region = backend_region + + # Get instance ids from a cloud formation stack target. + stack_instance_ids = [self.get_instance_ids_by_stack_ids(target['Values']) for + target in self.targets if + target['Key'] == 'tag:aws:cloudformation:stack-name'] + + self.instance_ids += list(itertools.chain.from_iterable(stack_instance_ids)) + + # Create invocations with a single run command plugin. 
+ self.invocations = [] + for instance_id in self.instance_ids: + self.invocations.append( + self.invocation_response(instance_id, "aws:runShellScript")) + + def get_instance_ids_by_stack_ids(self, stack_ids): + instance_ids = [] + cloudformation_backend = cloudformation_backends[self.backend_region] + for stack_id in stack_ids: + stack_resources = cloudformation_backend.list_stack_resources(stack_id) + instance_resources = [ + instance.id for instance in stack_resources + if instance.type == "AWS::EC2::Instance"] + instance_ids.extend(instance_resources) + + return instance_ids def response_object(self): r = { @@ -122,7 +150,7 @@ class Command(BaseModel): 'OutputS3BucketName': self.output_s3_bucket_name, 'OutputS3KeyPrefix': self.output_s3_key_prefix, 'Parameters': self.parameters, - 'RequestedDateTime': self.requested_date_time, + 'RequestedDateTime': self.requested_date_time_iso, 'ServiceRole': self.service_role_arn, 'Status': self.status, 'StatusDetails': self.status_details, @@ -132,6 +160,50 @@ class Command(BaseModel): return r + def invocation_response(self, instance_id, plugin_name): + # Calculate elapsed time from requested time and now. Use a hardcoded + # elapsed time since there is no easy way to convert a timedelta to + # an ISO 8601 duration string. + elapsed_time_iso = "PT5M" + elapsed_time_delta = datetime.timedelta(minutes=5) + end_time = self.requested_date_time + elapsed_time_delta + + r = { + 'CommandId': self.command_id, + 'InstanceId': instance_id, + 'Comment': self.comment, + 'DocumentName': self.document_name, + 'PluginName': plugin_name, + 'ResponseCode': 0, + 'ExecutionStartDateTime': self.requested_date_time_iso, + 'ExecutionElapsedTime': elapsed_time_iso, + 'ExecutionEndDateTime': end_time.isoformat(), + 'Status': 'Success', + 'StatusDetails': 'Success', + 'StandardOutputContent': '', + 'StandardOutputUrl': '', + 'StandardErrorContent': '', + } + + return r + + def get_invocation(self, instance_id, plugin_name): + invocation = next( + (invocation for invocation in self.invocations + if invocation['InstanceId'] == instance_id), None) + + if invocation is None: + raise RESTError( + 'InvocationDoesNotExist', + 'An error occurred (InvocationDoesNotExist) when calling the GetCommandInvocation operation') + + if plugin_name is not None and invocation['PluginName'] != plugin_name: + raise RESTError( + 'InvocationDoesNotExist', + 'An error occurred (InvocationDoesNotExist) when calling the GetCommandInvocation operation') + + return invocation + class SimpleSystemManagerBackend(BaseBackend): @@ -140,6 +212,11 @@ class SimpleSystemManagerBackend(BaseBackend): self._resource_tags = defaultdict(lambda: defaultdict(dict)) self._commands = [] + # figure out what region we're in + for region, backend in ssm_backends.items(): + if backend == self: + self._region = region + def delete_parameter(self, name): try: del self._parameters[name] @@ -260,7 +337,8 @@ class SimpleSystemManagerBackend(BaseBackend): output_s3_region=kwargs.get('OutputS3Region', ''), parameters=kwargs.get('Parameters', {}), service_role_arn=kwargs.get('ServiceRoleArn', ''), - targets=kwargs.get('Targets', [])) + targets=kwargs.get('Targets', []), + backend_region=self._region) self._commands.append(command) return { @@ -298,6 +376,18 @@ class SimpleSystemManagerBackend(BaseBackend): command for command in self._commands if instance_id in command.instance_ids] + def get_command_invocation(self, **kwargs): + """ + https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_GetCommandInvocation.html + 
""" + + command_id = kwargs.get('CommandId') + instance_id = kwargs.get('InstanceId') + plugin_name = kwargs.get('PluginName', None) + + command = self.get_command_by_id(command_id) + return command.get_invocation(instance_id, plugin_name) + ssm_backends = {} for region, ec2_backend in ec2_backends.items(): diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index fd0d8b630..eb05e51b6 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -210,3 +210,8 @@ class SimpleSystemManagerResponse(BaseResponse): return json.dumps( self.ssm_backend.list_commands(**self.request_params) ) + + def get_command_invocation(self): + return json.dumps( + self.ssm_backend.get_command_invocation(**self.request_params) + ) diff --git a/requirements-dev.txt b/requirements-dev.txt index 655be0616..111cd5f3f 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,7 +1,7 @@ -r requirements.txt mock nose -sure==1.2.24 +sure==1.4.11 coverage flake8==3.5.0 freezegun @@ -13,5 +13,5 @@ six>=1.9 prompt-toolkit==1.0.14 click==6.7 inflection==0.3.1 -lxml==4.0.0 +lxml==4.2.3 beautifulsoup4==4.6.0 diff --git a/setup.py b/setup.py index 16aaf1452..98780dd5a 100755 --- a/setup.py +++ b/setup.py @@ -8,10 +8,9 @@ import sys install_requires = [ "Jinja2>=2.7.3", "boto>=2.36.0", - "boto3>=1.6.16", + "boto3>=1.6.16,<1.8", "botocore>=1.9.16,<1.11", - "cookies", - "cryptography>=2.0.0", + "cryptography>=2.3.0", "requests>=2.5", "xmltodict", "six>1.9", @@ -41,7 +40,7 @@ else: setup( name='moto', - version='1.3.5', + version='1.3.6', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', diff --git a/tests/test_cognitoidentity/test_cognitoidentity.py b/tests/test_cognitoidentity/test_cognitoidentity.py index a38107b99..ac79fa223 100644 --- a/tests/test_cognitoidentity/test_cognitoidentity.py +++ b/tests/test_cognitoidentity/test_cognitoidentity.py @@ -31,6 +31,7 @@ def test_create_identity_pool(): # testing a helper function def test_get_random_identity_id(): assert len(get_random_identity_id('us-west-2')) > 0 + assert len(get_random_identity_id('us-west-2').split(':')[1]) == 19 @mock_cognitoidentity @@ -69,3 +70,16 @@ def test_get_open_id_token_for_developer_identity(): ) assert len(result['Token']) assert result['IdentityId'] == '12345' + +@mock_cognitoidentity +def test_get_open_id_token_for_developer_identity_when_no_explicit_identity_id(): + conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_open_id_token_for_developer_identity( + IdentityPoolId='us-west-2:12345', + Logins={ + 'someurl': '12345' + }, + TokenDuration=123 + ) + assert len(result['Token']) > 0 + assert len(result['IdentityId']) > 0 diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index b2bd469ce..56d7c08a8 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -6,6 +6,7 @@ import os import uuid from jose import jws + from moto import mock_cognitoidp import sure # noqa @@ -24,6 +25,7 @@ def test_create_user_pool(): ) result["UserPool"]["Id"].should_not.be.none + result["UserPool"]["Id"].should.match(r'[\w-]+_[0-9a-zA-Z]+') result["UserPool"]["Name"].should.equal(name) result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) @@ -399,15 +401,22 @@ def authentication_flow(conn): username = str(uuid.uuid4()) temporary_password = str(uuid.uuid4()) user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + user_attribute_name = 
str(uuid.uuid4()) + user_attribute_value = str(uuid.uuid4()) client_id = conn.create_user_pool_client( UserPoolId=user_pool_id, ClientName=str(uuid.uuid4()), + ReadAttributes=[user_attribute_name] )["UserPoolClient"]["ClientId"] conn.admin_create_user( UserPoolId=user_pool_id, Username=username, TemporaryPassword=temporary_password, + UserAttributes=[{ + 'Name': user_attribute_name, + 'Value': user_attribute_value + }] ) result = conn.admin_initiate_auth( @@ -446,6 +455,9 @@ def authentication_flow(conn): "access_token": result["AuthenticationResult"]["AccessToken"], "username": username, "password": new_password, + "additional_fields": { + user_attribute_name: user_attribute_value + } } @@ -475,6 +487,8 @@ def test_token_legitimacy(): access_claims = json.loads(jws.verify(access_token, json_web_key, "RS256")) access_claims["iss"].should.equal(issuer) access_claims["aud"].should.equal(client_id) + for k, v in outputs["additional_fields"].items(): + access_claims[k].should.equal(v) @mock_cognitoidp diff --git a/tests/test_core/test_decorator_calls.py b/tests/test_core/test_decorator_calls.py index 9e3638cc2..5d2f6a4ef 100644 --- a/tests/test_core/test_decorator_calls.py +++ b/tests/test_core/test_decorator_calls.py @@ -85,3 +85,14 @@ class TesterWithSetup(unittest.TestCase): def test_still_the_same(self): bucket = self.conn.get_bucket('mybucket') bucket.name.should.equal("mybucket") + + +@mock_s3_deprecated +class TesterWithStaticmethod(object): + + @staticmethod + def static(*args): + assert not args or not isinstance(args[0], TesterWithStaticmethod) + + def test_no_instance_sent_to_staticmethod(self): + self.static() diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index ab8f25856..afc919dd7 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -201,6 +201,48 @@ def test_item_add_empty_string_exception(): ) +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_update_item_with_empty_string_exception(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], + ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + + conn.put_item( + TableName=name, + Item={ + 'forum_name': { 'S': 'LOLCat Forum' }, + 'subject': { 'S': 'Check this out!' 
}, + 'Body': { 'S': 'http://url_to_lolcat.gif'}, + 'SentBy': { 'S': "test" }, + 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, + } + ) + + with assert_raises(ClientError) as ex: + conn.update_item( + TableName=name, + Key={ + 'forum_name': { 'S': 'LOLCat Forum'}, + }, + UpdateExpression='set Body=:Body', + ExpressionAttributeValues={ + ':Body': {'S': ''} + }) + + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'One or more parameter values were invalid: An AttributeValue may not contain an empty string' + ) + + @requires_boto_gte("2.9") @mock_dynamodb2 def test_query_invalid_table(): @@ -658,8 +700,8 @@ def test_filter_expression(): filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id IN :v0', {}, {':v0': {'NS': [7, 8, 9]}}) filter_expr.expr(row1).should.be(True) - # attribute function tests - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('attribute_exists(Id) AND attribute_not_exists(User)', {}, {}) + # attribute function tests (with extra spaces) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('attribute_exists(Id) AND attribute_not_exists (User)', {}, {}) filter_expr.expr(row1).should.be(True) filter_expr = moto.dynamodb2.comparisons.get_filter_expression('attribute_type(Id, N)', {}, {}) @@ -1178,7 +1220,8 @@ def test_update_if_not_exists(): 'forum_name': 'the-key', 'subject': '123' }, - UpdateExpression='SET created_at = if_not_exists(created_at, :created_at)', + # if_not_exists without space + UpdateExpression='SET created_at=if_not_exists(created_at,:created_at)', ExpressionAttributeValues={ ':created_at': 123 } @@ -1191,7 +1234,8 @@ def test_update_if_not_exists(): 'forum_name': 'the-key', 'subject': '123' }, - UpdateExpression='SET created_at = if_not_exists(created_at, :created_at)', + # if_not_exists with space + UpdateExpression='SET created_at = if_not_exists (created_at, :created_at)', ExpressionAttributeValues={ ':created_at': 456 } diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 8930838c6..442e41dde 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -615,8 +615,8 @@ def test_copy_snapshot(): dest = dest_ec2.Snapshot(copy_snapshot_response['SnapshotId']) attribs = ['data_encryption_key_id', 'encrypted', - 'kms_key_id', 'owner_alias', 'owner_id', 'progress', - 'start_time', 'state', 'state_message', + 'kms_key_id', 'owner_alias', 'owner_id', + 'progress', 'state', 'state_message', 'tags', 'volume_id', 'volume_size'] for attrib in attribs: diff --git a/tests/test_ec2/test_vpc_peering.py b/tests/test_ec2/test_vpc_peering.py index 6722eed60..1f98791b3 100644 --- a/tests/test_ec2/test_vpc_peering.py +++ b/tests/test_ec2/test_vpc_peering.py @@ -2,12 +2,15 @@ from __future__ import unicode_literals # Ensure 'assert_raises' context manager support for Python 2.6 import tests.backport_assert_raises from nose.tools import assert_raises +from moto.ec2.exceptions import EC2ClientError +from botocore.exceptions import ClientError +import boto3 import boto from boto.exception import EC2ResponseError import sure # noqa -from moto import mock_ec2_deprecated +from moto import mock_ec2, mock_ec2_deprecated from tests.helpers import requires_boto_gte @@ -93,3 +96,37 @@ def test_vpc_peering_connections_delete(): 
cm.exception.code.should.equal('InvalidVpcPeeringConnectionId.NotFound') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none + + +@mock_ec2 +def test_vpc_peering_connections_cross_region(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering + vpc_pcx = ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + vpc_pcx.status['Code'].should.equal('initiating-request') + vpc_pcx.requester_vpc.id.should.equal(vpc_usw1.id) + vpc_pcx.accepter_vpc.id.should.equal(vpc_apn1.id) + + +@mock_ec2 +def test_vpc_peering_connections_cross_region_fail(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering wrong region with no vpc + with assert_raises(ClientError) as cm: + ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-2') + cm.exception.response['Error']['Code'].should.equal('InvalidVpcID.NotFound') diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index bf72dc230..70c1463ee 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -304,6 +304,52 @@ def test_create_service(): response['service']['status'].should.equal('ACTIVE') response['service']['taskDefinition'].should.equal( 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['service']['schedulingStrategy'].should.equal('REPLICA') + +@mock_ecs +def test_create_service_scheduling_strategy(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service', + taskDefinition='test_ecs_task', + desiredCount=2, + schedulingStrategy='DAEMON', + ) + response['service']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['service']['desiredCount'].should.equal(2) + len(response['service']['events']).should.equal(0) + len(response['service']['loadBalancers']).should.equal(0) + response['service']['pendingCount'].should.equal(0) + response['service']['runningCount'].should.equal(0) + response['service']['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') + response['service']['serviceName'].should.equal('test_ecs_service') + response['service']['status'].should.equal('ACTIVE') + response['service']['taskDefinition'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['service']['schedulingStrategy'].should.equal('DAEMON') @mock_ecs @@ -411,6 +457,72 @@ def 
test_describe_services(): response['services'][0]['deployments'][0]['status'].should.equal('PRIMARY') +@mock_ecs +def test_describe_services_scheduling_strategy(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service1', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service2', + taskDefinition='test_ecs_task', + desiredCount=2, + schedulingStrategy='DAEMON' + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service3', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + response = client.describe_services( + cluster='test_ecs_cluster', + services=['test_ecs_service1', + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2', + 'test_ecs_service3'] + ) + len(response['services']).should.equal(3) + response['services'][0]['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') + response['services'][0]['serviceName'].should.equal('test_ecs_service1') + response['services'][1]['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') + response['services'][1]['serviceName'].should.equal('test_ecs_service2') + + response['services'][0]['deployments'][0]['desiredCount'].should.equal(2) + response['services'][0]['deployments'][0]['pendingCount'].should.equal(2) + response['services'][0]['deployments'][0]['runningCount'].should.equal(0) + response['services'][0]['deployments'][0]['status'].should.equal('PRIMARY') + + response['services'][0]['schedulingStrategy'].should.equal('REPLICA') + response['services'][1]['schedulingStrategy'].should.equal('DAEMON') + response['services'][2]['schedulingStrategy'].should.equal('REPLICA') + + @mock_ecs def test_update_service(): client = boto3.client('ecs', region_name='us-east-1') @@ -449,6 +561,7 @@ def test_update_service(): desiredCount=0 ) response['service']['desiredCount'].should.equal(0) + response['service']['schedulingStrategy'].should.equal('REPLICA') @mock_ecs @@ -515,8 +628,10 @@ def test_delete_service(): 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') response['service']['serviceName'].should.equal('test_ecs_service') response['service']['status'].should.equal('ACTIVE') + response['service']['schedulingStrategy'].should.equal('REPLICA') response['service']['taskDefinition'].should.equal( 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + @mock_ec2 diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index 5827e70c7..a67508430 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -723,6 +723,40 @@ def test_describe_instance_health(): instances_health[0].state.should.equal('InService') +@mock_ec2 +@mock_elb +def test_describe_instance_health_boto3(): + elb = boto3.client('elb', region_name="us-east-1") + ec2 = boto3.client('ec2', region_name="us-east-1") + instances = ec2.run_instances(MinCount=2, MaxCount=2)['Instances'] + lb_name = "my_load_balancer" + 
elb.create_load_balancer( + Listeners=[{ + 'InstancePort': 80, + 'LoadBalancerPort': 8080, + 'Protocol': 'HTTP' + }], + LoadBalancerName=lb_name, + ) + elb.register_instances_with_load_balancer( + LoadBalancerName=lb_name, + Instances=[{'InstanceId': instances[0]['InstanceId']}] + ) + instances_health = elb.describe_instance_health( + LoadBalancerName=lb_name, + Instances=[{'InstanceId': instance['InstanceId']} for instance in instances] + ) + instances_health['InstanceStates'].should.have.length_of(2) + instances_health['InstanceStates'][0]['InstanceId'].\ + should.equal(instances[0]['InstanceId']) + instances_health['InstanceStates'][0]['State'].\ + should.equal('InService') + instances_health['InstanceStates'][1]['InstanceId'].\ + should.equal(instances[1]['InstanceId']) + instances_health['InstanceStates'][1]['State'].\ + should.equal('Unknown') + + @mock_elb def test_add_remove_tags(): client = boto3.client('elb', region_name='us-east-1') diff --git a/tests/test_glue/fixtures/datacatalog.py b/tests/test_glue/fixtures/datacatalog.py index b2efe4154..edad2f0f4 100644 --- a/tests/test_glue/fixtures/datacatalog.py +++ b/tests/test_glue/fixtures/datacatalog.py @@ -29,3 +29,28 @@ TABLE_INPUT = { }, 'TableType': 'EXTERNAL_TABLE', } + + +PARTITION_INPUT = { + # 'DatabaseName': 'dbname', + 'StorageDescriptor': { + 'BucketColumns': [], + 'Columns': [], + 'Compressed': False, + 'InputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat', + 'Location': 's3://.../partition=value', + 'NumberOfBuckets': -1, + 'OutputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat', + 'Parameters': {}, + 'SerdeInfo': { + 'Parameters': {'path': 's3://...', 'serialization.format': '1'}, + 'SerializationLibrary': 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'}, + 'SkewedInfo': {'SkewedColumnNames': [], + 'SkewedColumnValueLocationMaps': {}, + 'SkewedColumnValues': []}, + 'SortColumns': [], + 'StoredAsSubDirectories': False, + }, + # 'TableName': 'source_table', + # 'Values': ['2018-06-26'], +} diff --git a/tests/test_glue/helpers.py b/tests/test_glue/helpers.py index 4a51f9117..331b99867 100644 --- a/tests/test_glue/helpers.py +++ b/tests/test_glue/helpers.py @@ -2,7 +2,7 @@ from __future__ import unicode_literals import copy -from .fixtures.datacatalog import TABLE_INPUT +from .fixtures.datacatalog import TABLE_INPUT, PARTITION_INPUT def create_database(client, database_name): @@ -17,22 +17,38 @@ def get_database(client, database_name): return client.get_database(Name=database_name) -def create_table_input(table_name, s3_location, columns=[], partition_keys=[]): +def create_table_input(database_name, table_name, columns=[], partition_keys=[]): table_input = copy.deepcopy(TABLE_INPUT) table_input['Name'] = table_name table_input['PartitionKeys'] = partition_keys table_input['StorageDescriptor']['Columns'] = columns - table_input['StorageDescriptor']['Location'] = s3_location + table_input['StorageDescriptor']['Location'] = 's3://my-bucket/{database_name}/{table_name}'.format( + database_name=database_name, + table_name=table_name + ) return table_input -def create_table(client, database_name, table_name, table_input): +def create_table(client, database_name, table_name, table_input=None, **kwargs): + if table_input is None: + table_input = create_table_input(database_name, table_name, **kwargs) + return client.create_table( DatabaseName=database_name, TableInput=table_input ) +def update_table(client, database_name, table_name, table_input=None, **kwargs): + if 
table_input is None:
+        table_input = create_table_input(database_name, table_name, **kwargs)
+
+    return client.update_table(
+        DatabaseName=database_name,
+        TableInput=table_input,
+    )
+
+
 def get_table(client, database_name, table_name):
     return client.get_table(
         DatabaseName=database_name,
@@ -44,3 +60,60 @@ def get_tables(client, database_name):
     return client.get_tables(
         DatabaseName=database_name
     )
+
+
+def get_table_versions(client, database_name, table_name):
+    return client.get_table_versions(
+        DatabaseName=database_name,
+        TableName=table_name
+    )
+
+
+def get_table_version(client, database_name, table_name, version_id):
+    return client.get_table_version(
+        DatabaseName=database_name,
+        TableName=table_name,
+        VersionId=version_id,
+    )
+
+
+def create_partition_input(database_name, table_name, values=[], columns=[]):
+    root_path = 's3://my-bucket/{database_name}/{table_name}'.format(
+        database_name=database_name,
+        table_name=table_name
+    )
+
+    part_input = copy.deepcopy(PARTITION_INPUT)
+    part_input['Values'] = values
+    part_input['StorageDescriptor']['Columns'] = columns
+    part_input['StorageDescriptor']['SerdeInfo']['Parameters']['path'] = root_path
+    return part_input
+
+
+def create_partition(client, database_name, table_name, partition_input=None, **kwargs):
+    if partition_input is None:
+        partition_input = create_partition_input(database_name, table_name, **kwargs)
+    return client.create_partition(
+        DatabaseName=database_name,
+        TableName=table_name,
+        PartitionInput=partition_input
+    )
+
+
+def update_partition(client, database_name, table_name, old_values=[], partition_input=None, **kwargs):
+    if partition_input is None:
+        partition_input = create_partition_input(database_name, table_name, **kwargs)
+    return client.update_partition(
+        DatabaseName=database_name,
+        TableName=table_name,
+        PartitionInput=partition_input,
+        PartitionValueList=old_values,
+    )
+
+
+def get_partition(client, database_name, table_name, values):
+    return client.get_partition(
+        DatabaseName=database_name,
+        TableName=table_name,
+        PartitionValues=values,
+    )
diff --git a/tests/test_glue/test_datacatalog.py b/tests/test_glue/test_datacatalog.py
index 7dabeb1f3..a457d5127 100644
--- a/tests/test_glue/test_datacatalog.py
+++ b/tests/test_glue/test_datacatalog.py
@@ -1,10 +1,15 @@
 from __future__ import unicode_literals

 import sure # noqa
+import re
 from nose.tools import assert_raises
 import boto3
 from botocore.client import ClientError
+
+from datetime import datetime
+import pytz
+
 from moto import mock_glue
 from . import helpers
@@ -30,7 +35,19 @@ def test_create_database_already_exists():
     with assert_raises(ClientError) as exc:
         helpers.create_database(client, database_name)

-    exc.exception.response['Error']['Code'].should.equal('DatabaseAlreadyExistsException')
+    exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException')
+
+
+@mock_glue
+def test_get_database_not_exists():
+    client = boto3.client('glue', region_name='us-east-1')
+    database_name = 'nosuchdatabase'
+
+    with assert_raises(ClientError) as exc:
+        helpers.get_database(client, database_name)
+
+    exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
+    exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found')


 @mock_glue
@@ -40,12 +57,7 @@ def test_create_table():
     helpers.create_database(client, database_name)

     table_name = 'myspecialtable'
-    s3_location = 's3://my-bucket/{database_name}/{table_name}'.format(
-        database_name=database_name,
-        table_name=table_name
-    )
-
-    table_input = helpers.create_table_input(table_name, s3_location)
+    table_input = helpers.create_table_input(database_name, table_name)
     helpers.create_table(client, database_name, table_name, table_input)

     response = helpers.get_table(client, database_name, table_name)
@@ -63,18 +75,12 @@ def test_create_table_already_exists():
     helpers.create_database(client, database_name)

     table_name = 'cantcreatethistabletwice'
-    s3_location = 's3://my-bucket/{database_name}/{table_name}'.format(
-        database_name=database_name,
-        table_name=table_name
-    )
-
-    table_input = helpers.create_table_input(table_name, s3_location)
-    helpers.create_table(client, database_name, table_name, table_input)
+    helpers.create_table(client, database_name, table_name)

     with assert_raises(ClientError) as exc:
-        helpers.create_table(client, database_name, table_name, table_input)
+        helpers.create_table(client, database_name, table_name)

-    exc.exception.response['Error']['Code'].should.equal('TableAlreadyExistsException')
+    exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException')


 @mock_glue
@@ -87,11 +93,7 @@ def test_get_tables():

     table_inputs = {}
     for table_name in table_names:
-        s3_location = 's3://my-bucket/{database_name}/{table_name}'.format(
-            database_name=database_name,
-            table_name=table_name
-        )
-        table_input = helpers.create_table_input(table_name, s3_location)
+        table_input = helpers.create_table_input(database_name, table_name)
         table_inputs[table_name] = table_input
         helpers.create_table(client, database_name, table_name, table_input)

@@ -99,10 +101,326 @@

     tables = response['TableList']

-    assert len(tables) == 3
+    tables.should.have.length_of(3)

     for table in tables:
         table_name = table['Name']
         table_name.should.equal(table_inputs[table_name]['Name'])
         table['StorageDescriptor'].should.equal(table_inputs[table_name]['StorageDescriptor'])
         table['PartitionKeys'].should.equal(table_inputs[table_name]['PartitionKeys'])
+
+
+@mock_glue
+def test_get_table_versions():
+    client = boto3.client('glue', region_name='us-east-1')
+    database_name = 'myspecialdatabase'
+    helpers.create_database(client, database_name)
+
+    table_name = 'myfirsttable'
+    version_inputs = {}
+
+    table_input = helpers.create_table_input(database_name, table_name)
+    helpers.create_table(client, database_name, table_name, table_input)
+    version_inputs["1"] = table_input
+
+    columns = [{'Name': 'country', 'Type': 'string'}]
+    table_input = helpers.create_table_input(database_name, table_name, columns=columns)
+    helpers.update_table(client, database_name, table_name, table_input)
+    version_inputs["2"] = table_input
+
+    # Updating with an identical input should still create a new version
+    helpers.update_table(client, database_name, table_name, table_input)
+    version_inputs["3"] = table_input
+
+    response = helpers.get_table_versions(client, database_name, table_name)
+
+    vers = response['TableVersions']
+
+    vers.should.have.length_of(3)
+    vers[0]['Table']['StorageDescriptor']['Columns'].should.equal([])
+    vers[-1]['Table']['StorageDescriptor']['Columns'].should.equal(columns)
+
+    for n, ver in enumerate(vers):
+        n = str(n + 1)
+        ver['VersionId'].should.equal(n)
+        ver['Table']['Name'].should.equal(table_name)
+        ver['Table']['StorageDescriptor'].should.equal(version_inputs[n]['StorageDescriptor'])
+        ver['Table']['PartitionKeys'].should.equal(version_inputs[n]['PartitionKeys'])
+
+    response = helpers.get_table_version(client, database_name, table_name, "3")
+    ver = response['TableVersion']
+
+    ver['VersionId'].should.equal("3")
+    ver['Table']['Name'].should.equal(table_name)
+    ver['Table']['StorageDescriptor']['Columns'].should.equal(columns)
+
+
+@mock_glue
+def test_get_table_version_not_found():
+    client = boto3.client('glue', region_name='us-east-1')
+    database_name = 'myspecialdatabase'
+    table_name = 'myfirsttable'
+    helpers.create_database(client, database_name)
+    helpers.create_table(client, database_name, table_name)
+
+    with assert_raises(ClientError) as exc:
+        helpers.get_table_version(client, database_name, 'myfirsttable', "20")
+
+    exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
+    exc.exception.response['Error']['Message'].should.match('version', re.I)
+
+
+@mock_glue
+def test_get_table_version_invalid_input():
+    client = boto3.client('glue', region_name='us-east-1')
+    database_name = 'myspecialdatabase'
+    table_name = 'myfirsttable'
+    helpers.create_database(client, database_name)
+    helpers.create_table(client, database_name, table_name)
+
+    with assert_raises(ClientError) as exc:
+        helpers.get_table_version(client, database_name, 'myfirsttable', "10not-an-int")
+
+    exc.exception.response['Error']['Code'].should.equal('InvalidInputException')
+
+
+@mock_glue
+def test_get_table_not_exists():
+    client = boto3.client('glue', region_name='us-east-1')
+    database_name = 'myspecialdatabase'
+    helpers.create_database(client, database_name)
+
+    with assert_raises(ClientError) as exc:
+        helpers.get_table(client, database_name, 'myfirsttable')
+
+    exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
+    exc.exception.response['Error']['Message'].should.match('Table myfirsttable not found')
+
+
+@mock_glue
+def test_get_table_when_database_not_exists():
+    client = boto3.client('glue', region_name='us-east-1')
+    database_name = 'nosuchdatabase'
+
+    with assert_raises(ClientError) as exc:
+        helpers.get_table(client, database_name, 'myfirsttable')
+
+    exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException')
+    exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found')
+
+
+@mock_glue
+def test_get_partitions_empty():
+    client = boto3.client('glue', region_name='us-east-1')
+    database_name = 'myspecialdatabase'
+    table_name = 'myfirsttable'
+    helpers.create_database(client, database_name)
+
+    helpers.create_table(client, database_name, table_name)
+
+    response = client.get_partitions(DatabaseName=database_name, TableName=table_name)
+
+    response['Partitions'].should.have.length_of(0)
+
+
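# The tests that follow drive these endpoints through the helpers; for
# reference, a minimal standalone sketch of the raw Glue partition round trip
# the helpers wrap (an illustration, not part of this patch -- the database
# and table names here are assumptions):
import boto3
from moto import mock_glue


@mock_glue
def sketch_partition_round_trip():
    client = boto3.client('glue', region_name='us-east-1')
    client.create_database(DatabaseInput={'Name': 'db1'})
    client.create_table(DatabaseName='db1', TableInput={'Name': 't1'})
    # PartitionInput carries the partition key values plus a storage descriptor.
    client.create_partition(
        DatabaseName='db1',
        TableName='t1',
        PartitionInput={
            'Values': ['2018-10-01'],
            'StorageDescriptor': {'Location': 's3://my-bucket/db1/t1/2018-10-01'},
        },
    )
    # get_partition addresses a partition by its exact key values.
    partition = client.get_partition(
        DatabaseName='db1',
        TableName='t1',
        PartitionValues=['2018-10-01'],
    )['Partition']
    assert partition['Values'] == ['2018-10-01']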
+@mock_glue +def test_create_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + before = datetime.now(pytz.utc) + + part_input = helpers.create_partition_input(database_name, table_name, values=values) + helpers.create_partition(client, database_name, table_name, part_input) + + after = datetime.now(pytz.utc) + + response = client.get_partitions(DatabaseName=database_name, TableName=table_name) + + partitions = response['Partitions'] + + partitions.should.have.length_of(1) + + partition = partitions[0] + + partition['TableName'].should.equal(table_name) + partition['StorageDescriptor'].should.equal(part_input['StorageDescriptor']) + partition['Values'].should.equal(values) + partition['CreationTime'].should.be.greater_than(before) + partition['CreationTime'].should.be.lower_than(after) + + +@mock_glue +def test_create_partition_already_exist(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + helpers.create_partition(client, database_name, table_name, values=values) + + with assert_raises(ClientError) as exc: + helpers.create_partition(client, database_name, table_name, values=values) + + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_get_partition_not_found(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.get_partition(client, database_name, table_name, values) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('partition') + + +@mock_glue +def test_get_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + values = [['2018-10-01'], ['2018-09-01']] + + helpers.create_partition(client, database_name, table_name, values=values[0]) + helpers.create_partition(client, database_name, table_name, values=values[1]) + + response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=values[1]) + + partition = response['Partition'] + + partition['TableName'].should.equal(table_name) + partition['Values'].should.equal(values[1]) + + +@mock_glue +def test_update_partition_not_found_moving(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.update_partition(client, database_name, table_name, old_values=['0000-00-00'], values=['2018-10-02']) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('partition') + + 
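# Continuing the sketch above (same assumed client and names): update_partition
# addresses the existing partition through PartitionValueList, while
# PartitionInput['Values'] may carry new key values, which "moves" the
# partition -- the case the update tests below exercise.
client.update_partition(
    DatabaseName='db1',
    TableName='t1',
    PartitionValueList=['2018-10-01'],   # key values of the existing partition
    PartitionInput={
        'Values': ['2018-09-01'],        # new key values: the partition moves
        'StorageDescriptor': {'Location': 's3://my-bucket/db1/t1/2018-09-01'},
    },
)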
+@mock_glue +def test_update_partition_not_found_change_in_place(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.update_partition(client, database_name, table_name, old_values=values, values=values) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('partition') + + +@mock_glue +def test_update_partition_cannot_overwrite(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + values = [['2018-10-01'], ['2018-09-01']] + + helpers.create_partition(client, database_name, table_name, values=values[0]) + helpers.create_partition(client, database_name, table_name, values=values[1]) + + with assert_raises(ClientError) as exc: + helpers.update_partition(client, database_name, table_name, old_values=values[0], values=values[1]) + + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_update_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + helpers.create_partition(client, database_name, table_name, values=values) + + response = helpers.update_partition( + client, + database_name, + table_name, + old_values=values, + values=values, + columns=[{'Name': 'country', 'Type': 'string'}], + ) + + response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=values) + partition = response['Partition'] + + partition['TableName'].should.equal(table_name) + partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}]) + + +@mock_glue +def test_update_partition_move(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + new_values = ['2018-09-01'] + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + helpers.create_partition(client, database_name, table_name, values=values) + + response = helpers.update_partition( + client, + database_name, + table_name, + old_values=values, + values=new_values, + columns=[{'Name': 'country', 'Type': 'string'}], + ) + + with assert_raises(ClientError) as exc: + helpers.get_partition(client, database_name, table_name, values) + + # Old partition shouldn't exist anymore + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + + response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=new_values) + partition = response['Partition'] + + partition['TableName'].should.equal(table_name) + partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}]) diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 2225f0644..bc23ff712 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -286,6 +286,16 @@ def 
test_create_policy_versions():
         PolicyDocument='{"some":"policy"}')
     version.get('PolicyVersion').get('Document').should.equal({'some': 'policy'})

+@mock_iam
+def test_get_policy():
+    conn = boto3.client('iam', region_name='us-east-1')
+    response = conn.create_policy(
+        PolicyName="TestGetPolicy",
+        PolicyDocument='{"some":"policy"}')
+    policy = conn.get_policy(
+        PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicy")
+    policy['Policy']['Arn'].should.equal("arn:aws:iam::123456789012:policy/TestGetPolicy")
+

 @mock_iam
 def test_get_policy_version():
@@ -314,17 +324,22 @@ def test_list_policy_versions():
         PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions")
     conn.create_policy(
         PolicyName="TestListPolicyVersions",
-        PolicyDocument='{"some":"policy"}')
-    conn.create_policy_version(
-        PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions",
         PolicyDocument='{"first":"policy"}')
+    versions = conn.list_policy_versions(
+        PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions")
+    versions.get('Versions')[0].get('VersionId').should.equal('v1')
+
+    conn.create_policy_version(
         PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions",
         PolicyDocument='{"second":"policy"}')
+    conn.create_policy_version(
+        PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions",
+        PolicyDocument='{"third":"policy"}')
     versions = conn.list_policy_versions(
         PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions")
-    versions.get('Versions')[0].get('Document').should.equal({'first': 'policy'})
+    len(versions.get('Versions')).should.equal(3)
     versions.get('Versions')[1].get('Document').should.equal({'second': 'policy'})
+    versions.get('Versions')[2].get('Document').should.equal({'third': 'policy'})


@@ -332,20 +347,20 @@ def test_delete_policy_version():
     conn = boto3.client('iam', region_name='us-east-1')
     conn.create_policy(
         PolicyName="TestDeletePolicyVersion",
-        PolicyDocument='{"some":"policy"}')
+        PolicyDocument='{"first":"policy"}')
     conn.create_policy_version(
         PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion",
-        PolicyDocument='{"first":"policy"}')
+        PolicyDocument='{"second":"policy"}')
     with assert_raises(ClientError):
         conn.delete_policy_version(
             PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion",
             VersionId='v2-nope-this-does-not-exist')
     conn.delete_policy_version(
         PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion",
-        VersionId='v1')
+        VersionId='v2')
     versions = conn.list_policy_versions(
         PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion")
-    len(versions.get('Versions')).should.equal(0)
+    len(versions.get('Versions')).should.equal(1)


 @mock_iam_deprecated()
diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py
index 96715de71..8bccae27a 100644
--- a/tests/test_kms/test_kms.py
+++ b/tests/test_kms/test_kms.py
@@ -1,5 +1,5 @@
 from __future__ import unicode_literals
-import re
+import os, re

 import boto3
 import boto.kms
@@ -8,6 +8,9 @@ from boto.kms.exceptions import AlreadyExistsException, NotFoundException
 import sure # noqa
 from moto import mock_kms, mock_kms_deprecated
 from nose.tools import assert_raises
+from freezegun import freeze_time
+from datetime import datetime, timedelta
+from dateutil.tz import tzlocal


 @mock_kms_deprecated
@@ -617,3 +620,100 @@ def test_kms_encrypt_boto3():

     response = client.decrypt(CiphertextBlob=response['CiphertextBlob'])
     response['Plaintext'].should.equal(b'bar')
+
+
+@mock_kms
+def test_disable_key():
+    client = boto3.client('kms',
region_name='us-east-1') + key = client.create_key(Description='disable-key') + client.disable_key( + KeyId=key['KeyMetadata']['KeyId'] + ) + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == 'Disabled' + + +@mock_kms +def test_enable_key(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='enable-key') + client.disable_key( + KeyId=key['KeyMetadata']['KeyId'] + ) + client.enable_key( + KeyId=key['KeyMetadata']['KeyId'] + ) + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == True + assert result["KeyMetadata"]["KeyState"] == 'Enabled' + + +@mock_kms +def test_schedule_key_deletion(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='schedule-key-deletion') + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false': + with freeze_time("2015-01-01 12:00:00"): + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + assert response['DeletionDate'] == datetime(2015, 1, 31, 12, 0, tzinfo=tzlocal()) + else: + # Can't manipulate time in server mode + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == 'PendingDeletion' + assert 'DeletionDate' in result["KeyMetadata"] + + +@mock_kms +def test_schedule_key_deletion_custom(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='schedule-key-deletion') + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false': + with freeze_time("2015-01-01 12:00:00"): + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'], + PendingWindowInDays=7 + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + assert response['DeletionDate'] == datetime(2015, 1, 8, 12, 0, tzinfo=tzlocal()) + else: + # Can't manipulate time in server mode + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'], + PendingWindowInDays=7 + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == 'PendingDeletion' + assert 'DeletionDate' in result["KeyMetadata"] + + +@mock_kms +def test_cancel_key_deletion(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='cancel-key-deletion') + client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + response = client.cancel_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == 'Disabled' + assert 'DeletionDate' not in result["KeyMetadata"] diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index 3f924cc55..e3d46fd87 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -1,5 +1,6 @@ import boto3 import sure # noqa +import six from botocore.exceptions import 
ClientError from moto import mock_logs, settings @@ -47,7 +48,7 @@ def test_exceptions(): logEvents=[ { 'timestamp': 0, - 'message': 'line' + 'message': 'line' }, ], ) @@ -79,7 +80,7 @@ def test_put_logs(): {'timestamp': 0, 'message': 'hello'}, {'timestamp': 0, 'message': 'world'} ] - conn.put_log_events( + putRes = conn.put_log_events( logGroupName=log_group_name, logStreamName=log_stream_name, logEvents=messages @@ -89,6 +90,9 @@ def test_put_logs(): logStreamName=log_stream_name ) events = res['events'] + nextSequenceToken = putRes['nextSequenceToken'] + assert isinstance(nextSequenceToken, six.string_types) == True + assert len(nextSequenceToken) == 56 events.should.have.length_of(2) @@ -117,4 +121,8 @@ def test_filter_logs_interleaved(): interleaved=True, ) events = res['events'] - events.should.have.length_of(2) + for original_message, resulting_event in zip(messages, events): + resulting_event['eventId'].should.equal(str(resulting_event['eventId'])) + resulting_event['timestamp'].should.equal(original_message['timestamp']) + resulting_event['message'].should.equal(original_message['message']) + diff --git a/tests/test_organizations/__init__.py b/tests/test_organizations/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/test_organizations/organizations_test_utils.py b/tests/test_organizations/organizations_test_utils.py new file mode 100644 index 000000000..6548b1830 --- /dev/null +++ b/tests/test_organizations/organizations_test_utils.py @@ -0,0 +1,136 @@ +from __future__ import unicode_literals + +import six +import sure # noqa +import datetime +from moto.organizations import utils + +EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$" +ORG_ID_REGEX = r'o-[a-z0-9]{%s}' % utils.ORG_ID_SIZE +ROOT_ID_REGEX = r'r-[a-z0-9]{%s}' % utils.ROOT_ID_SIZE +OU_ID_REGEX = r'ou-[a-z0-9]{%s}-[a-z0-9]{%s}' % (utils.ROOT_ID_SIZE, utils.OU_ID_SUFFIX_SIZE) +ACCOUNT_ID_REGEX = r'[0-9]{%s}' % utils.ACCOUNT_ID_SIZE +CREATE_ACCOUNT_STATUS_ID_REGEX = r'car-[a-z0-9]{%s}' % utils.CREATE_ACCOUNT_STATUS_ID_SIZE + + +def test_make_random_org_id(): + org_id = utils.make_random_org_id() + org_id.should.match(ORG_ID_REGEX) + + +def test_make_random_root_id(): + root_id = utils.make_random_root_id() + root_id.should.match(ROOT_ID_REGEX) + + +def test_make_random_ou_id(): + root_id = utils.make_random_root_id() + ou_id = utils.make_random_ou_id(root_id) + ou_id.should.match(OU_ID_REGEX) + + +def test_make_random_account_id(): + account_id = utils.make_random_account_id() + account_id.should.match(ACCOUNT_ID_REGEX) + + +def test_make_random_create_account_status_id(): + create_account_status_id = utils.make_random_create_account_status_id() + create_account_status_id.should.match(CREATE_ACCOUNT_STATUS_ID_REGEX) + + +def validate_organization(response): + org = response['Organization'] + sorted(org.keys()).should.equal([ + 'Arn', + 'AvailablePolicyTypes', + 'FeatureSet', + 'Id', + 'MasterAccountArn', + 'MasterAccountEmail', + 'MasterAccountId', + ]) + org['Id'].should.match(ORG_ID_REGEX) + org['MasterAccountId'].should.equal(utils.MASTER_ACCOUNT_ID) + org['MasterAccountArn'].should.equal(utils.MASTER_ACCOUNT_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + )) + org['Arn'].should.equal(utils.ORGANIZATION_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + )) + org['MasterAccountEmail'].should.equal(utils.MASTER_ACCOUNT_EMAIL) + org['FeatureSet'].should.be.within(['ALL', 'CONSOLIDATED_BILLING']) + org['AvailablePolicyTypes'].should.equal([{ + 'Type': 
diff --git a/tests/test_organizations/__init__.py b/tests/test_organizations/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/test_organizations/organizations_test_utils.py b/tests/test_organizations/organizations_test_utils.py
new file mode 100644
index 000000000..6548b1830
--- /dev/null
+++ b/tests/test_organizations/organizations_test_utils.py
@@ -0,0 +1,136 @@
+from __future__ import unicode_literals
+
+import six
+import sure  # noqa
+import datetime
+from moto.organizations import utils
+
+EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$"
+ORG_ID_REGEX = r'o-[a-z0-9]{%s}' % utils.ORG_ID_SIZE
+ROOT_ID_REGEX = r'r-[a-z0-9]{%s}' % utils.ROOT_ID_SIZE
+OU_ID_REGEX = r'ou-[a-z0-9]{%s}-[a-z0-9]{%s}' % (utils.ROOT_ID_SIZE, utils.OU_ID_SUFFIX_SIZE)
+ACCOUNT_ID_REGEX = r'[0-9]{%s}' % utils.ACCOUNT_ID_SIZE
+CREATE_ACCOUNT_STATUS_ID_REGEX = r'car-[a-z0-9]{%s}' % utils.CREATE_ACCOUNT_STATUS_ID_SIZE
+
+
+def test_make_random_org_id():
+    org_id = utils.make_random_org_id()
+    org_id.should.match(ORG_ID_REGEX)
+
+
+def test_make_random_root_id():
+    root_id = utils.make_random_root_id()
+    root_id.should.match(ROOT_ID_REGEX)
+
+
+def test_make_random_ou_id():
+    root_id = utils.make_random_root_id()
+    ou_id = utils.make_random_ou_id(root_id)
+    ou_id.should.match(OU_ID_REGEX)
+
+
+def test_make_random_account_id():
+    account_id = utils.make_random_account_id()
+    account_id.should.match(ACCOUNT_ID_REGEX)
+
+
+def test_make_random_create_account_status_id():
+    create_account_status_id = utils.make_random_create_account_status_id()
+    create_account_status_id.should.match(CREATE_ACCOUNT_STATUS_ID_REGEX)
+
+
+def validate_organization(response):
+    org = response['Organization']
+    sorted(org.keys()).should.equal([
+        'Arn',
+        'AvailablePolicyTypes',
+        'FeatureSet',
+        'Id',
+        'MasterAccountArn',
+        'MasterAccountEmail',
+        'MasterAccountId',
+    ])
+    org['Id'].should.match(ORG_ID_REGEX)
+    org['MasterAccountId'].should.equal(utils.MASTER_ACCOUNT_ID)
+    org['MasterAccountArn'].should.equal(utils.MASTER_ACCOUNT_ARN_FORMAT.format(
+        org['MasterAccountId'],
+        org['Id'],
+    ))
+    org['Arn'].should.equal(utils.ORGANIZATION_ARN_FORMAT.format(
+        org['MasterAccountId'],
+        org['Id'],
+    ))
+    org['MasterAccountEmail'].should.equal(utils.MASTER_ACCOUNT_EMAIL)
+    org['FeatureSet'].should.be.within(['ALL', 'CONSOLIDATED_BILLING'])
+    org['AvailablePolicyTypes'].should.equal([{
+        'Type': 'SERVICE_CONTROL_POLICY',
+        'Status': 'ENABLED'
+    }])
+
+
+def validate_roots(org, response):
+    response.should.have.key('Roots').should.be.a(list)
+    response['Roots'].should_not.be.empty
+    root = response['Roots'][0]
+    root.should.have.key('Id').should.match(ROOT_ID_REGEX)
+    root.should.have.key('Arn').should.equal(utils.ROOT_ARN_FORMAT.format(
+        org['MasterAccountId'],
+        org['Id'],
+        root['Id'],
+    ))
+    root.should.have.key('Name').should.be.a(six.string_types)
+    root.should.have.key('PolicyTypes').should.be.a(list)
+    root['PolicyTypes'][0].should.have.key('Type').should.equal('SERVICE_CONTROL_POLICY')
+    root['PolicyTypes'][0].should.have.key('Status').should.equal('ENABLED')
+
+
+def validate_organizational_unit(org, response):
+    response.should.have.key('OrganizationalUnit').should.be.a(dict)
+    ou = response['OrganizationalUnit']
+    ou.should.have.key('Id').should.match(OU_ID_REGEX)
+    ou.should.have.key('Arn').should.equal(utils.OU_ARN_FORMAT.format(
+        org['MasterAccountId'],
+        org['Id'],
+        ou['Id'],
+    ))
+    ou.should.have.key('Name').should.be.a(six.string_types)
+
+
+def validate_account(org, account):
+    sorted(account.keys()).should.equal([
+        'Arn',
+        'Email',
+        'Id',
+        'JoinedMethod',
+        'JoinedTimestamp',
+        'Name',
+        'Status',
+    ])
+    account['Id'].should.match(ACCOUNT_ID_REGEX)
+    account['Arn'].should.equal(utils.ACCOUNT_ARN_FORMAT.format(
+        org['MasterAccountId'],
+        org['Id'],
+        account['Id'],
+    ))
+    account['Email'].should.match(EMAIL_REGEX)
+    account['JoinedMethod'].should.be.within(['INVITED', 'CREATED'])
+    account['Status'].should.be.within(['ACTIVE', 'SUSPENDED'])
+    account['Name'].should.be.a(six.string_types)
+    account['JoinedTimestamp'].should.be.a(datetime.datetime)
+
+
+def validate_create_account_status(create_status):
+    sorted(create_status.keys()).should.equal([
+        'AccountId',
+        'AccountName',
+        'CompletedTimestamp',
+        'Id',
+        'RequestedTimestamp',
+        'State',
+    ])
+    create_status['Id'].should.match(CREATE_ACCOUNT_STATUS_ID_REGEX)
+    create_status['AccountId'].should.match(ACCOUNT_ID_REGEX)
+    create_status['AccountName'].should.be.a(six.string_types)
+    create_status['State'].should.equal('SUCCEEDED')
+    create_status['RequestedTimestamp'].should.be.a(datetime.datetime)
+    create_status['CompletedTimestamp'].should.be.a(datetime.datetime)
diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py
new file mode 100644
index 000000000..dfac5feeb
--- /dev/null
+++ b/tests/test_organizations/test_organizations_boto3.py
@@ -0,0 +1,322 @@
+from __future__ import unicode_literals
+
+import boto3
+import sure  # noqa
+from botocore.exceptions import ClientError
+from nose.tools import assert_raises
+
+from moto import mock_organizations
+from moto.organizations import utils
+from .organizations_test_utils import (
+    validate_organization,
+    validate_roots,
+    validate_organizational_unit,
+    validate_account,
+    validate_create_account_status,
+)
+
+
+@mock_organizations
+def test_create_organization():
+    client = boto3.client('organizations', region_name='us-east-1')
+    response = client.create_organization(FeatureSet='ALL')
+    validate_organization(response)
+    response['Organization']['FeatureSet'].should.equal('ALL')
+
+
+@mock_organizations
+def test_describe_organization():
+    client = boto3.client('organizations', region_name='us-east-1')
+    client.create_organization(FeatureSet='ALL')
+    response = client.describe_organization()
+    validate_organization(response)
+
+
+@mock_organizations
+def test_describe_organization_exception():
+    client = boto3.client('organizations', region_name='us-east-1')
+    with assert_raises(ClientError) as e:
+        response = client.describe_organization()
+    ex = e.exception
+    ex.operation_name.should.equal('DescribeOrganization')
+    ex.response['Error']['Code'].should.equal('400')
+    ex.response['Error']['Message'].should.contain('AWSOrganizationsNotInUseException')
+
+
+# Organizational Units
+
+@mock_organizations
+def test_list_roots():
+    client = boto3.client('organizations', region_name='us-east-1')
+    org = client.create_organization(FeatureSet='ALL')['Organization']
+    response = client.list_roots()
+    validate_roots(org, response)
+
+
+@mock_organizations
+def test_create_organizational_unit():
+    client = boto3.client('organizations', region_name='us-east-1')
+    org = client.create_organization(FeatureSet='ALL')['Organization']
+    root_id = client.list_roots()['Roots'][0]['Id']
+    ou_name = 'ou01'
+    response = client.create_organizational_unit(
+        ParentId=root_id,
+        Name=ou_name,
+    )
+    validate_organizational_unit(org, response)
+    response['OrganizationalUnit']['Name'].should.equal(ou_name)
+
+
+@mock_organizations
+def test_describe_organizational_unit():
+    client = boto3.client('organizations', region_name='us-east-1')
+    org = client.create_organization(FeatureSet='ALL')['Organization']
+    root_id = client.list_roots()['Roots'][0]['Id']
+    ou_id = client.create_organizational_unit(
+        ParentId=root_id,
+        Name='ou01',
+    )['OrganizationalUnit']['Id']
+    response = client.describe_organizational_unit(OrganizationalUnitId=ou_id)
+    validate_organizational_unit(org, response)
+
+
+@mock_organizations
+def test_describe_organizational_unit_exception():
+    client = boto3.client('organizations', region_name='us-east-1')
+    org = client.create_organization(FeatureSet='ALL')['Organization']
+    with assert_raises(ClientError) as e:
+        response = client.describe_organizational_unit(
+            OrganizationalUnitId=utils.make_random_root_id()
+        )
+    ex = e.exception
+    ex.operation_name.should.equal('DescribeOrganizationalUnit')
+    ex.response['Error']['Code'].should.equal('400')
+    ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException')
+
+
+@mock_organizations
+def test_list_organizational_units_for_parent():
+    client = boto3.client('organizations', region_name='us-east-1')
+    org = client.create_organization(FeatureSet='ALL')['Organization']
+    root_id = client.list_roots()['Roots'][0]['Id']
+    client.create_organizational_unit(ParentId=root_id, Name='ou01')
+    client.create_organizational_unit(ParentId=root_id, Name='ou02')
+    client.create_organizational_unit(ParentId=root_id, Name='ou03')
+    response = client.list_organizational_units_for_parent(ParentId=root_id)
+    response.should.have.key('OrganizationalUnits').should.be.a(list)
+    for ou in response['OrganizationalUnits']:
+        validate_organizational_unit(org, dict(OrganizationalUnit=ou))
+
+
+@mock_organizations
+def test_list_organizational_units_for_parent_exception():
+    client = boto3.client('organizations', region_name='us-east-1')
+    with assert_raises(ClientError) as e:
+        response = client.list_organizational_units_for_parent(
+            ParentId=utils.make_random_root_id()
+        )
+    ex = e.exception
+    ex.operation_name.should.equal('ListOrganizationalUnitsForParent')
+    ex.response['Error']['Code'].should.equal('400')
+    ex.response['Error']['Message'].should.contain('ParentNotFoundException')
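The exception tests above (and the account-level ones that follow) all repeat the same assert_raises dance. A hypothetical helper capturing that pattern could look like this; `assert_client_error` is an illustration, not part of the patch:

def assert_client_error(call, operation_name, message_fragment):
    # Run `call`, expect a ClientError, then check the three fields the
    # tests above assert on individually.
    with assert_raises(ClientError) as e:
        call()
    ex = e.exception
    ex.operation_name.should.equal(operation_name)
    ex.response['Error']['Code'].should.equal('400')
    ex.response['Error']['Message'].should.contain(message_fragment)

For example, assert_client_error(lambda: client.describe_organization(), 'DescribeOrganization', 'AWSOrganizationsNotInUseException') would replace the body of the first exception test.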
+
+
+# Accounts
+mockname = 'mock-account'
+mockdomain = 'moto-example.org'
+mockemail = '@'.join([mockname, mockdomain])
+
+
+@mock_organizations
+def test_create_account():
+    client = boto3.client('organizations', region_name='us-east-1')
+    client.create_organization(FeatureSet='ALL')
+    create_status = client.create_account(
+        AccountName=mockname, Email=mockemail
+    )['CreateAccountStatus']
+    validate_create_account_status(create_status)
+    create_status['AccountName'].should.equal(mockname)
+
+
+@mock_organizations
+def test_describe_account():
+    client = boto3.client('organizations', region_name='us-east-1')
+    org = client.create_organization(FeatureSet='ALL')['Organization']
+    account_id = client.create_account(
+        AccountName=mockname, Email=mockemail
+    )['CreateAccountStatus']['AccountId']
+    response = client.describe_account(AccountId=account_id)
+    validate_account(org, response['Account'])
+    response['Account']['Name'].should.equal(mockname)
+    response['Account']['Email'].should.equal(mockemail)
+
+
+@mock_organizations
+def test_describe_account_exception():
+    client = boto3.client('organizations', region_name='us-east-1')
+    with assert_raises(ClientError) as e:
+        response = client.describe_account(AccountId=utils.make_random_account_id())
+    ex = e.exception
+    ex.operation_name.should.equal('DescribeAccount')
+    ex.response['Error']['Code'].should.equal('400')
+    ex.response['Error']['Message'].should.contain('AccountNotFoundException')
+
+
+@mock_organizations
+def test_list_accounts():
+    client = boto3.client('organizations', region_name='us-east-1')
+    org = client.create_organization(FeatureSet='ALL')['Organization']
+    for i in range(5):
+        name = mockname + str(i)
+        email = name + '@' + mockdomain
+        client.create_account(AccountName=name, Email=email)
+    response = client.list_accounts()
+    response.should.have.key('Accounts')
+    accounts = response['Accounts']
+    len(accounts).should.equal(5)
+    for account in accounts:
+        validate_account(org, account)
+    accounts[3]['Name'].should.equal(mockname + '3')
+    accounts[2]['Email'].should.equal(mockname + '2' + '@' + mockdomain)
+
+
+@mock_organizations
+def test_list_accounts_for_parent():
+    client = boto3.client('organizations', region_name='us-east-1')
+    org = client.create_organization(FeatureSet='ALL')['Organization']
+    root_id = client.list_roots()['Roots'][0]['Id']
+    account_id = client.create_account(
+        AccountName=mockname,
+        Email=mockemail,
+    )['CreateAccountStatus']['AccountId']
+    response = client.list_accounts_for_parent(ParentId=root_id)
+    account_id.should.be.within([account['Id'] for account in response['Accounts']])
+
+
+@mock_organizations
+def test_move_account():
+    client = boto3.client('organizations', region_name='us-east-1')
+    org = client.create_organization(FeatureSet='ALL')['Organization']
+    root_id = client.list_roots()['Roots'][0]['Id']
+    account_id = client.create_account(
+        AccountName=mockname, Email=mockemail
+    )['CreateAccountStatus']['AccountId']
+    ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01')
+    ou01_id = ou01['OrganizationalUnit']['Id']
+    client.move_account(
+        AccountId=account_id,
+        SourceParentId=root_id,
+        DestinationParentId=ou01_id,
+    )
+    response = client.list_accounts_for_parent(ParentId=ou01_id)
+    account_id.should.be.within([account['Id'] for account in response['Accounts']])
+
+
+@mock_organizations
+def test_list_parents_for_ou():
+    client = boto3.client('organizations', region_name='us-east-1')
+    org = client.create_organization(FeatureSet='ALL')['Organization']
+    root_id = client.list_roots()['Roots'][0]['Id']
+    ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01')
+    ou01_id = ou01['OrganizationalUnit']['Id']
+    response01 = client.list_parents(ChildId=ou01_id)
+    response01.should.have.key('Parents').should.be.a(list)
+    response01['Parents'][0].should.have.key('Id').should.equal(root_id)
+    response01['Parents'][0].should.have.key('Type').should.equal('ROOT')
+    ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02')
+    ou02_id = ou02['OrganizationalUnit']['Id']
+    response02 = client.list_parents(ChildId=ou02_id)
+    response02.should.have.key('Parents').should.be.a(list)
+    response02['Parents'][0].should.have.key('Id').should.equal(ou01_id)
+    response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT')
+
+
+@mock_organizations
+def test_list_parents_for_accounts():
+    client = boto3.client('organizations', region_name='us-east-1')
+    org = client.create_organization(FeatureSet='ALL')['Organization']
+    root_id = client.list_roots()['Roots'][0]['Id']
+    ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01')
+    ou01_id = ou01['OrganizationalUnit']['Id']
+    account01_id = client.create_account(
+        AccountName='account01',
+        Email='account01@moto-example.org'
+    )['CreateAccountStatus']['AccountId']
+    account02_id = client.create_account(
+        AccountName='account02',
+        Email='account02@moto-example.org'
+    )['CreateAccountStatus']['AccountId']
+    client.move_account(
+        AccountId=account02_id,
+        SourceParentId=root_id,
+        DestinationParentId=ou01_id,
+    )
+    response01 = client.list_parents(ChildId=account01_id)
+    response01.should.have.key('Parents').should.be.a(list)
+    response01['Parents'][0].should.have.key('Id').should.equal(root_id)
+    response01['Parents'][0].should.have.key('Type').should.equal('ROOT')
+    response02 = client.list_parents(ChildId=account02_id)
+    response02.should.have.key('Parents').should.be.a(list)
+    response02['Parents'][0].should.have.key('Id').should.equal(ou01_id)
+    response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT')
+
+
+@mock_organizations
+def test_list_children():
+    client = boto3.client('organizations', region_name='us-east-1')
+    org = client.create_organization(FeatureSet='ALL')['Organization']
+    root_id = client.list_roots()['Roots'][0]['Id']
+    ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01')
+    ou01_id = ou01['OrganizationalUnit']['Id']
+    ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02')
+    ou02_id = ou02['OrganizationalUnit']['Id']
+    account01_id = client.create_account(
+        AccountName='account01',
+        Email='account01@moto-example.org'
+    )['CreateAccountStatus']['AccountId']
+    account02_id = client.create_account(
+        AccountName='account02',
+        Email='account02@moto-example.org'
+    )['CreateAccountStatus']['AccountId']
+    client.move_account(
+        AccountId=account02_id,
+        SourceParentId=root_id,
+        DestinationParentId=ou01_id,
+    )
+    response01 = client.list_children(ParentId=root_id, ChildType='ACCOUNT')
+    response02 = client.list_children(ParentId=root_id, ChildType='ORGANIZATIONAL_UNIT')
+    response03 = client.list_children(ParentId=ou01_id, ChildType='ACCOUNT')
+    response04 = client.list_children(ParentId=ou01_id, ChildType='ORGANIZATIONAL_UNIT')
+    response01['Children'][0]['Id'].should.equal(account01_id)
+    response01['Children'][0]['Type'].should.equal('ACCOUNT')
+    response02['Children'][0]['Id'].should.equal(ou01_id)
+    response02['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT')
+    response03['Children'][0]['Id'].should.equal(account02_id)
+    response03['Children'][0]['Type'].should.equal('ACCOUNT')
+    response04['Children'][0]['Id'].should.equal(ou02_id)
+    response04['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT')
+
+
+@mock_organizations
+def test_list_children_exception():
+    client = boto3.client('organizations', region_name='us-east-1')
+    org = client.create_organization(FeatureSet='ALL')['Organization']
+    root_id = client.list_roots()['Roots'][0]['Id']
+    with assert_raises(ClientError) as e:
+        response = client.list_children(
+            ParentId=utils.make_random_root_id(),
+            ChildType='ACCOUNT'
+        )
+    ex = e.exception
+    ex.operation_name.should.equal('ListChildren')
+    ex.response['Error']['Code'].should.equal('400')
+    ex.response['Error']['Message'].should.contain('ParentNotFoundException')
+    with assert_raises(ClientError) as e:
+        response = client.list_children(
+            ParentId=root_id,
+            ChildType='BLEE'
+        )
+    ex = e.exception
+    ex.operation_name.should.equal('ListChildren')
+    ex.response['Error']['Code'].should.equal('400')
+    ex.response['Error']['Message'].should.contain('InvalidInputException')
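The tests above exercise the root -> OU -> account hierarchy piecewise. A compact sketch tying the pieces together (names and the assertion style are illustrative only):

import boto3
from moto import mock_organizations

@mock_organizations
def walk_hierarchy():
    # Build root -> OU -> account, then walk back up with list_parents.
    client = boto3.client('organizations', region_name='us-east-1')
    client.create_organization(FeatureSet='ALL')
    root_id = client.list_roots()['Roots'][0]['Id']
    ou_id = client.create_organizational_unit(
        ParentId=root_id, Name='ou01')['OrganizationalUnit']['Id']
    account_id = client.create_account(
        AccountName='example', Email='example@moto-example.org'
    )['CreateAccountStatus']['AccountId']
    client.move_account(
        AccountId=account_id, SourceParentId=root_id, DestinationParentId=ou_id)
    # After the move, the account's parent is the OU, whose parent is the root.
    assert client.list_parents(ChildId=account_id)['Parents'][0]['Id'] == ou_id
    assert client.list_parents(ChildId=ou_id)['Parents'][0]['Id'] == root_id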
diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py
index 80dcd4f53..cf9805444 100644
--- a/tests/test_rds2/test_rds2.py
+++ b/tests/test_rds2/test_rds2.py
@@ -33,6 +33,7 @@ def test_create_database():
     db_instance['DBInstanceIdentifier'].should.equal("db-master-1")
     db_instance['IAMDatabaseAuthenticationEnabled'].should.equal(False)
     db_instance['DbiResourceId'].should.contain("db-")
+    db_instance['CopyTagsToSnapshot'].should.equal(False)


 @mock_rds2
@@ -339,6 +340,49 @@ def test_create_db_snapshots():
     snapshot.get('Engine').should.equal('postgres')
     snapshot.get('DBInstanceIdentifier').should.equal('db-primary-1')
     snapshot.get('DBSnapshotIdentifier').should.equal('g-1')
+    result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn'])
+    result['TagList'].should.equal([])
+
+
+@mock_rds2
+def test_create_db_snapshots_copy_tags():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.create_db_snapshot.when.called_with(
+        DBInstanceIdentifier='db-primary-1',
+        DBSnapshotIdentifier='snapshot-1').should.throw(ClientError)
+
+    conn.create_db_instance(DBInstanceIdentifier='db-primary-1',
+                            AllocatedStorage=10,
+                            Engine='postgres',
+                            DBName='staging-postgres',
+                            DBInstanceClass='db.m1.small',
+                            MasterUsername='root',
+                            MasterUserPassword='hunter2',
+                            Port=1234,
+                            DBSecurityGroups=["my_sg"],
+                            CopyTagsToSnapshot=True,
+                            Tags=[
+                                {
+                                    'Key': 'foo',
+                                    'Value': 'bar',
+                                },
+                                {
+                                    'Key': 'foo1',
+                                    'Value': 'bar1',
+                                },
+                            ])
+
+    snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1',
+                                       DBSnapshotIdentifier='g-1').get('DBSnapshot')
+
+    snapshot.get('Engine').should.equal('postgres')
+    snapshot.get('DBInstanceIdentifier').should.equal('db-primary-1')
+    snapshot.get('DBSnapshotIdentifier').should.equal('g-1')
+    result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn'])
+    result['TagList'].should.equal([{'Value': 'bar',
+                                     'Key': 'foo'},
+                                    {'Value': 'bar1',
+                                     'Key': 'foo1'}])


 @mock_rds2
@@ -656,6 +700,117 @@ def test_remove_tags_db():
     len(result['TagList']).should.equal(1)


+@mock_rds2
+def test_list_tags_snapshot():
+    conn = boto3.client('rds', region_name='us-west-2')
+    result = conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:foo')
+    result['TagList'].should.equal([])
+    conn.create_db_instance(DBInstanceIdentifier='db-primary-1',
+                            AllocatedStorage=10,
+                            Engine='postgres',
+                            DBName='staging-postgres',
+                            DBInstanceClass='db.m1.small',
+                            MasterUsername='root',
+                            MasterUserPassword='hunter2',
+                            Port=1234,
+                            DBSecurityGroups=["my_sg"])
+    snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1',
+                                       DBSnapshotIdentifier='snapshot-with-tags',
+                                       Tags=[
+                                           {
+                                               'Key': 'foo',
+                                               'Value': 'bar',
+                                           },
+                                           {
+                                               'Key': 'foo1',
+                                               'Value': 'bar1',
+                                           },
+                                       ])
+    result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshot']['DBSnapshotArn'])
+    result['TagList'].should.equal([{'Value': 'bar',
+                                     'Key': 'foo'},
+                                    {'Value': 'bar1',
+                                     'Key': 'foo1'}])
+
+
+@mock_rds2
+def test_add_tags_snapshot():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.create_db_instance(DBInstanceIdentifier='db-primary-1',
+                            AllocatedStorage=10,
+                            Engine='postgres',
+                            DBName='staging-postgres',
+                            DBInstanceClass='db.m1.small',
+                            MasterUsername='root',
+                            MasterUserPassword='hunter2',
+                            Port=1234,
+                            DBSecurityGroups=["my_sg"])
+    snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1',
+                                       DBSnapshotIdentifier='snapshot-without-tags',
+                                       Tags=[
+                                           {
+                                               'Key': 'foo',
+                                               'Value': 'bar',
+                                           },
+                                           {
+                                               'Key': 'foo1',
+                                               'Value': 'bar1',
+                                           },
+                                       ])
+    result = conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags')
+    list(result['TagList']).should.have.length_of(2)
+    conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags',
+                              Tags=[
+                                  {
+                                      'Key': 'foo',
+                                      'Value': 'fish',
+                                  },
+                                  {
+                                      'Key': 'foo2',
+                                      'Value': 'bar2',
+                                  },
+                              ])
+    result = conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags')
+    list(result['TagList']).should.have.length_of(3)
+
+
+@mock_rds2
+def test_remove_tags_snapshot():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.create_db_instance(DBInstanceIdentifier='db-primary-1',
+                            AllocatedStorage=10,
+                            Engine='postgres',
+                            DBName='staging-postgres',
+                            DBInstanceClass='db.m1.small',
+                            MasterUsername='root',
+                            MasterUserPassword='hunter2',
+                            Port=1234,
+                            DBSecurityGroups=["my_sg"])
+    snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1',
+                                       DBSnapshotIdentifier='snapshot-with-tags',
+                                       Tags=[
+                                           {
+                                               'Key': 'foo',
+                                               'Value': 'bar',
+                                           },
+                                           {
+                                               'Key': 'foo1',
+                                               'Value': 'bar1',
+                                           },
+                                       ])
+    result = conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags')
+    list(result['TagList']).should.have.length_of(2)
+    conn.remove_tags_from_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags', TagKeys=['foo'])
+    result = conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags')
+    len(result['TagList']).should.equal(1)
+
+
 @mock_rds2
 def test_add_tags_option_group():
     conn = boto3.client('rds', region_name='us-west-2')
diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py
index 6e027b86c..9208c92dd 100644
--- a/tests/test_redshift/test_redshift.py
+++ b/tests/test_redshift/test_redshift.py
@@ -1,5 +1,7 @@
 from __future__ import unicode_literals

+import datetime
+
 import boto
 import boto3
 from boto.redshift.exceptions import (
@@ -32,6 +34,8 @@ def test_create_cluster_boto3():
         MasterUserPassword='password',
     )
     response['Cluster']['NodeType'].should.equal('ds2.xlarge')
+    create_time = response['Cluster']['ClusterCreateTime']
+    create_time.should.be.lower_than(datetime.datetime.now(create_time.tzinfo))


 @mock_redshift
diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py
index 9a68d1bbb..6e339abb6 100644
--- a/tests/test_s3/test_s3.py
+++ b/tests/test_s3/test_s3.py
@@ -2471,6 +2471,72 @@ def test_boto3_delete_markers():
     oldest['Key'].should.equal('key-with-versions-and-unicode-ó')


+@mock_s3
+def test_boto3_multiple_delete_markers():
+    s3 = boto3.client('s3', region_name='us-east-1')
+    bucket_name = 'mybucket'
+    key = u'key-with-versions-and-unicode-ó'
+    s3.create_bucket(Bucket=bucket_name)
+    s3.put_bucket_versioning(
+        Bucket=bucket_name,
+        VersioningConfiguration={
+            'Status': 'Enabled'
+        }
+    )
+    items = (six.b('v1'), six.b('v2'))
+    for body in items:
+        s3.put_object(
+            Bucket=bucket_name,
+            Key=key,
+            Body=body
+        )
+
+    # Delete the object twice to add multiple delete markers
+    s3.delete_object(Bucket=bucket_name, Key=key)
+    s3.delete_object(Bucket=bucket_name, Key=key)
+
+    response = s3.list_object_versions(Bucket=bucket_name)
+    response['DeleteMarkers'].should.have.length_of(2)
+
+    with assert_raises(ClientError) as e:
+        s3.get_object(
+            Bucket=bucket_name,
+            Key=key
+        )
+        e.response['Error']['Code'].should.equal('404')
+
+    # Remove both delete markers to restore the object
+    s3.delete_object(
+        Bucket=bucket_name,
+        Key=key,
+        VersionId='2'
+    )
+    s3.delete_object(
+        Bucket=bucket_name,
+        Key=key,
+        VersionId='3'
+    )
+
+    response = s3.get_object(
+        Bucket=bucket_name,
+        Key=key
+    )
+    response['Body'].read().should.equal(items[-1])
+    response = s3.list_object_versions(Bucket=bucket_name)
+    response['Versions'].should.have.length_of(2)
+
+    # We've asserted there are only 2 records, so one is newest and one is oldest
+    latest = list(filter(lambda item: item['IsLatest'], response['Versions']))[0]
+    oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0]
+
+    # Double check ordering of version ID's
+    latest['VersionId'].should.equal('1')
+    oldest['VersionId'].should.equal('0')
+
+    # Double check the name is still unicode
+    latest['Key'].should.equal('key-with-versions-and-unicode-ó')
+    oldest['Key'].should.equal('key-with-versions-and-unicode-ó')
+
+
 @mock_s3
 def test_get_stream_gzipped():
     payload = b"this is some stuff here"
diff --git a/tests/test_s3/test_s3_lifecycle.py b/tests/test_s3/test_s3_lifecycle.py
index d176e95c6..3d533a641 100644
--- a/tests/test_s3/test_s3_lifecycle.py
+++ b/tests/test_s3/test_s3_lifecycle.py
@@ -191,6 +191,127 @@ def test_lifecycle_with_eodm():
     assert err.exception.response["Error"]["Code"] == "MalformedXML"


+@mock_s3
+def test_lifecycle_with_nve():
+    client = boto3.client("s3")
+    client.create_bucket(Bucket="bucket")
+
+    lfc = {
+        "Rules": [
+            {
+                "NoncurrentVersionExpiration": {
+                    "NoncurrentDays": 30
+                },
+                "ID": "wholebucket",
+                "Filter": {
+                    "Prefix": ""
+                },
+                "Status": "Enabled"
+            }
+        ]
+    }
+    client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc)
+    result = client.get_bucket_lifecycle_configuration(Bucket="bucket")
+    assert len(result["Rules"]) == 1
+    assert result["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] == 30
+
+    # Change NoncurrentDays:
+    lfc["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] = 10
+    client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc)
+    result = client.get_bucket_lifecycle_configuration(Bucket="bucket")
+    assert len(result["Rules"]) == 1
+    assert result["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] == 10
+
+    # TODO: Add test for failures due to missing children
+
+
+@mock_s3
+def test_lifecycle_with_nvt():
+    client = boto3.client("s3")
+    client.create_bucket(Bucket="bucket")
+
+    lfc = {
+        "Rules": [
+            {
+                "NoncurrentVersionTransitions": [{
+                    "NoncurrentDays": 30,
+                    "StorageClass": "ONEZONE_IA"
+                }],
+                "ID": "wholebucket",
+                "Filter": {
+                    "Prefix": ""
+                },
+                "Status": "Enabled"
+            }
+        ]
+    }
+    client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc)
+    result = client.get_bucket_lifecycle_configuration(Bucket="bucket")
+    assert len(result["Rules"]) == 1
+    assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] == 30
+    assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] == "ONEZONE_IA"
+
+    # Change NoncurrentDays:
+    lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] = 10
+    client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc)
+    result = client.get_bucket_lifecycle_configuration(Bucket="bucket")
+    assert len(result["Rules"]) == 1
+    assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] == 10
+
+    # Change StorageClass:
+    lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] = "GLACIER"
+    client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc)
+    result = client.get_bucket_lifecycle_configuration(Bucket="bucket")
+    assert len(result["Rules"]) == 1
+    assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] == "GLACIER"
+
+    # With failures for missing children:
+    del lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"]
+    with assert_raises(ClientError) as err:
+        client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc)
+    assert err.exception.response["Error"]["Code"] == "MalformedXML"
+    lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] = 30
+
+    del lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"]
+    with assert_raises(ClientError) as err:
+        client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc)
+    assert err.exception.response["Error"]["Code"] == "MalformedXML"
+
+
+@mock_s3
+def test_lifecycle_with_aimu():
+    client = boto3.client("s3")
+    client.create_bucket(Bucket="bucket")
+
+    lfc = {
+        "Rules": [
+            {
+                "AbortIncompleteMultipartUpload": {
+                    "DaysAfterInitiation": 7
+                },
+                "ID": "wholebucket",
+                "Filter": {
+                    "Prefix": ""
+                },
+                "Status": "Enabled"
+            }
+        ]
+    }
+    client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc)
+    result = client.get_bucket_lifecycle_configuration(Bucket="bucket")
+    assert len(result["Rules"]) == 1
+    assert result["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] == 7
+
+    # Change DaysAfterInitiation:
+    lfc["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] = 30
+    client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc)
+    result = client.get_bucket_lifecycle_configuration(Bucket="bucket")
+    assert len(result["Rules"]) == 1
+    assert result["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] == 30
+
+    # TODO: Add test for failures due to missing children
+
+
 @mock_s3_deprecated
 def test_lifecycle_with_glacier_transition():
     conn = boto.s3.connect_to_region("us-west-1")
diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py
index c631fabb0..ec384a660 100644
--- a/tests/test_secretsmanager/test_secretsmanager.py
+++ b/tests/test_secretsmanager/test_secretsmanager.py
@@ -26,13 +26,13 @@ def test_get_secret_that_does_not_exist():
         result = conn.get_secret_value(SecretId='i-dont-exist')

 @mock_secretsmanager
-def test_get_secret_with_mismatched_id():
+def test_get_secret_that_does_not_match():
     conn = boto3.client('secretsmanager', region_name='us-west-2')
     create_secret = conn.create_secret(Name='java-util-test-password',
                                        SecretString="foosecret")

     with assert_raises(ClientError):
-        result = conn.get_secret_value(SecretId='i-dont-exist')
+        result = conn.get_secret_value(SecretId='i-dont-match')

 @mock_secretsmanager
 def test_create_secret():
@@ -179,3 +179,108 @@ def test_describe_secret_that_does_not_match():

     with assert_raises(ClientError):
         result = conn.get_secret_value(SecretId='i-dont-match')
+
+@mock_secretsmanager
+def test_rotate_secret():
+    secret_name = 'test-secret'
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name=secret_name,
+                       SecretString='foosecret')
+
+    rotated_secret = conn.rotate_secret(SecretId=secret_name)
+
+    assert rotated_secret
+    assert rotated_secret['ARN'] == (
+        'arn:aws:secretsmanager:us-west-2:1234567890:secret:test-secret-rIjad'
+    )
+    assert rotated_secret['Name'] == secret_name
+    assert rotated_secret['VersionId'] != ''
+
+@mock_secretsmanager
+def test_rotate_secret_enable_rotation():
+    secret_name = 'test-secret'
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name=secret_name,
+                       SecretString='foosecret')
+
+    initial_description = conn.describe_secret(SecretId=secret_name)
+    assert initial_description
+    assert initial_description['RotationEnabled'] is False
+    assert initial_description['RotationRules']['AutomaticallyAfterDays'] == 0
+
+    conn.rotate_secret(SecretId=secret_name,
+                       RotationRules={'AutomaticallyAfterDays': 42})
+
+    rotated_description = conn.describe_secret(SecretId=secret_name)
+    assert rotated_description
+    assert rotated_description['RotationEnabled'] is True
+    assert rotated_description['RotationRules']['AutomaticallyAfterDays'] == 42
+
+@mock_secretsmanager
+def test_rotate_secret_that_does_not_exist():
+    conn = boto3.client('secretsmanager', 'us-west-2')
+
+    with assert_raises(ClientError):
+        result = conn.rotate_secret(SecretId='i-dont-exist')
+
+@mock_secretsmanager
+def test_rotate_secret_that_does_not_match():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name='test-secret',
+                       SecretString='foosecret')
+
+    with assert_raises(ClientError):
+        result = conn.rotate_secret(SecretId='i-dont-match')
+
+@mock_secretsmanager
+def test_rotate_secret_client_request_token_too_short():
+    # Test is intentionally empty. Boto3 catches too short ClientRequestToken
+    # and raises ParamValidationError before Moto can see it.
+    # test_server actually handles this error.
+    assert True
+
+@mock_secretsmanager
+def test_rotate_secret_client_request_token_too_long():
+    secret_name = 'test-secret'
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name=secret_name,
+                       SecretString='foosecret')
+
+    client_request_token = (
+        'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C-'
+        'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C'
+    )
+    with assert_raises(ClientError):
+        result = conn.rotate_secret(SecretId=secret_name,
+                                    ClientRequestToken=client_request_token)
+
+@mock_secretsmanager
+def test_rotate_secret_rotation_lambda_arn_too_long():
+    secret_name = 'test-secret'
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name=secret_name,
+                       SecretString='foosecret')
+
+    rotation_lambda_arn = '85B7-446A-B7E4' * 147  # == 2058 characters
+    with assert_raises(ClientError):
+        result = conn.rotate_secret(SecretId=secret_name,
+                                    RotationLambdaARN=rotation_lambda_arn)
+
+@mock_secretsmanager
+def test_rotate_secret_rotation_period_zero():
+    # Test is intentionally empty. Boto3 catches zero day rotation period
+    # and raises ParamValidationError before Moto can see it.
+    # test_server actually handles this error.
+    assert True
+
+@mock_secretsmanager
+def test_rotate_secret_rotation_period_too_long():
+    secret_name = 'test-secret'
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name=secret_name,
+                       SecretString='foosecret')
+
+    rotation_rules = {'AutomaticallyAfterDays': 1001}
+    with assert_raises(ClientError):
+        result = conn.rotate_secret(SecretId=secret_name,
+                                    RotationRules=rotation_rules)
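Taken together, the rotate_secret tests above encode three input constraints. A compact restatement of those rules, as a sketch mirroring the error messages asserted in the server tests below rather than moto's actual implementation:

def validate_rotate_secret_params(client_request_token=None,
                                  rotation_lambda_arn=None,
                                  rotation_rules=None):
    # Constraints implied by the tests: token length 32-64, ARN at most
    # 2048 characters, rotation period within 1-1000 days.
    if client_request_token is not None:
        if not 32 <= len(client_request_token) <= 64:
            raise ValueError('ClientRequestToken must be 32-64 characters long.')
    if rotation_lambda_arn is not None and len(rotation_lambda_arn) > 2048:
        raise ValueError('RotationLambdaARN must be at most 2048 characters long.')
    if rotation_rules is not None:
        days = rotation_rules.get('AutomaticallyAfterDays')
        if days is not None and not 1 <= days <= 1000:
            raise ValueError('RotationRules.AutomaticallyAfterDays must be within 1-1000.')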
diff --git a/tests/test_secretsmanager/test_server.py b/tests/test_secretsmanager/test_server.py
index 8c6f7b970..e573f9b67 100644
--- a/tests/test_secretsmanager/test_server.py
+++ b/tests/test_secretsmanager/test_server.py
@@ -49,6 +49,27 @@ def test_get_secret_that_does_not_exist():
     assert json_data['message'] == "Secrets Manager can't find the specified secret"
     assert json_data['__type'] == 'ResourceNotFoundException'

+@mock_secretsmanager
+def test_get_secret_that_does_not_match():
+    backend = server.create_backend_app("secretsmanager")
+    test_client = backend.test_client()
+
+    create_secret = test_client.post('/',
+                                     data={"Name": "test-secret",
+                                           "SecretString": "foo-secret"},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.CreateSecret"},
+                                     )
+    get_secret = test_client.post('/',
+                                  data={"SecretId": "i-dont-match",
+                                        "VersionStage": "AWSCURRENT"},
+                                  headers={
+                                      "X-Amz-Target": "secretsmanager.GetSecretValue"},
+                                  )
+    json_data = json.loads(get_secret.data.decode("utf-8"))
+    assert json_data['message'] == "Secrets Manager can't find the specified secret"
+    assert json_data['__type'] == 'ResourceNotFoundException'
+
 @mock_secretsmanager
 def test_create_secret():
@@ -133,3 +154,268 @@ def test_describe_secret_that_does_not_match():
     json_data = json.loads(describe_secret.data.decode("utf-8"))
     assert json_data['message'] == "Secrets Manager can't find the specified secret"
     assert json_data['__type'] == 'ResourceNotFoundException'
+
+@mock_secretsmanager
+def test_rotate_secret():
+    backend = server.create_backend_app('secretsmanager')
+    test_client = backend.test_client()
+
+    create_secret = test_client.post('/',
+                                     data={"Name": "test-secret",
+                                           "SecretString": "foosecret"},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.CreateSecret"
+                                     },
+                                     )
+
+    client_request_token = "EXAMPLE2-90ab-cdef-fedc-ba987SECRET2"
+    rotate_secret = test_client.post('/',
+                                     data={"SecretId": "test-secret",
+                                           "ClientRequestToken": client_request_token},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.RotateSecret"
+                                     },
+                                     )
+
+    json_data = json.loads(rotate_secret.data.decode("utf-8"))
+    assert json_data  # Returned dict is not empty
+    assert json_data['ARN'] == (
+        'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad'
+    )
+    assert json_data['Name'] == 'test-secret'
+    assert json_data['VersionId'] == client_request_token
+
+# @mock_secretsmanager
+# def test_rotate_secret_enable_rotation():
+#     backend = server.create_backend_app('secretsmanager')
+#     test_client = backend.test_client()
+
+#     create_secret = test_client.post(
+#         '/',
+#         data={
+#             "Name": "test-secret",
+#             "SecretString": "foosecret"
+#         },
+#         headers={
+#             "X-Amz-Target": "secretsmanager.CreateSecret"
+#         },
+#     )
+
+#     initial_description = test_client.post(
+#         '/',
+#         data={
+#             "SecretId": "test-secret"
+#         },
+#         headers={
+#             "X-Amz-Target": "secretsmanager.DescribeSecret"
+#         },
+#     )
+
+#     json_data = json.loads(initial_description.data.decode("utf-8"))
+#     assert json_data  # Returned dict is not empty
+#     assert json_data['RotationEnabled'] is False
+#     assert json_data['RotationRules']['AutomaticallyAfterDays'] == 0
+
+#     rotate_secret = test_client.post(
+#         '/',
+#         data={
+#             "SecretId": "test-secret",
+#             "RotationRules": {"AutomaticallyAfterDays": 42}
+#         },
+#         headers={
+#             "X-Amz-Target": "secretsmanager.RotateSecret"
+#         },
+#     )
+
+#     rotated_description = test_client.post(
+#         '/',
+#         data={
+#             "SecretId": "test-secret"
+#         },
+#         headers={
+#             "X-Amz-Target": "secretsmanager.DescribeSecret"
+#         },
+#     )
+
+#     json_data = json.loads(rotated_description.data.decode("utf-8"))
+#     assert json_data  # Returned dict is not empty
+#     assert json_data['RotationEnabled'] is True
+#     assert json_data['RotationRules']['AutomaticallyAfterDays'] == 42
+
+@mock_secretsmanager
+def test_rotate_secret_that_does_not_exist():
+    backend = server.create_backend_app('secretsmanager')
+    test_client = backend.test_client()
+
+    rotate_secret = test_client.post('/',
+                                     data={"SecretId": "i-dont-exist"},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.RotateSecret"
+                                     },
+                                     )
+
+    json_data = json.loads(rotate_secret.data.decode("utf-8"))
+    assert json_data['message'] == "Secrets Manager can't find the specified secret"
+    assert json_data['__type'] == 'ResourceNotFoundException'
+
+@mock_secretsmanager
+def test_rotate_secret_that_does_not_match():
+    backend = server.create_backend_app('secretsmanager')
+    test_client = backend.test_client()
+
+    create_secret = test_client.post('/',
+                                     data={"Name": "test-secret",
+                                           "SecretString": "foosecret"},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.CreateSecret"
+                                     },
+                                     )
+
+    rotate_secret = test_client.post('/',
+                                     data={"SecretId": "i-dont-match"},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.RotateSecret"
+                                     },
+                                     )
+
+    json_data = json.loads(rotate_secret.data.decode("utf-8"))
+    assert json_data['message'] == "Secrets Manager can't find the specified secret"
+    assert json_data['__type'] == 'ResourceNotFoundException'
+
+@mock_secretsmanager
+def test_rotate_secret_client_request_token_too_short():
+    backend = server.create_backend_app('secretsmanager')
+    test_client = backend.test_client()
+
+    create_secret = test_client.post('/',
+                                     data={"Name": "test-secret",
+                                           "SecretString": "foosecret"},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.CreateSecret"
+                                     },
+                                     )
+
+    client_request_token = "ED9F8B6C-85B7-B7E4-38F2A3BEB13C"
+    rotate_secret = test_client.post('/',
+                                     data={"SecretId": "test-secret",
+                                           "ClientRequestToken": client_request_token},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.RotateSecret"
+                                     },
+                                     )
+
+    json_data = json.loads(rotate_secret.data.decode("utf-8"))
+    assert json_data['message'] == "ClientRequestToken must be 32-64 characters long."
+    assert json_data['__type'] == 'InvalidParameterException'
+
+@mock_secretsmanager
+def test_rotate_secret_client_request_token_too_long():
+    backend = server.create_backend_app('secretsmanager')
+    test_client = backend.test_client()
+
+    create_secret = test_client.post('/',
+                                     data={"Name": "test-secret",
+                                           "SecretString": "foosecret"},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.CreateSecret"
+                                     },
+                                     )
+
+    client_request_token = (
+        'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C-'
+        'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C'
+    )
+    rotate_secret = test_client.post('/',
+                                     data={"SecretId": "test-secret",
+                                           "ClientRequestToken": client_request_token},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.RotateSecret"
+                                     },
+                                     )
+
+    json_data = json.loads(rotate_secret.data.decode("utf-8"))
+    assert json_data['message'] == "ClientRequestToken must be 32-64 characters long."
+    assert json_data['__type'] == 'InvalidParameterException'
+
+@mock_secretsmanager
+def test_rotate_secret_rotation_lambda_arn_too_long():
+    backend = server.create_backend_app('secretsmanager')
+    test_client = backend.test_client()
+
+    create_secret = test_client.post('/',
+                                     data={"Name": "test-secret",
+                                           "SecretString": "foosecret"},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.CreateSecret"
+                                     },
+                                     )
+
+    rotation_lambda_arn = '85B7-446A-B7E4' * 147  # == 2058 characters
+    rotate_secret = test_client.post('/',
+                                     data={"SecretId": "test-secret",
+                                           "RotationLambdaARN": rotation_lambda_arn},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.RotateSecret"
+                                     },
+                                     )
+
+    json_data = json.loads(rotate_secret.data.decode("utf-8"))
+    assert json_data['message'] == "RotationLambdaARN must <= 2048 characters long."
+    assert json_data['__type'] == 'InvalidParameterException'
+
+
+#
+# The following tests should work, but fail on the embedded dict in
+# RotationRules. The error message suggests a problem deeper in the code, which
+# needs further investigation.
+#
+
+# @mock_secretsmanager
+# def test_rotate_secret_rotation_period_zero():
+#     backend = server.create_backend_app('secretsmanager')
+#     test_client = backend.test_client()
+
+#     create_secret = test_client.post('/',
+#                                      data={"Name": "test-secret",
+#                                            "SecretString": "foosecret"},
+#                                      headers={
+#                                          "X-Amz-Target": "secretsmanager.CreateSecret"
+#                                      },
+#                                      )
+
+#     rotate_secret = test_client.post('/',
+#                                      data={"SecretId": "test-secret",
+#                                            "RotationRules": {"AutomaticallyAfterDays": 0}},
+#                                      headers={
+#                                          "X-Amz-Target": "secretsmanager.RotateSecret"
+#                                      },
+#                                      )
+
+#     json_data = json.loads(rotate_secret.data.decode("utf-8"))
+#     assert json_data['message'] == "RotationRules.AutomaticallyAfterDays must be within 1-1000."
+#     assert json_data['__type'] == 'InvalidParameterException'
+
+# @mock_secretsmanager
+# def test_rotate_secret_rotation_period_too_long():
+#     backend = server.create_backend_app('secretsmanager')
+#     test_client = backend.test_client()
+
+#     create_secret = test_client.post('/',
+#                                      data={"Name": "test-secret",
+#                                            "SecretString": "foosecret"},
+#                                      headers={
+#                                          "X-Amz-Target": "secretsmanager.CreateSecret"
+#                                      },
+#                                      )
+
+#     rotate_secret = test_client.post('/',
+#                                      data={"SecretId": "test-secret",
+#                                            "RotationRules": {"AutomaticallyAfterDays": 1001}},
+#                                      headers={
+#                                          "X-Amz-Target": "secretsmanager.RotateSecret"
+#                                      },
+#                                      )
+
+#     json_data = json.loads(rotate_secret.data.decode("utf-8"))
+#     assert json_data['message'] == "RotationRules.AutomaticallyAfterDays must be within 1-1000."
+#     assert json_data['__type'] == 'InvalidParameterException'
diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py
index d3e4ca917..9beb9a3fa 100644
--- a/tests/test_sqs/test_sqs.py
+++ b/tests/test_sqs/test_sqs.py
@@ -40,6 +40,33 @@ def test_create_fifo_queue_fail():
         raise RuntimeError('Should of raised InvalidParameterValue Exception')


+@mock_sqs
+def test_create_queue_with_same_attributes():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+
+    dlq_url = sqs.create_queue(QueueName='test-queue-dlq')['QueueUrl']
+    dlq_arn = sqs.get_queue_attributes(QueueUrl=dlq_url)['Attributes']['QueueArn']
+
+    attributes = {
+        'DelaySeconds': '900',
+        'MaximumMessageSize': '262144',
+        'MessageRetentionPeriod': '1209600',
+        'ReceiveMessageWaitTimeSeconds': '20',
+        'RedrivePolicy': '{"deadLetterTargetArn": "%s", "maxReceiveCount": 100}' % (dlq_arn),
+        'VisibilityTimeout': '43200'
+    }
+
+    sqs.create_queue(
+        QueueName='test-queue',
+        Attributes=attributes
+    )
+
+    sqs.create_queue(
+        QueueName='test-queue',
+        Attributes=attributes
+    )
+
+
 @mock_sqs
 def test_create_queue_with_different_attributes_fail():
     sqs = boto3.client('sqs', region_name='us-east-1')
@@ -1195,3 +1222,16 @@ def test_receive_messages_with_message_group_id_on_visibility_timeout():
     messages = queue.receive_messages()
     messages.should.have.length_of(1)
     messages[0].message_id.should.equal(message.message_id)
+
+@mock_sqs
+def test_receive_message_for_queue_with_receive_message_wait_time_seconds_set():
+    sqs = boto3.resource('sqs', region_name='us-east-1')
+
+    queue = sqs.create_queue(
+        QueueName='test-queue',
+        Attributes={
+            'ReceiveMessageWaitTimeSeconds': '2',
+        }
+    )
+
+    queue.receive_messages()
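The new SQS test above asserts that create_queue is idempotent when the attribute set matches; its sibling test (already present) covers the mismatch case, which raises. A reduced sketch of the behaviour, with illustrative names:

import boto3
from moto import mock_sqs

@mock_sqs
def create_queue_twice():
    sqs = boto3.client('sqs', region_name='us-east-1')
    attributes = {'VisibilityTimeout': '43200'}
    first = sqs.create_queue(QueueName='test-queue', Attributes=attributes)
    # Same name, same attributes: the second call succeeds and returns
    # the existing queue's URL instead of raising QueueAlreadyExists.
    second = sqs.create_queue(QueueName='test-queue', Attributes=attributes)
    assert first['QueueUrl'] == second['QueueUrl']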
diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py
index 7a0685d56..f8ef3a237 100644
--- a/tests/test_ssm/test_ssm_boto3.py
+++ b/tests/test_ssm/test_ssm_boto3.py
@@ -5,11 +5,12 @@ import botocore.exceptions
 import sure  # noqa
 import datetime
 import uuid
+import json

 from botocore.exceptions import ClientError
 from nose.tools import assert_raises

-from moto import mock_ssm
+from moto import mock_ssm, mock_cloudformation


 @mock_ssm
@@ -668,3 +669,118 @@ def test_list_commands():
     with assert_raises(ClientError):
         response = client.list_commands(
             CommandId=str(uuid.uuid4()))
+
+@mock_ssm
+def test_get_command_invocation():
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    ssm_document = 'AWS-RunShellScript'
+    params = {'commands': ['#!/bin/bash\necho \'hello world\'']}
+
+    response = client.send_command(
+        InstanceIds=['i-123456', 'i-234567', 'i-345678'],
+        DocumentName=ssm_document,
+        Parameters=params,
+        OutputS3Region='us-east-2',
+        OutputS3BucketName='the-bucket',
+        OutputS3KeyPrefix='pref')
+
+    cmd = response['Command']
+    cmd_id = cmd['CommandId']
+
+    instance_id = 'i-345678'
+    invocation_response = client.get_command_invocation(
+        CommandId=cmd_id,
+        InstanceId=instance_id,
+        PluginName='aws:runShellScript')
+
+    invocation_response['CommandId'].should.equal(cmd_id)
+    invocation_response['InstanceId'].should.equal(instance_id)
+
+    # test the error case for an invalid instance id
+    with assert_raises(ClientError):
+        invocation_response = client.get_command_invocation(
+            CommandId=cmd_id,
+            InstanceId='i-FAKE')
+
+    # test the error case for an invalid plugin name
+    with assert_raises(ClientError):
+        invocation_response = client.get_command_invocation(
+            CommandId=cmd_id,
+            InstanceId=instance_id,
+            PluginName='FAKE')
+
+@mock_ssm
+@mock_cloudformation
+def test_get_command_invocations_from_stack():
+    stack_template = {
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Description": "Test Stack",
+        "Resources": {
+            "EC2Instance1": {
+                "Type": "AWS::EC2::Instance",
+                "Properties": {
+                    "ImageId": "ami-test-image-id",
+                    "KeyName": "test",
+                    "InstanceType": "t2.micro",
+                    "Tags": [
+                        {
+                            "Key": "Test Description",
+                            "Value": "Test tag"
+                        },
+                        {
+                            "Key": "Test Name",
+                            "Value": "Name tag for tests"
+                        }
+                    ]
+                }
+            }
+        },
+        "Outputs": {
+            "test": {
+                "Description": "Test Output",
+                "Value": "Test output value",
+                "Export": {
+                    "Name": "Test value to export"
+                }
+            },
+            "PublicIP": {
+                "Value": "Test public ip"
+            }
+        }
+    }
+
+    cloudformation_client = boto3.client(
+        'cloudformation',
+        region_name='us-east-1')
+
+    stack_template_str = json.dumps(stack_template)
+
+    response = cloudformation_client.create_stack(
+        StackName='test_stack',
+        TemplateBody=stack_template_str,
+        Capabilities=('CAPABILITY_IAM', ))
+
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    ssm_document = 'AWS-RunShellScript'
+    params = {'commands': ['#!/bin/bash\necho \'hello world\'']}
+
+    response = client.send_command(
+        Targets=[{
+            'Key': 'tag:aws:cloudformation:stack-name',
+            'Values': ('test_stack', )}],
+        DocumentName=ssm_document,
+        Parameters=params,
+        OutputS3Region='us-east-2',
+        OutputS3BucketName='the-bucket',
+        OutputS3KeyPrefix='pref')
+
+    cmd = response['Command']
+    cmd_id = cmd['CommandId']
+    instance_ids = cmd['InstanceIds']
+
+    invocation_response = client.get_command_invocation(
+        CommandId=cmd_id,
+        InstanceId=instance_ids[0],
+        PluginName='aws:runShellScript')

From 36d8f118e36a7cbdb1dd99667f599fd91aa26bcc Mon Sep 17 00:00:00 2001
From: Stephan Huber
Date: Wed, 24 Oct 2018 14:53:08 +0200
Subject: [PATCH 004/125] implement `attach_policy`, `detach_policy` and
 `list_attached_policies`

---
 moto/iot/models.py         | 22 +++++++++++++++++++
 moto/iot/responses.py      | 30 ++++++++++++++++++++++++++
 tests/test_iot/test_iot.py | 44 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 96 insertions(+)

diff --git a/moto/iot/models.py b/moto/iot/models.py
index 931af192a..4789e045f 100644
--- a/moto/iot/models.py
+++ b/moto/iot/models.py
@@ -400,6 +400,28 @@ class IoTBackend(BaseBackend):
         self.policies[policy.name] = policy
         return policy

+    def attach_policy(self, policy_name, target):
+        principal = self._get_principal(target)
+        policy = self.get_policy(policy_name)
+        k = (target, policy_name)
+        if k in self.principal_policies:
+            return
+        self.principal_policies[k] = (principal, policy)
+
+    def detach_policy(self, policy_name, target):
+        # this may raise ResourceNotFoundException
+        self._get_principal(target)
+        self.get_policy(policy_name)
+
+        k = (target, policy_name)
+        if k not in self.principal_policies:
+            raise ResourceNotFoundException()
+        del self.principal_policies[k]
+
+    def list_attached_policies(self, target):
+        policies = [v[1] for k, v in self.principal_policies.items() if k[0] == target]
+        return policies
+
     def list_policies(self):
         policies = self.policies.values()
         return policies
diff --git a/moto/iot/responses.py b/moto/iot/responses.py
index c71d4942a..f5e25fdbc 100644
--- a/moto/iot/responses.py
+++ b/moto/iot/responses.py
@@ -1,6 +1,7 @@
 from __future__ import unicode_literals

 import json
+from urllib.parse import unquote

 from moto.core.responses import BaseResponse
 from .models import iot_backends
@@ -234,6 +235,35 @@ class IoTResponse(BaseResponse):
         )
         return json.dumps(dict())

+    def attach_policy(self):
+        policy_name = self._get_param("policyName")
+        principal = self._get_param('target')
+        self.iot_backend.attach_policy(
+            policy_name=policy_name,
+            target=principal,
+        )
+        return json.dumps(dict())
+
+    def detach_policy(self):
+        policy_name = self._get_param("policyName")
+        principal = self._get_param('target')
+        self.iot_backend.detach_policy(
+            policy_name=policy_name,
+            target=principal,
+        )
+        return json.dumps(dict())
+
+    def list_attached_policies(self):
+        principal = unquote(self._get_param('target'))
+        # marker = self._get_param("marker")
+        # page_size = self._get_int_param("pageSize")
+        policies = self.iot_backend.list_attached_policies(
+            target=principal
+        )
+        # TODO: implement pagination in the future
+        next_marker = None
+        return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker))
+
     def attach_principal_policy(self):
         policy_name = self._get_param("policyName")
         principal = self.headers.get('x-amzn-iot-principal')
diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py
index 759c7d3c7..7fbd66963 100644
--- a/tests/test_iot/test_iot.py
+++ b/tests/test_iot/test_iot.py
@@ -8,6 +8,50 @@ import boto3
 from moto import mock_iot


+@mock_iot
+def test_attach_policy():
+    client = boto3.client('iot', region_name='ap-northeast-1')
+    policy_name = 'my-policy'
+    doc = '{}'
+
+    cert = client.create_keys_and_certificate(setAsActive=True)
+    cert_arn = cert['certificateArn']
+    client.create_policy(policyName=policy_name, policyDocument=doc)
+    client.attach_policy(policyName=policy_name, target=cert_arn)
+
+    res = client.list_attached_policies(target=cert_arn)
+    res.should.have.key('policies').which.should.have.length_of(1)
+    res['policies'][0]['policyName'].should.equal('my-policy')
+
+
+@mock_iot
+def test_detach_policy():
+    client = boto3.client('iot', region_name='ap-northeast-1')
+    policy_name = 'my-policy'
+    doc = '{}'
+
+    cert = client.create_keys_and_certificate(setAsActive=True)
+    cert_arn = cert['certificateArn']
+    client.create_policy(policyName=policy_name, policyDocument=doc)
+    client.attach_policy(policyName=policy_name, target=cert_arn)
+
+    res = client.list_attached_policies(target=cert_arn)
+    res.should.have.key('policies').which.should.have.length_of(1)
+    res['policies'][0]['policyName'].should.equal('my-policy')
+
+    client.detach_policy(policyName=policy_name, target=cert_arn)
+    res = client.list_attached_policies(target=cert_arn)
+    res.should.have.key('policies').which.should.be.empty
+
+
+@mock_iot
+def test_list_attached_policies():
+    client = boto3.client('iot', region_name='ap-northeast-1')
+    cert = client.create_keys_and_certificate(setAsActive=True)
+    policies = client.list_attached_policies(target=cert['certificateArn'])
+    policies['policies'].should.be.empty
+
+
 @mock_iot
 def test_things():
     client = boto3.client('iot', region_name='ap-northeast-1')
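The backend above keys attachments by the (target, policy_name) pair, which is what makes attach_policy idempotent and lets list_attached_policies filter on the first element of the key. Reduced to its core, the pattern looks roughly like this (a sketch, not the module itself):

principal_policies = {}

def attach(target, policy_name, principal, policy):
    # Re-attaching the same policy to the same target is a no-op.
    principal_policies.setdefault((target, policy_name), (principal, policy))

def attached_to(target):
    # Everything attached to one target, regardless of policy name.
    return [policy for (t, _), (_, policy) in principal_policies.items() if t == target]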
From bb7e1197bc9474eb87dfa520f60fa8de57fbda0d Mon Sep 17 00:00:00 2001
From: Stephan Huber
Date: Thu, 25 Oct 2018 12:13:56 +0200
Subject: [PATCH 005/125] adding AWS IoT policy version handling

[+] `list_policy_versions`
[+] `get_policy_version`
[+] `create_policy_version`
[+] `delete_policy_version`
[+] `set_default_policy_version`
---
 moto/iot/models.py         | 104 +++++++++++++++++++++++++++++++++++--
 moto/iot/responses.py      |  36 ++++++++++++-
 tests/test_iot/test_iot.py |  82 ++++++++++++++++++++++++++++-
 3 files changed, 215 insertions(+), 7 deletions(-)

diff --git a/moto/iot/models.py b/moto/iot/models.py
index 4789e045f..4bcab26eb 100644
--- a/moto/iot/models.py
+++ b/moto/iot/models.py
@@ -136,18 +136,19 @@ class FakeCertificate(BaseModel):

 class FakePolicy(BaseModel):

-    def __init__(self, name, document, region_name):
+    def __init__(self, name, document, region_name, default_version_id='1'):
         self.name = name
         self.document = document
         self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, name)
-        self.version = '1'  # TODO: handle version
+        self.default_version_id = default_version_id
+        self.versions = [FakePolicyVersion(self.name, document, True, region_name)]

     def to_get_dict(self):
         return {
             'policyName': self.name,
             'policyArn': self.arn,
             'policyDocument': self.document,
-            'defaultVersionId': self.version
+            'defaultVersionId': self.default_version_id
         }

@@ -155,7 +156,7 @@ class FakePolicy(BaseModel):
             'policyName': self.name,
             'policyArn': self.arn,
             'policyDocument': self.document,
-            'policyVersionId': self.version
+            'policyVersionId': self.default_version_id
         }

     def to_dict(self):
@@ -165,6 +166,50 @@ class FakePolicy(BaseModel):
         }


+class FakePolicyVersion(object):
+
+    def __init__(self,
+                 policy_name,
+                 document,
+                 is_default,
+                 region_name):
+        self.name = policy_name
+        self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, policy_name)
+        self.document = document or {}
+        self.is_default = is_default
+        self.version_id = '1'
+
+        self.create_datetime = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.last_modified_datetime = time.mktime(datetime(2015, 1, 2).timetuple())
+
+    def to_get_dict(self):
+        return {
+            'policyName': self.name,
+            'policyArn': self.arn,
+            'policyDocument': self.document,
+            'policyVersionId': self.version_id,
+            'isDefaultVersion': self.is_default,
+            'creationDate': self.create_datetime,
+            'lastModifiedDate': self.last_modified_datetime,
+            'generationId': self.version_id
+        }
+
+    def to_dict_at_creation(self):
+        return {
+            'policyArn': self.arn,
+            'policyDocument': self.document,
+            'policyVersionId': self.version_id,
+            'isDefaultVersion': self.is_default
+        }
+
+    def to_dict(self):
+        return {
+            'versionId': self.version_id,
+            'isDefaultVersion': self.is_default,
+            'createDate': self.create_datetime,
+        }
+
+
 class FakeJob(BaseModel):
     JOB_ID_REGEX_PATTERN = "[a-zA-Z0-9_-]"
     JOB_ID_REGEX = re.compile(JOB_ID_REGEX_PATTERN)
@@ -436,6 +481,57 @@ class IoTBackend(BaseBackend):
         policy = self.get_policy(policy_name)
         del self.policies[policy.name]

+    def create_policy_version(self, policy_name, policy_document, set_as_default):
+        policy = self.get_policy(policy_name)
+        if not policy:
+            raise ResourceNotFoundException()
+        version = FakePolicyVersion(policy_name, policy_document, set_as_default, self.region_name)
+        policy.versions.append(version)
+        version.version_id = '{0}'.format(len(policy.versions))
+        if set_as_default:
+            self.set_default_policy_version(policy_name, version.version_id)
+        return version
+
+    def set_default_policy_version(self, policy_name, version_id):
+        policy = self.get_policy(policy_name)
+        if not policy:
+            raise ResourceNotFoundException()
+        for version in policy.versions:
+            if version.version_id == version_id:
+                version.is_default = True
+                policy.default_version_id = version.version_id
+                policy.document = version.document
+            else:
+                version.is_default = False
+
+    def get_policy_version(self, policy_name, version_id):
+        policy = self.get_policy(policy_name)
+        if not policy:
+            raise ResourceNotFoundException()
+        for version in policy.versions:
+            if version.version_id == version_id:
+                return version
+        raise ResourceNotFoundException()
+
+    def list_policy_versions(self, policy_name):
+        policy = self.get_policy(policy_name)
+        if not policy:
+            raise ResourceNotFoundException()
+        return policy.versions
+
+    def delete_policy_version(self, policy_name, version_id):
+        policy = self.get_policy(policy_name)
+        if not policy:
+            raise ResourceNotFoundException()
+        if version_id == policy.default_version_id:
+            raise InvalidRequestException(
+                "Cannot delete the default version of a policy")
+        for i, v in enumerate(policy.versions):
+            if v.version_id == version_id:
+                del policy.versions[i]
+                return
+        raise ResourceNotFoundException()
+
     def _get_principal(self, principal_arn):
         """
         raise ResourceNotFoundException
diff --git a/moto/iot/responses.py b/moto/iot/responses.py
index f5e25fdbc..66d5ddfd6 100644
--- a/moto/iot/responses.py
+++ b/moto/iot/responses.py
@@ -1,7 +1,7 @@
 from __future__ import unicode_literals

 import json
-from urllib.parse import unquote
+from six.moves.urllib.parse import parse_qs, urlparse, unquote

 from moto.core.responses import BaseResponse
 from .models import iot_backends
@@ -235,6 +235,40 @@ class IoTResponse(BaseResponse):
         )
         return json.dumps(dict())

+    def create_policy_version(self):
+        policy_name = self._get_param('policyName')
+        policy_document = self._get_param('policyDocument')
+        set_as_default = self._get_bool_param('setAsDefault')
+        policy_version = self.iot_backend.create_policy_version(policy_name, policy_document, set_as_default)
+
+        return json.dumps(dict(policy_version.to_dict_at_creation()))
+
+    def set_default_policy_version(self):
+        policy_name = self._get_param('policyName')
+        version_id = self._get_param('policyVersionId')
+        self.iot_backend.set_default_policy_version(policy_name, version_id)
+
+        return json.dumps(dict())
+
+    def get_policy_version(self):
+        policy_name = self._get_param('policyName')
+        version_id = self._get_param('policyVersionId')
+        policy_version = self.iot_backend.get_policy_version(policy_name, version_id)
+        return json.dumps(dict(policy_version.to_get_dict()))
+
+    def list_policy_versions(self):
+        policy_name = self._get_param('policyName')
+        policy_versions = self.iot_backend.list_policy_versions(policy_name=policy_name)
+
+        return json.dumps(dict(policyVersions=[_.to_dict() for _ in policy_versions]))
+
+    def delete_policy_version(self):
+        policy_name = self._get_param('policyName')
+        version_id = self._get_param('policyVersionId')
+        self.iot_backend.delete_policy_version(policy_name, version_id)
+
+        return json.dumps(dict())
+
     def attach_policy(self):
         policy_name = self._get_param("policyName")
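One consequence of the model code above is that the default version is protected: delete_policy_version on it raises InvalidRequestException, so tearing a policy down means removing the non-default versions first (or just calling delete_policy). A hedged client-side sketch of that teardown order:

def delete_policy_and_versions(client, policy_name):
    # Delete every non-default version; the default version can only go
    # away together with the policy itself.
    versions = client.list_policy_versions(policyName=policy_name)['policyVersions']
    for version in versions:
        if not version['isDefaultVersion']:
            client.delete_policy_version(
                policyName=policy_name, policyVersionId=version['versionId'])
    client.delete_policy(policyName=policy_name)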
moto import mock_iot @@ -52,6 +51,85 @@ def test_list_attached_policies(): policies['policies'].should.be.empty +@mock_iot +def test_policy_versions(): + client = boto3.client('iot', region_name='ap-northeast-1') + policy_name = 'my-policy' + doc = '{}' + + policy = client.create_policy(policyName=policy_name, policyDocument=doc) + policy.should.have.key('policyName').which.should.equal(policy_name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(json.dumps({})) + policy.should.have.key('policyVersionId').which.should.equal('1') + + policy = client.get_policy(policyName=policy_name) + policy.should.have.key('policyName').which.should.equal(policy_name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(json.dumps({})) + policy.should.have.key('defaultVersionId').which.should.equal(policy['defaultVersionId']) + + policy1 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({'version': 'version_1'}), + setAsDefault=True) + policy1.should.have.key('policyArn').which.should_not.be.none + policy1.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) + policy1.should.have.key('policyVersionId').which.should.equal('2') + policy1.should.have.key('isDefaultVersion').which.should.equal(True) + + policy2 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({'version': 'version_2'}), + setAsDefault=False) + policy2.should.have.key('policyArn').which.should_not.be.none + policy2.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_2'})) + policy2.should.have.key('policyVersionId').which.should.equal('3') + policy2.should.have.key('isDefaultVersion').which.should.equal(False) + + policy = client.get_policy(policyName=policy_name) + policy.should.have.key('policyName').which.should.equal(policy_name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) + policy.should.have.key('defaultVersionId').which.should.equal(policy1['policyVersionId']) + + policy_versions = client.list_policy_versions(policyName=policy_name) + policy_versions.should.have.key('policyVersions').which.should.have.length_of(3) + list(map(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])).count(True).should.equal(1) + default_policy = list(filter(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])) + default_policy[0].should.have.key('versionId').should.equal(policy1['policyVersionId']) + + policy = client.get_policy(policyName=policy_name) + policy.should.have.key('policyName').which.should.equal(policy_name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) + policy.should.have.key('defaultVersionId').which.should.equal(policy1['policyVersionId']) + + client.set_default_policy_version(policyName=policy_name, policyVersionId=policy2['policyVersionId']) + policy_versions = client.list_policy_versions(policyName=policy_name) + policy_versions.should.have.key('policyVersions').which.should.have.length_of(3) + list(map(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])).count(True).should.equal(1) + default_policy = list(filter(lambda item: item['isDefaultVersion'], 
+
+    policy = client.get_policy(policyName=policy_name)
+    policy.should.have.key('policyName').which.should.equal(policy_name)
+    policy.should.have.key('policyArn').which.should_not.be.none
+    policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_2'}))
+    policy.should.have.key('defaultVersionId').which.should.equal(policy2['policyVersionId'])
+
+    client.delete_policy_version(policyName=policy_name, policyVersionId='1')
+    policy_versions = client.list_policy_versions(policyName=policy_name)
+    policy_versions.should.have.key('policyVersions').which.should.have.length_of(2)
+
+    client.delete_policy_version(policyName=policy_name, policyVersionId=policy1['policyVersionId'])
+    policy_versions = client.list_policy_versions(policyName=policy_name)
+    policy_versions.should.have.key('policyVersions').which.should.have.length_of(1)
+
+    # should fail, as it's the default policy version; use delete_policy instead
+    try:
+        client.delete_policy_version(policyName=policy_name, policyVersionId=policy2['policyVersionId'])
+        assert False, 'Should have failed in previous call'
+    except Exception as exception:
+        exception.response['Error']['Message'].should.equal('Cannot delete the default version of a policy')
+
+
 @mock_iot
 def test_things():
     client = boto3.client('iot', region_name='ap-northeast-1')

From 1c7becb4f69b8ae171f8e72dedd997d2d31e6d73 Mon Sep 17 00:00:00 2001
From: Stephan Huber
Date: Thu, 25 Oct 2018 12:19:35 +0200
Subject: [PATCH 006/125] :rotating_light: linting error

---
 moto/iot/responses.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/moto/iot/responses.py b/moto/iot/responses.py
index 66d5ddfd6..3ef5bc93e 100644
--- a/moto/iot/responses.py
+++ b/moto/iot/responses.py
@@ -1,7 +1,7 @@
 from __future__ import unicode_literals
 import json
-from six.moves.urllib.parse import parse_qs, urlparse, unquote
+from six.moves.urllib.parse import unquote
 from moto.core.responses import BaseResponse
 from .models import iot_backends

From 181b5539f670ae50b60d92a3e694cba62e73ce90 Mon Sep 17 00:00:00 2001
From: Stephan Huber
Date: Thu, 25 Oct 2018 12:45:35 +0200
Subject: [PATCH 007/125] :memo: update coverage to reflect changes made

---
 IMPLEMENTATION_COVERAGE.md | 9214 +++++++++++++++++++-----------------
 1 file changed, 4786 insertions(+), 4428 deletions(-)

diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md
index 7c68c0e31..a153b92fc 100644
--- a/IMPLEMENTATION_COVERAGE.md
+++ b/IMPLEMENTATION_COVERAGE.md
@@ -1,4428 +1,4786 @@
list_attached_role_policies -- [X] list_attached_user_policies -- [ ] list_entities_for_policy -- [X] list_group_policies -- [X] list_groups -- [ ] list_groups_for_user -- [ ] list_instance_profiles -- [ ] list_instance_profiles_for_role -- [X] list_mfa_devices -- [ ] list_open_id_connect_providers -- [X] list_policies -- [X] list_policy_versions -- [X] list_role_policies -- [ ] list_roles -- [ ] list_saml_providers -- [ ] list_server_certificates -- [ ] list_service_specific_credentials -- [ ] list_signing_certificates -- [ ] list_ssh_public_keys -- [X] list_user_policies -- [X] list_users -- [ ] list_virtual_mfa_devices -- [X] put_group_policy -- [X] put_role_policy -- [X] put_user_policy -- [ ] remove_client_id_from_open_id_connect_provider -- [X] remove_role_from_instance_profile -- [X] remove_user_from_group -- [ ] reset_service_specific_credential -- [ ] resync_mfa_device -- [ ] set_default_policy_version -- [ ] simulate_custom_policy -- [ ] simulate_principal_policy -- [X] update_access_key -- [ ] update_account_password_policy -- [ ] update_assume_role_policy -- [ ] update_group -- [X] update_login_profile -- [ ] update_open_id_connect_provider_thumbprint -- [ ] update_role -- [ ] update_role_description -- [ ] update_saml_provider -- [ ] update_server_certificate -- [ ] update_service_specific_credential -- [ ] update_signing_certificate -- [ ] update_ssh_public_key -- [ ] update_user -- [ ] upload_server_certificate -- [ ] upload_signing_certificate -- [ ] upload_ssh_public_key - -## importexport - 0% implemented -- [ ] cancel_job -- [ ] create_job -- [ ] get_shipping_label -- [ ] get_status -- [ ] list_jobs -- [ ] update_job - -## inspector - 0% implemented -- [ ] add_attributes_to_findings -- [ ] create_assessment_target -- [ ] create_assessment_template -- [ ] create_resource_group -- [ ] delete_assessment_run -- [ ] delete_assessment_target -- [ ] delete_assessment_template -- [ ] describe_assessment_runs -- [ ] describe_assessment_targets -- [ ] describe_assessment_templates -- [ ] describe_cross_account_access_role -- [ ] describe_findings -- [ ] describe_resource_groups -- [ ] describe_rules_packages -- [ ] get_assessment_report -- [ ] get_telemetry_metadata -- [ ] list_assessment_run_agents -- [ ] list_assessment_runs -- [ ] list_assessment_targets -- [ ] list_assessment_templates -- [ ] list_event_subscriptions -- [ ] list_findings -- [ ] list_rules_packages -- [ ] list_tags_for_resource -- [ ] preview_agents -- [ ] register_cross_account_access_role -- [ ] remove_attributes_from_findings -- [ ] set_tags_for_resource -- [ ] start_assessment_run -- [ ] stop_assessment_run -- [ ] subscribe_to_event -- [ ] unsubscribe_from_event -- [ ] update_assessment_target - -## iot - 30% implemented -- [ ] accept_certificate_transfer -- [X] add_thing_to_thing_group -- [ ] associate_targets_with_job -- [ ] attach_policy -- [X] attach_principal_policy -- [X] attach_thing_principal -- [ ] cancel_certificate_transfer -- [ ] cancel_job -- [ ] clear_default_authorizer -- [ ] create_authorizer -- [ ] create_certificate_from_csr -- [X] create_job -- [X] create_keys_and_certificate -- [ ] create_ota_update -- [X] create_policy -- [ ] create_policy_version -- [ ] create_role_alias -- [ ] create_stream -- [X] create_thing -- [X] create_thing_group -- [X] create_thing_type -- [ ] create_topic_rule -- [ ] delete_authorizer -- [ ] delete_ca_certificate -- [X] delete_certificate -- [ ] delete_ota_update -- [X] delete_policy -- [ ] delete_policy_version -- [ ] delete_registration_code -- [ ] 
-
-## iot - 30% implemented
-- [ ] accept_certificate_transfer
-- [X] add_thing_to_thing_group
-- [ ] associate_targets_with_job
-- [ ] attach_policy
-- [X] attach_principal_policy
-- [X] attach_thing_principal
-- [ ] cancel_certificate_transfer
-- [ ] cancel_job
-- [ ] clear_default_authorizer
-- [ ] create_authorizer
-- [ ] create_certificate_from_csr
-- [X] create_job
-- [X] create_keys_and_certificate
-- [ ] create_ota_update
-- [X] create_policy
-- [ ] create_policy_version
-- [ ] create_role_alias
-- [ ] create_stream
-- [X] create_thing
-- [X] create_thing_group
-- [X] create_thing_type
-- [ ] create_topic_rule
-- [ ] delete_authorizer
-- [ ] delete_ca_certificate
-- [X] delete_certificate
-- [ ] delete_ota_update
-- [X] delete_policy
-- [ ] delete_policy_version
-- [ ] delete_registration_code
-- [ ] delete_role_alias
-- [ ] delete_stream
-- [X] delete_thing
-- [X] delete_thing_group
-- [X] delete_thing_type
-- [ ] delete_topic_rule
-- [ ] delete_v2_logging_level
-- [ ] deprecate_thing_type
-- [ ] describe_authorizer
-- [ ] describe_ca_certificate
-- [X] describe_certificate
-- [ ] describe_default_authorizer
-- [ ] describe_endpoint
-- [ ] describe_event_configurations
-- [ ] describe_index
-- [X] describe_job
-- [ ] describe_job_execution
-- [ ] describe_role_alias
-- [ ] describe_stream
-- [X] describe_thing
-- [X] describe_thing_group
-- [ ] describe_thing_registration_task
-- [X] describe_thing_type
-- [ ] detach_policy
-- [X] detach_principal_policy
-- [X] detach_thing_principal
-- [ ] disable_topic_rule
-- [ ] enable_topic_rule
-- [ ] get_effective_policies
-- [ ] get_indexing_configuration
-- [ ] get_job_document
-- [ ] get_logging_options
-- [ ] get_ota_update
-- [X] get_policy
-- [ ] get_policy_version
-- [ ] get_registration_code
-- [ ] get_topic_rule
-- [ ] get_v2_logging_options
-- [ ] list_attached_policies
-- [ ] list_authorizers
-- [ ] list_ca_certificates
-- [X] list_certificates
-- [ ] list_certificates_by_ca
-- [ ] list_indices
-- [ ] list_job_executions_for_job
-- [ ] list_job_executions_for_thing
-- [ ] list_jobs
-- [ ] list_ota_updates
-- [ ] list_outgoing_certificates
-- [X] list_policies
-- [X] list_policy_principals
-- [ ] list_policy_versions
-- [X] list_principal_policies
-- [X] list_principal_things
-- [ ] list_role_aliases
-- [ ] list_streams
-- [ ] list_targets_for_policy
-- [X] list_thing_groups
-- [X] list_thing_groups_for_thing
-- [X] list_thing_principals
-- [ ] list_thing_registration_task_reports
-- [ ] list_thing_registration_tasks
-- [X] list_thing_types
-- [X] list_things
-- [X] list_things_in_thing_group
-- [ ] list_topic_rules
-- [ ] list_v2_logging_levels
-- [ ] register_ca_certificate
-- [ ] register_certificate
-- [ ] register_thing
-- [ ] reject_certificate_transfer
-- [X] remove_thing_from_thing_group
-- [ ] replace_topic_rule
-- [ ] search_index
-- [ ] set_default_authorizer
-- [ ] set_default_policy_version
-- [ ] set_logging_options
-- [ ] set_v2_logging_level
-- [ ] set_v2_logging_options
-- [ ] start_thing_registration_task
-- [ ] stop_thing_registration_task
-- [ ] test_authorization
-- [ ] test_invoke_authorizer
-- [ ] transfer_certificate
-- [ ] update_authorizer
-- [ ] update_ca_certificate
-- [X] update_certificate
-- [ ] update_event_configurations
-- [ ] update_indexing_configuration
-- [ ] update_role_alias
-- [ ] update_stream
-- [X] update_thing
-- [X] update_thing_group
-- [X] update_thing_groups_for_thing
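The commit message above says this patch implements `get_job_document` for AWS IoT; the pre-patch list still shows it unchecked. A minimal sketch of how the new endpoint might be exercised against moto's `mock_iot` decorator, in the spirit of the accompanying `tests/test_iot/test_iot.py` changes; the job id, thing name, and document contents are illustrative, not taken from the patch:

```python
import json

import boto3
from moto import mock_iot


@mock_iot
def test_get_job_document():
    client = boto3.client("iot", region_name="eu-west-1")
    thing = client.create_thing(thingName="my-thing")
    job_document = {"field": "value"}

    # create_job is already checked in the list above; the document
    # round-trips through it.
    client.create_job(
        jobId="my-job-1",
        targets=[thing["thingArn"]],
        document=json.dumps(job_document),
        description="example job",
    )

    # The endpoint this patch adds should hand the document back as submitted.
    job = client.get_job_document(jobId="my-job-1")
    assert json.loads(job["document"]) == job_document
```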
-
-## iot-data - 0% implemented
-- [ ] delete_thing_shadow
-- [ ] get_thing_shadow
-- [ ] publish
-- [ ] update_thing_shadow
-
-## iot-jobs-data - 0% implemented
-- [ ] describe_job_execution
-- [ ] get_pending_job_executions
-- [ ] start_next_pending_job_execution
-- [ ] update_job_execution
-
-## kinesis - 56% implemented
-- [X] add_tags_to_stream
-- [X] create_stream
-- [ ] decrease_stream_retention_period
-- [X] delete_stream
-- [ ] describe_limits
-- [X] describe_stream
-- [ ] describe_stream_summary
-- [ ] disable_enhanced_monitoring
-- [ ] enable_enhanced_monitoring
-- [X] get_records
-- [X] get_shard_iterator
-- [ ] increase_stream_retention_period
-- [ ] list_shards
-- [X] list_streams
-- [X] list_tags_for_stream
-- [X] merge_shards
-- [X] put_record
-- [X] put_records
-- [X] remove_tags_from_stream
-- [X] split_shard
-- [ ] start_stream_encryption
-- [ ] stop_stream_encryption
-- [ ] update_shard_count
-
-## kinesis-video-archived-media - 0% implemented
-- [ ] get_media_for_fragment_list
-- [ ] list_fragments
-
-## kinesis-video-media - 0% implemented
-- [ ] get_media
-
-## kinesisanalytics - 0% implemented
-- [ ] add_application_cloud_watch_logging_option
-- [ ] add_application_input
-- [ ] add_application_input_processing_configuration
-- [ ] add_application_output
-- [ ] add_application_reference_data_source
-- [ ] create_application
-- [ ] delete_application
-- [ ] delete_application_cloud_watch_logging_option
-- [ ] delete_application_input_processing_configuration
-- [ ] delete_application_output
-- [ ] delete_application_reference_data_source
-- [ ] describe_application
-- [ ] discover_input_schema
-- [ ] list_applications
-- [ ] start_application
-- [ ] stop_application
-- [ ] update_application
-
-## kinesisvideo - 0% implemented
-- [ ] create_stream
-- [ ] delete_stream
-- [ ] describe_stream
-- [ ] get_data_endpoint
-- [ ] list_streams
-- [ ] list_tags_for_stream
-- [ ] tag_stream
-- [ ] untag_stream
-- [ ] update_data_retention
-- [ ] update_stream
-
-## kms - 25% implemented
-- [ ] cancel_key_deletion
-- [ ] create_alias
-- [ ] create_grant
-- [X] create_key
-- [ ] decrypt
-- [X] delete_alias
-- [ ] delete_imported_key_material
-- [X] describe_key
-- [ ] disable_key
-- [X] disable_key_rotation
-- [ ] enable_key
-- [X] enable_key_rotation
-- [ ] encrypt
-- [ ] generate_data_key
-- [ ] generate_data_key_without_plaintext
-- [ ] generate_random
-- [X] get_key_policy
-- [X] get_key_rotation_status
-- [ ] get_parameters_for_import
-- [ ] import_key_material
-- [ ] list_aliases
-- [ ] list_grants
-- [ ] list_key_policies
-- [X] list_keys
-- [ ] list_resource_tags
-- [ ] list_retirable_grants
-- [X] put_key_policy
-- [ ] re_encrypt
-- [ ] retire_grant
-- [ ] revoke_grant
-- [ ] schedule_key_deletion
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_alias
-- [ ] update_key_description
-
-## lambda - 0% implemented
-- [ ] add_permission
-- [ ] create_alias
-- [ ] create_event_source_mapping
-- [ ] create_function
-- [ ] delete_alias
-- [ ] delete_event_source_mapping
-- [ ] delete_function
-- [ ] delete_function_concurrency
-- [ ] get_account_settings
-- [ ] get_alias
-- [ ] get_event_source_mapping
-- [ ] get_function
-- [ ] get_function_configuration
-- [ ] get_policy
-- [ ] invoke
-- [ ] invoke_async
-- [ ] list_aliases
-- [ ] list_event_source_mappings
-- [ ] list_functions
-- [ ] list_tags
-- [ ] list_versions_by_function
-- [ ] publish_version
-- [ ] put_function_concurrency
-- [ ] remove_permission
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_alias
-- [ ] update_event_source_mapping
-- [ ] update_function_code
-- [ ] update_function_configuration
-
-## lex-models - 0% implemented
-- [ ] create_bot_version
-- [ ] create_intent_version
-- [ ] create_slot_type_version
-- [ ] delete_bot
-- [ ] delete_bot_alias
-- [ ] delete_bot_channel_association
-- [ ] delete_bot_version
-- [ ] delete_intent
-- [ ] delete_intent_version
-- [ ] delete_slot_type
-- [ ] delete_slot_type_version
-- [ ] delete_utterances
-- [ ] get_bot
-- [ ] get_bot_alias
-- [ ] get_bot_aliases
-- [ ] get_bot_channel_association
-- [ ] get_bot_channel_associations
-- [ ] get_bot_versions
-- [ ] get_bots
-- [ ] get_builtin_intent
-- [ ] get_builtin_intents
-- [ ] get_builtin_slot_types
-- [ ] get_export
-- [ ] get_import
-- [ ] get_intent
-- [ ] get_intent_versions
-- [ ] get_intents
-- [ ] get_slot_type
-- [ ] get_slot_type_versions
-- [ ] get_slot_types
-- [ ] get_utterances_view
-- [ ] put_bot
-- [ ] put_bot_alias
-- [ ] put_intent
-- [ ] put_slot_type
-- [ ] start_import
-
-## lex-runtime - 0% implemented
-- [ ] post_content
-- [ ] post_text
-
-## lightsail - 0% implemented
-- [ ] allocate_static_ip
-- [ ] attach_disk
-- [ ] attach_instances_to_load_balancer
-- [ ] attach_load_balancer_tls_certificate
-- [ ] attach_static_ip
-- [ ] close_instance_public_ports
-- [ ] create_disk
-- [ ] create_disk_from_snapshot
-- [ ] create_disk_snapshot
-- [ ] create_domain
-- [ ] create_domain_entry
-- [ ] create_instance_snapshot
-- [ ] create_instances
-- [ ] create_instances_from_snapshot
-- [ ] create_key_pair
-- [ ] create_load_balancer
-- [ ] create_load_balancer_tls_certificate
-- [ ] delete_disk
-- [ ] delete_disk_snapshot
-- [ ] delete_domain
-- [ ] delete_domain_entry
-- [ ] delete_instance
-- [ ] delete_instance_snapshot
-- [ ] delete_key_pair
-- [ ] delete_load_balancer
-- [ ] delete_load_balancer_tls_certificate
-- [ ] detach_disk
-- [ ] detach_instances_from_load_balancer
-- [ ] detach_static_ip
-- [ ] download_default_key_pair
-- [ ] get_active_names
-- [ ] get_blueprints
-- [ ] get_bundles
-- [ ] get_disk
-- [ ] get_disk_snapshot
-- [ ] get_disk_snapshots
-- [ ] get_disks
-- [ ] get_domain
-- [ ] get_domains
-- [ ] get_instance
-- [ ] get_instance_access_details
-- [ ] get_instance_metric_data
-- [ ] get_instance_port_states
-- [ ] get_instance_snapshot
-- [ ] get_instance_snapshots
-- [ ] get_instance_state
-- [ ] get_instances
-- [ ] get_key_pair
-- [ ] get_key_pairs
-- [ ] get_load_balancer
-- [ ] get_load_balancer_metric_data
-- [ ] get_load_balancer_tls_certificates
-- [ ] get_load_balancers
-- [ ] get_operation
-- [ ] get_operations
-- [ ] get_operations_for_resource
-- [ ] get_regions
-- [ ] get_static_ip
-- [ ] get_static_ips
-- [ ] import_key_pair
-- [ ] is_vpc_peered
-- [ ] open_instance_public_ports
-- [ ] peer_vpc
-- [ ] put_instance_public_ports
-- [ ] reboot_instance
-- [ ] release_static_ip
-- [ ] start_instance
-- [ ] stop_instance
-- [ ] unpeer_vpc
-- [ ] update_domain_entry
-- [ ] update_load_balancer_attribute
-
-## logs - 27% implemented
-- [ ] associate_kms_key
-- [ ] cancel_export_task
-- [ ] create_export_task
-- [X] create_log_group
-- [X] create_log_stream
-- [ ] delete_destination
-- [X] delete_log_group
-- [X] delete_log_stream
-- [ ] delete_metric_filter
-- [ ] delete_resource_policy
-- [ ] delete_retention_policy
-- [ ] delete_subscription_filter
-- [ ] describe_destinations
-- [ ] describe_export_tasks
-- [X] describe_log_groups
-- [X] describe_log_streams
-- [ ] describe_metric_filters
-- [ ] describe_resource_policies
-- [ ] describe_subscription_filters
-- [ ] disassociate_kms_key
-- [X] filter_log_events
-- [X] get_log_events
-- [ ] list_tags_log_group
-- [ ] put_destination
-- [ ] put_destination_policy
-- [X] put_log_events
-- [ ] put_metric_filter
-- [ ] put_resource_policy
-- [ ] put_retention_policy
-- [ ] put_subscription_filter
-- [ ] tag_log_group
-- [ ] test_metric_filter
-- [ ] untag_log_group
-
-## machinelearning - 0% implemented
-- [ ] add_tags
-- [ ] create_batch_prediction
-- [ ] create_data_source_from_rds
-- [ ] create_data_source_from_redshift
-- [ ] create_data_source_from_s3
-- [ ] create_evaluation
-- [ ] create_ml_model
-- [ ] create_realtime_endpoint
-- [ ] delete_batch_prediction
-- [ ] delete_data_source
-- [ ] delete_evaluation
-- [ ] delete_ml_model
-- [ ] delete_realtime_endpoint
-- [ ] delete_tags
-- [ ] describe_batch_predictions
-- [ ] describe_data_sources
-- [ ] describe_evaluations
-- [ ] describe_ml_models
-- [ ] describe_tags
-- [ ] get_batch_prediction
-- [ ] get_data_source
-- [ ] get_evaluation
-- [ ] get_ml_model
-- [ ] predict
-- [ ] update_batch_prediction
-- [ ] update_data_source
-- [ ] update_evaluation
-- [ ] update_ml_model
-
-## marketplace-entitlement - 0% implemented
-- [ ] get_entitlements
-
-## marketplacecommerceanalytics - 0% implemented
-- [ ] generate_data_set
-- [ ] start_support_data_export
-
-## mediaconvert - 0% implemented
-- [ ] cancel_job
-- [ ] create_job
-- [ ] create_job_template
-- [ ] create_preset
-- [ ] create_queue
-- [ ] delete_job_template
-- [ ] delete_preset
-- [ ] delete_queue
-- [ ] describe_endpoints
-- [ ] get_job
-- [ ] get_job_template
-- [ ] get_preset
-- [ ] get_queue
-- [ ] list_job_templates
-- [ ] list_jobs
-- [ ] list_presets
-- [ ] list_queues
-- [ ] update_job_template
-- [ ] update_preset
-- [ ] update_queue
-
-## medialive - 0% implemented
-- [ ] create_channel
-- [ ] create_input
-- [ ] create_input_security_group
-- [ ] delete_channel
-- [ ] delete_input
-- [ ] delete_input_security_group
-- [ ] describe_channel
-- [ ] describe_input
-- [ ] describe_input_security_group
-- [ ] list_channels
-- [ ] list_input_security_groups
-- [ ] list_inputs
-- [ ] start_channel
-- [ ] stop_channel
-- [ ] update_channel
-- [ ] update_input
-- [ ] update_input_security_group
-
-## mediapackage - 0% implemented
-- [ ] create_channel
-- [ ] create_origin_endpoint
-- [ ] delete_channel
-- [ ] delete_origin_endpoint
-- [ ] describe_channel
-- [ ] describe_origin_endpoint
-- [ ] list_channels
-- [ ] list_origin_endpoints
-- [ ] rotate_channel_credentials
-- [ ] update_channel
-- [ ] update_origin_endpoint
-
-## mediastore - 0% implemented
-- [ ] create_container
-- [ ] delete_container
-- [ ] delete_container_policy
-- [ ] delete_cors_policy
-- [ ] describe_container
-- [ ] get_container_policy
-- [ ] get_cors_policy
-- [ ] list_containers
-- [ ] put_container_policy
-- [ ] put_cors_policy
-
-## mediastore-data - 0% implemented
-- [ ] delete_object
-- [ ] describe_object
-- [ ] get_object
-- [ ] list_items
-- [ ] put_object
-
-## meteringmarketplace - 0% implemented
-- [ ] batch_meter_usage
-- [ ] meter_usage
-- [ ] resolve_customer
-
-## mgh - 0% implemented
-- [ ] associate_created_artifact
-- [ ] associate_discovered_resource
-- [ ] create_progress_update_stream
-- [ ] delete_progress_update_stream
-- [ ] describe_application_state
-- [ ] describe_migration_task
-- [ ] disassociate_created_artifact
-- [ ] disassociate_discovered_resource
-- [ ] import_migration_task
-- [ ] list_created_artifacts
-- [ ] list_discovered_resources
-- [ ] list_migration_tasks
-- [ ] list_progress_update_streams
-- [ ] notify_application_state
-- [ ] notify_migration_task_state
-- [ ] put_resource_attributes
-
-## mobile - 0% implemented
-- [ ] create_project
-- [ ] delete_project
-- [ ] describe_bundle
-- [ ] describe_project
-- [ ] export_bundle
-- [ ] export_project
-- [ ] list_bundles
-- [ ] list_projects
-- [ ] update_project
-
-## mq - 0% implemented
-- [ ] create_broker
-- [ ] create_configuration
-- [ ] create_user
-- [ ] delete_broker
-- [ ] delete_user
-- [ ] describe_broker
-- [ ] describe_configuration
-- [ ] describe_configuration_revision
-- [ ] describe_user
-- [ ] list_brokers
-- [ ] list_configuration_revisions
-- [ ] list_configurations
-- [ ] list_users
-- [ ] reboot_broker
-- [ ] update_broker
-- [ ] update_configuration
-- [ ] update_user
-
-## mturk - 0% implemented
-- [ ] accept_qualification_request
-- [ ] approve_assignment
-- [ ] associate_qualification_with_worker
-- [ ] create_additional_assignments_for_hit
-- [ ] create_hit
-- [ ] create_hit_type
-- [ ] create_hit_with_hit_type
-- [ ] create_qualification_type
-- [ ] create_worker_block
-- [ ] delete_hit
-- [ ] delete_qualification_type
-- [ ] delete_worker_block
-- [ ] disassociate_qualification_from_worker
-- [ ] get_account_balance
-- [ ] get_assignment
-- [ ] get_file_upload_url
-- [ ] get_hit
-- [ ] get_qualification_score
-- [ ] get_qualification_type
-- [ ] list_assignments_for_hit
-- [ ] list_bonus_payments
-- [ ] list_hits
-- [ ] list_hits_for_qualification_type
-- [ ] list_qualification_requests
-- [ ] list_qualification_types
-- [ ] list_review_policy_results_for_hit
-- [ ] list_reviewable_hits
-- [ ] list_worker_blocks
-- [ ] list_workers_with_qualification_type
-- [ ] notify_workers
-- [ ] reject_assignment
-- [ ] reject_qualification_request
-- [ ] send_bonus
-- [ ] send_test_event_notification
-- [ ] update_expiration_for_hit
-- [ ] update_hit_review_status
-- [ ] update_hit_type_of_hit
-- [ ] update_notification_settings
-- [ ] update_qualification_type
-
-## opsworks - 12% implemented
-- [ ] assign_instance
-- [ ] assign_volume
-- [ ] associate_elastic_ip
-- [ ] attach_elastic_load_balancer
-- [ ] clone_stack
-- [X] create_app
-- [ ] create_deployment
-- [X] create_instance
-- [X] create_layer
-- [X] create_stack
-- [ ] create_user_profile
-- [ ] delete_app
-- [ ] delete_instance
-- [ ] delete_layer
-- [ ] delete_stack
-- [ ] delete_user_profile
-- [ ] deregister_ecs_cluster
-- [ ] deregister_elastic_ip
-- [ ] deregister_instance
-- [ ] deregister_rds_db_instance
-- [ ] deregister_volume
-- [ ] describe_agent_versions
-- [X] describe_apps
-- [ ] describe_commands
-- [ ] describe_deployments
-- [ ] describe_ecs_clusters
-- [ ] describe_elastic_ips
-- [ ] describe_elastic_load_balancers
-- [X] describe_instances
-- [X] describe_layers
-- [ ] describe_load_based_auto_scaling
-- [ ] describe_my_user_profile
-- [ ] describe_operating_systems
-- [ ] describe_permissions
-- [ ] describe_raid_arrays
-- [ ] describe_rds_db_instances
-- [ ] describe_service_errors
-- [ ] describe_stack_provisioning_parameters
-- [ ] describe_stack_summary
-- [X] describe_stacks
-- [ ] describe_time_based_auto_scaling
-- [ ] describe_user_profiles
-- [ ] describe_volumes
-- [ ] detach_elastic_load_balancer
-- [ ] disassociate_elastic_ip
-- [ ] get_hostname_suggestion
-- [ ] grant_access
-- [ ] list_tags
-- [ ] reboot_instance
-- [ ] register_ecs_cluster
-- [ ] register_elastic_ip
-- [ ] register_instance
-- [ ] register_rds_db_instance
-- [ ] register_volume
-- [ ] set_load_based_auto_scaling
-- [ ] set_permission
-- [ ] set_time_based_auto_scaling
-- [X] start_instance
-- [ ] start_stack
-- [ ] stop_instance
-- [ ] stop_stack
-- [ ] tag_resource
-- [ ] unassign_instance
-- [ ] unassign_volume
-- [ ] untag_resource
-- [ ] update_app
-- [ ] update_elastic_ip
-- [ ] update_instance
-- [ ] update_layer
-- [ ] update_my_user_profile
-- [ ] update_rds_db_instance
-- [ ] update_stack
-- [ ] update_user_profile
-- [ ] update_volume
-
-## opsworkscm - 0% implemented
-- [ ] associate_node
-- [ ] create_backup
-- [ ] create_server
-- [ ] delete_backup
-- [ ] delete_server
-- [ ] describe_account_attributes
-- [ ] describe_backups
-- [ ] describe_events
-- [ ] describe_node_association_status
-- [ ] describe_servers
-- [ ] disassociate_node
-- [ ] restore_server
-- [ ] start_maintenance
-- [ ] update_server
-- [ ] update_server_engine_attributes
-
-## organizations - 30% implemented
-- [ ] accept_handshake
-- [ ] attach_policy
-- [ ] cancel_handshake
-- [X] create_account
-- [X] create_organization
-- [X] create_organizational_unit
-- [ ] create_policy
-- [ ] decline_handshake
-- [ ] delete_organization
-- [ ] delete_organizational_unit
-- [ ] delete_policy
-- [X] describe_account
-- [ ] describe_create_account_status
-- [ ] describe_handshake
-- [X] describe_organization
-- [X] describe_organizational_unit
-- [ ] describe_policy
-- [ ] detach_policy
-- [ ] disable_aws_service_access
-- [ ] disable_policy_type
-- [ ] enable_all_features
-- [ ] enable_aws_service_access
-- [ ] enable_policy_type
-- [ ] invite_account_to_organization
-- [ ] leave_organization
-- [X] list_accounts
-- [X] list_accounts_for_parent
-- [ ] list_aws_service_access_for_organization
-- [X] list_children
-- [ ] list_create_account_status
-- [ ] list_handshakes_for_account
-- [ ] list_handshakes_for_organization
-- [X] list_organizational_units_for_parent
-- [X] list_parents
-- [ ] list_policies
-- [ ] list_policies_for_target
-- [X] list_roots
-- [ ] list_targets_for_policy
-- [X] move_account
-- [ ] remove_account_from_organization
-- [ ] update_organizational_unit
-- [ ] update_policy
-
-## pinpoint - 0% implemented
-- [ ] create_app
-- [ ] create_campaign
-- [ ] create_export_job
-- [ ] create_import_job
-- [ ] create_segment
-- [ ] delete_adm_channel
-- [ ] delete_apns_channel
-- [ ] delete_apns_sandbox_channel
-- [ ] delete_apns_voip_channel
-- [ ] delete_apns_voip_sandbox_channel
-- [ ] delete_app
-- [ ] delete_baidu_channel
-- [ ] delete_campaign
-- [ ] delete_email_channel
-- [ ] delete_endpoint
-- [ ] delete_event_stream
-- [ ] delete_gcm_channel
-- [ ] delete_segment
-- [ ] delete_sms_channel
-- [ ] get_adm_channel
-- [ ] get_apns_channel
-- [ ] get_apns_sandbox_channel
-- [ ] get_apns_voip_channel
-- [ ] get_apns_voip_sandbox_channel
-- [ ] get_app
-- [ ] get_application_settings
-- [ ] get_apps
-- [ ] get_baidu_channel
-- [ ] get_campaign
-- [ ] get_campaign_activities
-- [ ] get_campaign_version
-- [ ] get_campaign_versions
-- [ ] get_campaigns
-- [ ] get_email_channel
-- [ ] get_endpoint
-- [ ] get_event_stream
-- [ ] get_export_job
-- [ ] get_export_jobs
-- [ ] get_gcm_channel
-- [ ] get_import_job
-- [ ] get_import_jobs
-- [ ] get_segment
-- [ ] get_segment_export_jobs
-- [ ] get_segment_import_jobs
-- [ ] get_segment_version
-- [ ] get_segment_versions
-- [ ] get_segments
-- [ ] get_sms_channel
-- [ ] put_event_stream
-- [ ] send_messages
-- [ ] send_users_messages
-- [ ] update_adm_channel
-- [ ] update_apns_channel
-- [ ] update_apns_sandbox_channel
-- [ ] update_apns_voip_channel
-- [ ] update_apns_voip_sandbox_channel
-- [ ] update_application_settings
-- [ ] update_baidu_channel
-- [ ] update_campaign
-- [ ] update_email_channel
-- [ ] update_endpoint
-- [ ] update_endpoints_batch
-- [ ] update_gcm_channel
-- [ ] update_segment
-- [ ] update_sms_channel
-
-## polly - 83% implemented
-- [X] delete_lexicon
-- [X] describe_voices
-- [X] get_lexicon
-- [X] list_lexicons
-- [X] put_lexicon
-- [ ] synthesize_speech
-
-## pricing - 0% implemented
-- [ ] describe_services
-- [ ] get_attribute_values
-- [ ] get_products
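A note on the unchecked boxes in the sections above: when a service has a moto backend but a particular action has no handler yet, calling it under the decorator typically surfaces as a `NotImplementedError` rather than a real AWS call. A small sketch using kinesis, where `create_stream` is checked but `list_shards` is not; the stream name is arbitrary, and the exact failure mode is an assumption about moto's dispatch rather than a documented contract:

```python
import boto3
import pytest
from moto import mock_kinesis


@mock_kinesis
def test_unimplemented_action_raises():
    client = boto3.client("kinesis", region_name="us-east-1")
    client.create_stream(StreamName="my-stream", ShardCount=1)  # checked above

    # list_shards is still unchecked in the list above, so moto has no
    # handler for it yet and the mocked dispatch should refuse the call.
    with pytest.raises(NotImplementedError):
        client.list_shards(StreamName="my-stream")
```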
-
-## rds - 0% implemented
-- [ ] add_role_to_db_cluster
-- [ ] add_source_identifier_to_subscription
-- [ ] add_tags_to_resource
-- [ ] apply_pending_maintenance_action
-- [ ] authorize_db_security_group_ingress
-- [ ] copy_db_cluster_parameter_group
-- [ ] copy_db_cluster_snapshot
-- [ ] copy_db_parameter_group
-- [ ] copy_db_snapshot
-- [ ] copy_option_group
-- [ ] create_db_cluster
-- [ ] create_db_cluster_parameter_group
-- [ ] create_db_cluster_snapshot
-- [ ] create_db_instance
-- [ ] create_db_instance_read_replica
-- [ ] create_db_parameter_group
-- [ ] create_db_security_group
-- [ ] create_db_snapshot
-- [ ] create_db_subnet_group
-- [ ] create_event_subscription
-- [ ] create_option_group
-- [ ] delete_db_cluster
-- [ ] delete_db_cluster_parameter_group
-- [ ] delete_db_cluster_snapshot
-- [ ] delete_db_instance
-- [ ] delete_db_parameter_group
-- [ ] delete_db_security_group
-- [ ] delete_db_snapshot
-- [ ] delete_db_subnet_group
-- [ ] delete_event_subscription
-- [ ] delete_option_group
-- [ ] describe_account_attributes
-- [ ] describe_certificates
-- [ ] describe_db_cluster_parameter_groups
-- [ ] describe_db_cluster_parameters
-- [ ] describe_db_cluster_snapshot_attributes
-- [ ] describe_db_cluster_snapshots
-- [ ] describe_db_clusters
-- [ ] describe_db_engine_versions
-- [ ] describe_db_instances
-- [ ] describe_db_log_files
-- [ ] describe_db_parameter_groups
-- [ ] describe_db_parameters
-- [ ] describe_db_security_groups
-- [ ] describe_db_snapshot_attributes
-- [ ] describe_db_snapshots
-- [ ] describe_db_subnet_groups
-- [ ] describe_engine_default_cluster_parameters
-- [ ] describe_engine_default_parameters
-- [ ] describe_event_categories
-- [ ] describe_event_subscriptions
-- [ ] describe_events
-- [ ] describe_option_group_options
-- [ ] describe_option_groups
-- [ ] describe_orderable_db_instance_options
-- [ ] describe_pending_maintenance_actions
-- [ ] describe_reserved_db_instances
-- [ ] describe_reserved_db_instances_offerings
-- [ ] describe_source_regions
-- [ ] describe_valid_db_instance_modifications
-- [ ] download_db_log_file_portion
-- [ ] failover_db_cluster
-- [ ] list_tags_for_resource
-- [ ] modify_db_cluster
-- [ ] modify_db_cluster_parameter_group
-- [ ] modify_db_cluster_snapshot_attribute
-- [ ] modify_db_instance
-- [ ] modify_db_parameter_group
-- [ ] modify_db_snapshot
-- [ ] modify_db_snapshot_attribute
-- [ ] modify_db_subnet_group
-- [ ] modify_event_subscription
-- [ ] modify_option_group
-- [ ] promote_read_replica
-- [ ] promote_read_replica_db_cluster
-- [ ] purchase_reserved_db_instances_offering
-- [ ] reboot_db_instance
-- [ ] remove_role_from_db_cluster
-- [ ] remove_source_identifier_from_subscription
-- [ ] remove_tags_from_resource
-- [ ] reset_db_cluster_parameter_group
-- [ ] reset_db_parameter_group
-- [ ] restore_db_cluster_from_s3
-- [ ] restore_db_cluster_from_snapshot
-- [ ] restore_db_cluster_to_point_in_time
-- [ ] restore_db_instance_from_db_snapshot
-- [ ] restore_db_instance_from_s3
-- [ ] restore_db_instance_to_point_in_time
-- [ ] revoke_db_security_group_ingress
-- [ ] start_db_instance
-- [ ] stop_db_instance
-
-## redshift - 41% implemented
-- [ ] authorize_cluster_security_group_ingress
-- [ ] authorize_snapshot_access
-- [ ] copy_cluster_snapshot
-- [X] create_cluster
-- [X] create_cluster_parameter_group
-- [X] create_cluster_security_group
-- [X] create_cluster_snapshot
-- [X] create_cluster_subnet_group
-- [ ] create_event_subscription
-- [ ] create_hsm_client_certificate
-- [ ] create_hsm_configuration
-- [X] create_snapshot_copy_grant
-- [X] create_tags
-- [X] delete_cluster
-- [X] delete_cluster_parameter_group
-- [X] delete_cluster_security_group
-- [X] delete_cluster_snapshot
-- [X] delete_cluster_subnet_group
-- [ ] delete_event_subscription
-- [ ] delete_hsm_client_certificate
-- [ ] delete_hsm_configuration
-- [X] delete_snapshot_copy_grant
-- [X] delete_tags
-- [X] describe_cluster_parameter_groups
-- [ ] describe_cluster_parameters
-- [X] describe_cluster_security_groups
-- [X] describe_cluster_snapshots
-- [X] describe_cluster_subnet_groups
-- [ ] describe_cluster_versions
-- [X] describe_clusters
-- [ ] describe_default_cluster_parameters
-- [ ] describe_event_categories
-- [ ] describe_event_subscriptions
-- [ ] describe_events
-- [ ] describe_hsm_client_certificates
-- [ ] describe_hsm_configurations
-- [ ] describe_logging_status
-- [ ] describe_orderable_cluster_options
-- [ ] describe_reserved_node_offerings
-- [ ] describe_reserved_nodes
-- [ ] describe_resize
-- [X] describe_snapshot_copy_grants
-- [ ] describe_table_restore_status
-- [X] describe_tags
-- [ ] disable_logging
-- [X] disable_snapshot_copy
-- [ ] enable_logging
-- [X] enable_snapshot_copy
-- [ ] get_cluster_credentials
-- [X] modify_cluster
-- [ ] modify_cluster_iam_roles
-- [ ] modify_cluster_parameter_group
-- [ ] modify_cluster_subnet_group
-- [ ] modify_event_subscription
-- [X] modify_snapshot_copy_retention_period
-- [ ] purchase_reserved_node_offering
-- [ ] reboot_cluster
-- [ ] reset_cluster_parameter_group
-- [X] restore_from_cluster_snapshot
-- [ ] restore_table_from_cluster_snapshot
-- [ ] revoke_cluster_security_group_ingress
-- [ ] revoke_snapshot_access
-- [ ] rotate_encryption_key
-
-## rekognition - 0% implemented
-- [ ] compare_faces
-- [ ] create_collection
-- [ ] create_stream_processor
-- [ ] delete_collection
-- [ ] delete_faces
-- [ ] delete_stream_processor
-- [ ] describe_stream_processor
-- [ ] detect_faces
-- [ ] detect_labels
-- [ ] detect_moderation_labels
-- [ ] detect_text
-- [ ] get_celebrity_info
-- [ ] get_celebrity_recognition
-- [ ] get_content_moderation
-- [ ] get_face_detection
-- [ ] get_face_search
-- [ ] get_label_detection
-- [ ] get_person_tracking
-- [ ] index_faces
-- [ ] list_collections
-- [ ] list_faces
-- [ ] list_stream_processors
-- [ ] recognize_celebrities
-- [ ] search_faces
-- [ ] search_faces_by_image
-- [ ] start_celebrity_recognition
-- [ ] start_content_moderation
-- [ ] start_face_detection
-- [ ] start_face_search
-- [ ] start_label_detection
-- [ ] start_person_tracking
-- [ ] start_stream_processor
-- [ ] stop_stream_processor
-
-## resource-groups - 0% implemented
-- [ ] create_group
-- [ ] delete_group
-- [ ] get_group
-- [ ] get_group_query
-- [ ] get_tags
-- [ ] list_group_resources
-- [ ] list_groups
-- [ ] search_resources
-- [ ] tag
-- [ ] untag
-- [ ] update_group
-- [ ] update_group_query
-
-## resourcegroupstaggingapi - 60% implemented
-- [X] get_resources
-- [X] get_tag_keys
-- [X] get_tag_values
-- [ ] tag_resources
-- [ ] untag_resources
-
-## route53 - 12% implemented
-- [ ] associate_vpc_with_hosted_zone
-- [ ] change_resource_record_sets
-- [X] change_tags_for_resource
-- [X] create_health_check
-- [X] create_hosted_zone
-- [ ] create_query_logging_config
-- [ ] create_reusable_delegation_set
-- [ ] create_traffic_policy
-- [ ] create_traffic_policy_instance
-- [ ] create_traffic_policy_version
-- [ ] create_vpc_association_authorization
-- [X] delete_health_check
-- [X] delete_hosted_zone
-- [ ] delete_query_logging_config
-- [ ] delete_reusable_delegation_set
-- [ ] delete_traffic_policy
-- [ ] delete_traffic_policy_instance
-- [ ] delete_vpc_association_authorization
-- [ ] disassociate_vpc_from_hosted_zone
-- [ ] get_account_limit
-- [ ] get_change
-- [ ] get_checker_ip_ranges
-- [ ] get_geo_location
-- [ ] get_health_check
-- [ ] get_health_check_count
-- [ ] get_health_check_last_failure_reason
-- [ ] get_health_check_status
-- [X] get_hosted_zone
-- [ ] get_hosted_zone_count
-- [ ] get_hosted_zone_limit
-- [ ] get_query_logging_config
-- [ ] get_reusable_delegation_set
-- [ ] get_reusable_delegation_set_limit
-- [ ] get_traffic_policy
-- [ ] get_traffic_policy_instance
-- [ ] get_traffic_policy_instance_count
-- [ ] list_geo_locations
-- [ ] list_health_checks
-- [ ] list_hosted_zones
-- [ ] list_hosted_zones_by_name
-- [ ] list_query_logging_configs
-- [ ] list_resource_record_sets
-- [ ] list_reusable_delegation_sets
-- [X] list_tags_for_resource
-- [ ] list_tags_for_resources
-- [ ] list_traffic_policies
-- [ ] list_traffic_policy_instances
-- [ ] list_traffic_policy_instances_by_hosted_zone
-- [ ] list_traffic_policy_instances_by_policy
-- [ ] list_traffic_policy_versions
-- [ ] list_vpc_association_authorizations
-- [ ] test_dns_answer
-- [ ] update_health_check
-- [ ] update_hosted_zone_comment
-- [ ] update_traffic_policy_comment
-- [ ] update_traffic_policy_instance
-
-## route53domains - 0% implemented
-- [ ] check_domain_availability
-- [ ] check_domain_transferability
-- [ ] delete_tags_for_domain
-- [ ] disable_domain_auto_renew
-- [ ] disable_domain_transfer_lock
-- [ ] enable_domain_auto_renew
-- [ ] enable_domain_transfer_lock
-- [ ] get_contact_reachability_status
-- [ ] get_domain_detail
-- [ ] get_domain_suggestions
-- [ ] get_operation_detail
-- [ ] list_domains
-- [ ] list_operations
-- [ ] list_tags_for_domain
-- [ ] register_domain
-- [ ] renew_domain
-- [ ] resend_contact_reachability_email
-- [ ] retrieve_domain_auth_code
-- [ ] transfer_domain
-- [ ] update_domain_contact
-- [ ] update_domain_contact_privacy
-- [ ] update_domain_nameservers
-- [ ] update_tags_for_domain
-- [ ] view_billing
-
-## s3 - 15% implemented
-- [ ] abort_multipart_upload
-- [ ] complete_multipart_upload
-- [ ] copy_object
-- [X] create_bucket
-- [ ] create_multipart_upload
-- [X] delete_bucket
-- [ ] delete_bucket_analytics_configuration
-- [X] delete_bucket_cors
-- [ ] delete_bucket_encryption
-- [ ] delete_bucket_inventory_configuration
-- [ ] delete_bucket_lifecycle
-- [ ] delete_bucket_metrics_configuration
-- [X] delete_bucket_policy
-- [ ] delete_bucket_replication
-- [X] delete_bucket_tagging
-- [ ] delete_bucket_website
-- [ ] delete_object
-- [ ] delete_object_tagging
-- [ ] delete_objects
-- [ ] get_bucket_accelerate_configuration
-- [X] get_bucket_acl
-- [ ] get_bucket_analytics_configuration
-- [ ] get_bucket_cors
-- [ ] get_bucket_encryption
-- [ ] get_bucket_inventory_configuration
-- [ ] get_bucket_lifecycle
-- [ ] get_bucket_lifecycle_configuration
-- [ ] get_bucket_location
-- [ ] get_bucket_logging
-- [ ] get_bucket_metrics_configuration
-- [ ] get_bucket_notification
-- [ ] get_bucket_notification_configuration
-- [X] get_bucket_policy
-- [ ] get_bucket_replication
-- [ ] get_bucket_request_payment
-- [ ] get_bucket_tagging
-- [X] get_bucket_versioning
-- [ ] get_bucket_website
-- [ ] get_object
-- [ ] get_object_acl
-- [ ] get_object_tagging
-- [ ] get_object_torrent
-- [ ] head_bucket
-- [ ] head_object
-- [ ] list_bucket_analytics_configurations
-- [ ] list_bucket_inventory_configurations
-- [ ] list_bucket_metrics_configurations
-- [ ] list_buckets
-- [ ] list_multipart_uploads
-- [ ] list_object_versions
-- [ ] list_objects
-- [ ] list_objects_v2
-- [ ] list_parts
-- [ ] put_bucket_accelerate_configuration
-- [ ] put_bucket_acl
-- [ ] put_bucket_analytics_configuration
-- [X] put_bucket_cors
-- [ ] put_bucket_encryption
-- [ ] put_bucket_inventory_configuration
-- [ ] put_bucket_lifecycle
-- [ ] put_bucket_lifecycle_configuration
-- [X] put_bucket_logging
-- [ ] put_bucket_metrics_configuration
-- [ ] put_bucket_notification
-- [X] put_bucket_notification_configuration
-- [ ] put_bucket_policy
-- [ ] put_bucket_replication
-- [ ] put_bucket_request_payment
-- [X] put_bucket_tagging
-- [ ] put_bucket_versioning
-- [ ] put_bucket_website
-- [ ] put_object
-- [ ] put_object_acl
-- [ ] put_object_tagging
-- [ ] restore_object
-- [ ] select_object_content
-- [ ] upload_part
-- [ ] upload_part_copy
-
-## sagemaker - 0% implemented
-- [ ] add_tags
-- [ ] create_endpoint
-- [ ] create_endpoint_config
-- [ ] create_model
-- [ ] create_notebook_instance
-- [ ] create_notebook_instance_lifecycle_config
-- [ ] create_presigned_notebook_instance_url
-- [ ] create_training_job
-- [ ] delete_endpoint
-- [ ] delete_endpoint_config
-- [ ] delete_model
-- [ ] delete_notebook_instance
-- [ ] delete_notebook_instance_lifecycle_config
-- [ ] delete_tags
-- [ ] describe_endpoint
-- [ ] describe_endpoint_config
-- [ ] describe_model
-- [ ] describe_notebook_instance
-- [ ] describe_notebook_instance_lifecycle_config
-- [ ] describe_training_job
-- [ ] list_endpoint_configs
-- [ ] list_endpoints
-- [ ] list_models
-- [ ] list_notebook_instance_lifecycle_configs
-- [ ] list_notebook_instances
-- [ ] list_tags
-- [ ] list_training_jobs
-- [ ] start_notebook_instance
-- [ ] stop_notebook_instance
-- [ ] stop_training_job
-- [ ] update_endpoint
-- [ ] update_endpoint_weights_and_capacities
-- [ ] update_notebook_instance
-- [ ] update_notebook_instance_lifecycle_config
-
-## sagemaker-runtime - 0% implemented
-- [ ] invoke_endpoint
-
-## sdb - 0% implemented
-- [ ] batch_delete_attributes
-- [ ] batch_put_attributes
-- [ ] create_domain
-- [ ] delete_attributes
-- [ ] delete_domain
-- [ ] domain_metadata
-- [ ] get_attributes
-- [ ] list_domains
-- [ ] put_attributes
-- [ ] select
-
-## secretsmanager - 33% implemented
-- [ ] cancel_rotate_secret
-- [X] create_secret
-- [ ] delete_secret
-- [X] describe_secret
-- [X] get_random_password
-- [X] get_secret_value
-- [ ] list_secret_version_ids
-- [ ] list_secrets
-- [ ] put_secret_value
-- [ ] restore_secret
-- [X] rotate_secret
-- [ ] tag_resource
-- [ ] untag_resource
-- [ ] update_secret
-- [ ] update_secret_version_stage
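The secretsmanager section above marks `create_secret` and `get_secret_value` as implemented, which is enough for a basic round-trip under the mock. A minimal sketch, assuming the usual `mock_secretsmanager` decorator naming; the secret name and value are placeholders:

```python
import boto3
from moto import mock_secretsmanager


@mock_secretsmanager
def test_secret_round_trip():
    client = boto3.client("secretsmanager", region_name="us-west-2")
    client.create_secret(Name="my-secret", SecretString="s3cr3t")

    # Both operations used here are checked in the list above.
    value = client.get_secret_value(SecretId="my-secret")
    assert value["SecretString"] == "s3cr3t"
```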
-
-## serverlessrepo - 0% implemented
-- [ ] create_application
-- [ ] create_application_version
-- [ ] create_cloud_formation_change_set
-- [ ] delete_application
-- [ ] get_application
-- [ ] get_application_policy
-- [ ] list_application_versions
-- [ ] list_applications
-- [ ] put_application_policy
-- [ ] update_application
-
-## servicecatalog - 0% implemented
-- [ ] accept_portfolio_share
-- [ ] associate_principal_with_portfolio
-- [ ] associate_product_with_portfolio
-- [ ] associate_tag_option_with_resource
-- [ ] copy_product
-- [ ] create_constraint
-- [ ] create_portfolio
-- [ ] create_portfolio_share
-- [ ] create_product
-- [ ] create_provisioned_product_plan
-- [ ] create_provisioning_artifact
-- [ ] create_tag_option
-- [ ] delete_constraint
-- [ ] delete_portfolio
-- [ ] delete_portfolio_share
-- [ ] delete_product
-- [ ] delete_provisioned_product_plan
-- [ ] delete_provisioning_artifact
-- [ ] delete_tag_option
-- [ ] describe_constraint
-- [ ] describe_copy_product_status
-- [ ] describe_portfolio
-- [ ] describe_product
-- [ ] describe_product_as_admin
-- [ ] describe_product_view
-- [ ] describe_provisioned_product
-- [ ] describe_provisioned_product_plan
-- [ ] describe_provisioning_artifact
-- [ ] describe_provisioning_parameters
-- [ ] describe_record
-- [ ] describe_tag_option
-- [ ] disassociate_principal_from_portfolio
-- [ ] disassociate_product_from_portfolio
-- [ ] disassociate_tag_option_from_resource
-- [ ] execute_provisioned_product_plan
-- [ ] list_accepted_portfolio_shares
-- [ ] list_constraints_for_portfolio
-- [ ] list_launch_paths
-- [ ] list_portfolio_access
-- [ ] list_portfolios
-- [ ] list_portfolios_for_product
-- [ ] list_principals_for_portfolio
-- [ ] list_provisioned_product_plans
-- [ ] list_provisioning_artifacts
-- [ ] list_record_history
-- [ ] list_resources_for_tag_option
-- [ ] list_tag_options
-- [ ] provision_product
-- [ ] reject_portfolio_share
-- [ ] scan_provisioned_products
-- [ ] search_products
-- [ ] search_products_as_admin
-- [ ] search_provisioned_products
-- [ ] terminate_provisioned_product
-- [ ] update_constraint
-- [ ] update_portfolio
-- [ ] update_product
-- [ ] update_provisioned_product
-- [ ] update_provisioning_artifact
-- [ ] update_tag_option
-
-## servicediscovery - 0% implemented
-- [ ] create_private_dns_namespace
-- [ ] create_public_dns_namespace
-- [ ] create_service
-- [ ] delete_namespace
-- [ ] delete_service
-- [ ] deregister_instance
-- [ ] get_instance
-- [ ] get_instances_health_status
-- [ ] get_namespace
-- [ ] get_operation
-- [ ] get_service
-- [ ] list_instances
-- [ ] list_namespaces
-- [ ] list_operations
-- [ ] list_services
-- [ ] register_instance
-- [ ] update_instance_custom_health_status
-- [ ] update_service
-
-## ses - 11% implemented
-- [ ] clone_receipt_rule_set
-- [ ] create_configuration_set
-- [ ] create_configuration_set_event_destination
-- [ ] create_configuration_set_tracking_options
-- [ ] create_custom_verification_email_template
-- [ ] create_receipt_filter
-- [ ] create_receipt_rule
-- [ ] create_receipt_rule_set
-- [ ] create_template
-- [ ] delete_configuration_set
-- [ ] delete_configuration_set_event_destination
-- [ ] delete_configuration_set_tracking_options
-- [ ] delete_custom_verification_email_template
-- [X] delete_identity
-- [ ] delete_identity_policy
-- [ ] delete_receipt_filter
-- [ ] delete_receipt_rule
-- [ ] delete_receipt_rule_set
-- [ ] delete_template
-- [ ] delete_verified_email_address
-- [ ] describe_active_receipt_rule_set
-- [ ] describe_configuration_set
-- [ ] describe_receipt_rule
-- [ ] describe_receipt_rule_set
-- [ ] get_account_sending_enabled
-- [ ] get_custom_verification_email_template
-- [ ] get_identity_dkim_attributes
-- [ ] get_identity_mail_from_domain_attributes
-- [ ] get_identity_notification_attributes
-- [ ] get_identity_policies
-- [ ] get_identity_verification_attributes
-- [X] get_send_quota
-- [ ] get_send_statistics
-- [ ] get_template
-- [ ] list_configuration_sets
-- [ ] list_custom_verification_email_templates
-- [X] list_identities
-- [ ] list_identity_policies
-- [ ] list_receipt_filters
-- [ ] list_receipt_rule_sets
-- [ ] list_templates
-- [X] list_verified_email_addresses
-- [ ] put_identity_policy
-- [ ] reorder_receipt_rule_set
-- [ ] send_bounce
-- [ ] send_bulk_templated_email
-- [ ] send_custom_verification_email
-- [X] send_email
-- [X] send_raw_email
-- [ ] send_templated_email
-- [ ] set_active_receipt_rule_set
-- [ ] set_identity_dkim_enabled
-- [ ] set_identity_feedback_forwarding_enabled
-- [ ] set_identity_headers_in_notifications_enabled
-- [ ] set_identity_mail_from_domain
-- [ ] set_identity_notification_topic
-- [ ] set_receipt_rule_position
-- [ ] test_render_template
-- [ ] update_account_sending_enabled
-- [ ] update_configuration_set_event_destination
-- [ ] update_configuration_set_reputation_metrics_enabled
-- [ ] update_configuration_set_sending_enabled
-- [ ] update_configuration_set_tracking_options
-- [ ] update_custom_verification_email_template
-- [ ] update_receipt_rule
-- [ ] update_template
-- [ ] verify_domain_dkim
-- [ ] verify_domain_identity
-- [X] verify_email_address
-- [X] verify_email_identity
-
-## shield - 0% implemented
-- [ ] create_protection
-- [ ] create_subscription
-- [ ] delete_protection
-- [ ] delete_subscription
-- [ ] describe_attack
-- [ ] describe_protection
-- [ ] describe_subscription
-- [ ] get_subscription_state
-- [ ] list_attacks
-- [ ] list_protections
-
-## sms - 0% implemented
-- [ ] create_replication_job
-- [ ] delete_replication_job
-- [ ] delete_server_catalog
-- [ ] disassociate_connector
-- [ ] get_connectors
-- [ ] get_replication_jobs
-- [ ] get_replication_runs
-- [ ] get_servers
-- [ ] import_server_catalog
-- [ ] start_on_demand_replication_run
-- [ ] update_replication_job
-
-## snowball - 0% implemented
-- [ ] cancel_cluster
-- [ ] cancel_job
-- [ ] create_address
-- [ ] create_cluster
-- [ ] create_job
-- [ ] describe_address
-- [ ] describe_addresses
-- [ ] describe_cluster
-- [ ] describe_job
-- [ ] get_job_manifest
-- [ ] get_job_unlock_code
-- [ ] get_snowball_usage
-- [ ] list_cluster_jobs
-- [ ] list_clusters
-- [ ] list_jobs
-- [ ] update_cluster
-- [ ] update_job
-
-## sns - 53% implemented
-- [ ] add_permission
-- [ ] check_if_phone_number_is_opted_out
-- [ ] confirm_subscription
-- [X] create_platform_application
-- [X] create_platform_endpoint
-- [X] create_topic
-- [X] delete_endpoint
-- [X] delete_platform_application
-- [X] delete_topic
-- [ ] get_endpoint_attributes
-- [ ] get_platform_application_attributes
-- [ ] get_sms_attributes
-- [X] get_subscription_attributes
-- [ ] get_topic_attributes
-- [X] list_endpoints_by_platform_application
-- [ ] list_phone_numbers_opted_out
-- [X] list_platform_applications
-- [X] list_subscriptions
-- [ ] list_subscriptions_by_topic
-- [X] list_topics
-- [ ] opt_in_phone_number
-- [X] publish
-- [ ] remove_permission
-- [X] set_endpoint_attributes
-- [ ] set_platform_application_attributes
-- [ ] set_sms_attributes
-- [X] set_subscription_attributes
-- [ ] set_topic_attributes
-- [X] subscribe
-- [X] unsubscribe
-
-## sqs - 65% implemented
-- [X] add_permission
-- [X] change_message_visibility
-- [ ] change_message_visibility_batch
-- [X] create_queue
-- [X] delete_message
-- [ ] delete_message_batch
-- [X] delete_queue
-- [ ] get_queue_attributes
-- [ ] get_queue_url
-- [X] list_dead_letter_source_queues
-- [ ] list_queue_tags
-- [X] list_queues
-- [X] purge_queue
-- [ ] receive_message
-- [X] remove_permission
-- [X] send_message
-- [ ] send_message_batch
-- [X] set_queue_attributes
-- [X] tag_queue
-- [X] untag_queue
-
-## ssm - 11% implemented
-- [X] add_tags_to_resource
-- [ ] cancel_command
-- [ ] create_activation
-- [ ] create_association
-- [ ] create_association_batch
-- [ ] create_document
-- [ ] create_maintenance_window
-- [ ] create_patch_baseline
-- [ ] create_resource_data_sync
-- [ ] delete_activation
-- [ ] delete_association
-- [ ] delete_document
-- [ ] delete_maintenance_window
-- [X] delete_parameter
-- [X] delete_parameters
-- [ ] delete_patch_baseline
-- [ ] delete_resource_data_sync
-- [ ] deregister_managed_instance
-- [ ] deregister_patch_baseline_for_patch_group
-- [ ] deregister_target_from_maintenance_window
-- [ ] deregister_task_from_maintenance_window
-- [ ] describe_activations
-- [ ] describe_association
-- [ ] describe_automation_executions
-- [ ] describe_automation_step_executions
-- [ ] describe_available_patches
-- [ ] describe_document
-- [ ] describe_document_permission
-- [ ] describe_effective_instance_associations
-- [ ] describe_effective_patches_for_patch_baseline
-- [ ] describe_instance_associations_status
-- [ ] describe_instance_information
-- [ ] describe_instance_patch_states
-- [ ] describe_instance_patch_states_for_patch_group
-- [ ] describe_instance_patches
-- [ ] describe_maintenance_window_execution_task_invocations
-- [ ] describe_maintenance_window_execution_tasks
-- [ ] describe_maintenance_window_executions
-- [ ] describe_maintenance_window_targets
-- [ ] describe_maintenance_window_tasks
-- [ ] describe_maintenance_windows
-- [ ] describe_parameters
-- [ ] describe_patch_baselines
-- [ ] describe_patch_group_state
-- [ ] describe_patch_groups
-- [ ] get_automation_execution
-- [ ] get_command_invocation
-- [ ] get_default_patch_baseline
-- [ ] get_deployable_patch_snapshot_for_instance
-- [ ] get_document
-- [ ] get_inventory
-- [ ] get_inventory_schema
-- [ ] get_maintenance_window
-- [ ] get_maintenance_window_execution
-- [ ] get_maintenance_window_execution_task
-- [ ] get_maintenance_window_execution_task_invocation
-- [ ] get_maintenance_window_task
-- [X] get_parameter
-- [ ] get_parameter_history
-- [X] get_parameters
-- [X] get_parameters_by_path
-- [ ] get_patch_baseline
-- [ ] get_patch_baseline_for_patch_group
-- [ ] list_association_versions
-- [ ] list_associations
-- [ ] list_command_invocations
-- [X] list_commands
-- [ ] list_compliance_items
-- [ ] list_compliance_summaries
-- [ ] list_document_versions
-- [ ] list_documents
-- [ ] list_inventory_entries
-- [ ] list_resource_compliance_summaries
-- [ ] list_resource_data_sync
-- [X] list_tags_for_resource
-- [ ] modify_document_permission
-- [ ] put_compliance_items
-- [ ] put_inventory
-- [X] put_parameter
-- [ ] register_default_patch_baseline
-- [ ] register_patch_baseline_for_patch_group
-- [ ] register_target_with_maintenance_window
-- [ ] register_task_with_maintenance_window
-- [X] remove_tags_from_resource
-- [ ] send_automation_signal
-- [X] send_command
-- [ ] start_automation_execution
-- [ ] stop_automation_execution
-- [ ] update_association
-- [ ] update_association_status
-- [ ] update_document
-- [ ] update_document_default_version
-- [ ] update_maintenance_window
-- [ ] update_maintenance_window_target
-- [ ] update_maintenance_window_task
-- [ ] update_managed_instance_role
-- [ ] update_patch_baseline
-
-## stepfunctions - 0% implemented
-- [ ] create_activity
-- [ ] create_state_machine
-- [ ] delete_activity
-- [ ] delete_state_machine
-- [ ] describe_activity
-- [ ] describe_execution
-- [ ] describe_state_machine
-- [ ] describe_state_machine_for_execution
-- [ ] get_activity_task
-- [ ] get_execution_history
-- [ ] list_activities
-- [ ] list_executions
-- [ ] list_state_machines
-- [ ] send_task_failure
-- [ ] send_task_heartbeat
-- [ ] send_task_success
-- [ ] start_execution
-- [ ] stop_execution
-- [ ] update_state_machine
-
-## storagegateway - 0% implemented
-- [ ] activate_gateway
-- [ ] add_cache
-- [ ] add_tags_to_resource
-- [ ] add_upload_buffer
-- [ ] add_working_storage
-- [ ] cancel_archival
-- [ ] cancel_retrieval
-- [ ] create_cached_iscsi_volume
-- [ ] create_nfs_file_share
-- [ ] create_snapshot
-- [ ] create_snapshot_from_volume_recovery_point
-- [ ] create_stored_iscsi_volume
-- [ ] create_tape_with_barcode
-- [ ] create_tapes
-- [ ] delete_bandwidth_rate_limit
-- [ ] delete_chap_credentials
-- [ ] delete_file_share
-- [ ] delete_gateway
-- [ ] delete_snapshot_schedule
-- [ ] delete_tape
-- [ ] delete_tape_archive
-- [ ] delete_volume
-- [ ] describe_bandwidth_rate_limit
-- [ ] describe_cache
-- [ ] describe_cached_iscsi_volumes
-- [ ] describe_chap_credentials
-- [ ] describe_gateway_information
-- [ ] describe_maintenance_start_time
-- [ ] describe_nfs_file_shares
-- [ ] describe_snapshot_schedule
-- [ ] describe_stored_iscsi_volumes
-- [ ] describe_tape_archives
-- [ ] describe_tape_recovery_points
-- [ ] describe_tapes
-- [ ] describe_upload_buffer
-- [ ] describe_vtl_devices
-- [ ] describe_working_storage
-- [ ] disable_gateway
-- [ ] list_file_shares
-- [ ] list_gateways
-- [ ] list_local_disks
-- [ ] list_tags_for_resource
-- [ ] list_tapes
-- [ ] list_volume_initiators
-- [ ] list_volume_recovery_points
-- [ ] list_volumes
-- [ ] notify_when_uploaded
-- [ ] refresh_cache
-- [ ] remove_tags_from_resource
-- [ ] reset_cache
-- [ ] retrieve_tape_archive
-- [ ] retrieve_tape_recovery_point
-- [ ] set_local_console_password
-- [ ] shutdown_gateway
-- [ ] start_gateway
-- [ ] update_bandwidth_rate_limit
-- [ ] update_chap_credentials
-- [ ] update_gateway_information
-- [ ] update_gateway_software_now
-- [ ] update_maintenance_start_time
-- [ ] update_nfs_file_share
-- [ ] update_snapshot_schedule
-- [ ] update_vtl_device_type
-
-## sts - 42% implemented
-- [X] assume_role
-- [ ] assume_role_with_saml
-- [ ] assume_role_with_web_identity
-- [ ] decode_authorization_message
-- [ ] get_caller_identity
-- [X] get_federation_token
-- [X] get_session_token
-
-## support - 0% implemented
-- [ ] add_attachments_to_set
-- [ ] add_communication_to_case
-- [ ] create_case
-- [ ] describe_attachment
-- [ ] describe_cases
-- [ ] describe_communications
-- [ ] describe_services
-- [ ] describe_severity_levels
-- [ ] describe_trusted_advisor_check_refresh_statuses
-- [ ] describe_trusted_advisor_check_result
-- [ ] describe_trusted_advisor_check_summaries
-- [ ] describe_trusted_advisor_checks
-- [ ] refresh_trusted_advisor_check
-- [ ] resolve_case
-
-## swf - 58% implemented
-- [ ] count_closed_workflow_executions
-- [ ] count_open_workflow_executions
-- [X] count_pending_activity_tasks
-- [X] count_pending_decision_tasks
-- [ ] deprecate_activity_type
-- [X] deprecate_domain
-- [ ] deprecate_workflow_type
-- [ ] describe_activity_type
-- [X] describe_domain
-- [X] describe_workflow_execution
-- [ ] describe_workflow_type
-- [ ] get_workflow_execution_history
-- [ ] list_activity_types
-- [X] list_closed_workflow_executions
-- [X] list_domains
-- [X] list_open_workflow_executions
-- [ ] list_workflow_types
-- [X] poll_for_activity_task
-- [X] poll_for_decision_task
-- [X] record_activity_task_heartbeat
-- [ ] register_activity_type
-- [X] register_domain
-- [ ] register_workflow_type
-- [ ] request_cancel_workflow_execution
-- [ ] respond_activity_task_canceled
-- [X] respond_activity_task_completed
-- [X] respond_activity_task_failed
-- [X] respond_decision_task_completed
-- [X] signal_workflow_execution
-- [X] start_workflow_execution
-- [X] terminate_workflow_execution
-
-## transcribe - 0% implemented
-- [ ] create_vocabulary
-- [ ] delete_vocabulary
-- [ ] get_transcription_job
-- [ ] get_vocabulary
-- [ ] list_transcription_jobs
-- [ ] list_vocabularies
-- [ ] start_transcription_job
-- [ ] update_vocabulary
-
-## translate - 0% implemented
-- [ ] translate_text
-
-## waf - 0% implemented
-- [ ] create_byte_match_set
-- [ ] create_geo_match_set
-- [ ] create_ip_set
-- [ ] create_rate_based_rule
-- [ ] create_regex_match_set
-- [ ] create_regex_pattern_set
-- [ ] create_rule
-- [ ] create_rule_group
-- [ ] create_size_constraint_set
-- [ ] create_sql_injection_match_set
-- [ ] create_web_acl
-- [ ] create_xss_match_set
-- [ ] delete_byte_match_set
-- [ ] delete_geo_match_set
-- [ ] delete_ip_set
-- [ ] delete_permission_policy
-- [ ] delete_rate_based_rule
-- [ ] delete_regex_match_set
-- [ ] delete_regex_pattern_set
-- [ ] delete_rule
-- [ ] delete_rule_group
-- [ ] delete_size_constraint_set
-- [ ] delete_sql_injection_match_set
-- [ ] delete_web_acl
-- [ ] delete_xss_match_set
-- [ ] get_byte_match_set
-- [ ] get_change_token
-- [ ] get_change_token_status
-- [ ] get_geo_match_set
-- [ ] get_ip_set
-- [ ] get_permission_policy
-- [ ] get_rate_based_rule
-- [ ] get_rate_based_rule_managed_keys
-- [ ] get_regex_match_set
-- [ ] get_regex_pattern_set
-- [ ] get_rule
-- [ ] get_rule_group
-- [ ] get_sampled_requests
-- [ ] get_size_constraint_set
-- [ ] get_sql_injection_match_set
-- [ ] get_web_acl
-- [ ] get_xss_match_set
-- [ ] list_activated_rules_in_rule_group
-- [ ] list_byte_match_sets
-- [ ] list_geo_match_sets
-- [ ] list_ip_sets
-- [ ] list_rate_based_rules
-- [ ] list_regex_match_sets
-- [ ] list_regex_pattern_sets
-- [ ] list_rule_groups
-- [ ] list_rules
-- [ ] list_size_constraint_sets
-- [ ] list_sql_injection_match_sets
-- [ ] list_subscribed_rule_groups
-- [ ] list_web_acls
-- [ ] list_xss_match_sets
-- [ ] put_permission_policy
-- [ ] update_byte_match_set
-- [ ] update_geo_match_set
-- [ ] update_ip_set
-- [ ] update_rate_based_rule
-- [ ] update_regex_match_set
-- [ ] update_regex_pattern_set
-- [ ] update_rule
-- [ ] update_rule_group
-- [ ] update_size_constraint_set
-- [ ] update_sql_injection_match_set
-- [ ] update_web_acl
-- [ ] update_xss_match_set
-
-## waf-regional - 0% implemented
-- [ ] associate_web_acl
-- [ ] create_byte_match_set
-- [ ] create_geo_match_set
-- [ ] create_ip_set
-- [ ] create_rate_based_rule
-- [ ] create_regex_match_set
-- [ ] create_regex_pattern_set
-- [ ] create_rule
-- [ ] create_rule_group
-- [ ] create_size_constraint_set
-- [ ] create_sql_injection_match_set
-- [ ] create_web_acl
-- [ ] create_xss_match_set
-- [ ] delete_byte_match_set
-- [ ] delete_geo_match_set
-- [ ] delete_ip_set
-- [ ] delete_permission_policy
-- [ ] delete_rate_based_rule
-- [ ] delete_regex_match_set
-- [ ] delete_regex_pattern_set
-- [ ] delete_rule
-- [ ] delete_rule_group
-- [ ] delete_size_constraint_set
-- [ ] delete_sql_injection_match_set
-- [ ] delete_web_acl
-- [ ] delete_xss_match_set
-- [ ] disassociate_web_acl
-- [ ] get_byte_match_set
-- [ ] get_change_token
-- [ ] get_change_token_status
-- [ ] get_geo_match_set
-- [ ] get_ip_set
-- [ ] get_permission_policy
-- [ ] get_rate_based_rule
-- [ ] get_rate_based_rule_managed_keys
-- [ ] get_regex_match_set
-- [ ] get_regex_pattern_set
-- [ ] get_rule
-- [ ] get_rule_group
-- [ ] get_sampled_requests
-- [ ] get_size_constraint_set
-- [ ] get_sql_injection_match_set
-- [ ] get_web_acl
-- [ ] get_web_acl_for_resource
-- [ ] get_xss_match_set
-- [ ] list_activated_rules_in_rule_group
-- [ ] list_byte_match_sets
-- [ ] list_geo_match_sets
-- [ ] list_ip_sets
-- [ ] list_rate_based_rules
-- [ ] list_regex_match_sets
-- [ ] list_regex_pattern_sets
-- [ ] list_resources_for_web_acl
-- [ ] list_rule_groups
-- [ ] list_rules
-- [ ] list_size_constraint_sets
-- [ ] list_sql_injection_match_sets
-- [ ] list_subscribed_rule_groups
-- [ ] list_web_acls
-- [ ] list_xss_match_sets
-- [ ] put_permission_policy
-- [ ] update_byte_match_set
-- [ ] update_geo_match_set
-- [ ] update_ip_set
-- [ ] update_rate_based_rule
-- [ ] update_regex_match_set
-- [ ] update_regex_pattern_set
-- [ ] update_rule
-- [ ] update_rule_group
-- [ ] update_size_constraint_set
-- [ ] update_sql_injection_match_set
-- [ ] update_web_acl
-- [ ] update_xss_match_set
-
-## workdocs - 0% implemented
-- [ ] abort_document_version_upload
-- [ ] activate_user
-- [ ] add_resource_permissions
-- [ ] create_comment
-- [ ] create_custom_metadata
-- [ ] create_folder
-- [ ] create_labels
-- [ ] create_notification_subscription
-- [ ] create_user
-- [ ] deactivate_user
-- [ ] delete_comment
-- [ ] delete_custom_metadata
-- [ ] delete_document
-- [ ] delete_folder
-- [ ] delete_folder_contents
-- [ ] delete_labels
-- [ ] delete_notification_subscription
-- [ ] delete_user
-- [ ] describe_activities
-- [ ] describe_comments
-- [ ] describe_document_versions
-- [ ] describe_folder_contents
-- [ ] describe_groups
-- [ ] describe_notification_subscriptions
-- [ ] describe_resource_permissions
-- [ ] describe_root_folders
-- [ ] describe_users
-- [ ] get_current_user
-- [ ] get_document
-- [ ] get_document_path
-- [ ] get_document_version
-- [ ] get_folder
-- [ ] get_folder_path
-- [ ] initiate_document_version_upload
-- [ ] remove_all_resource_permissions
-- [ ] remove_resource_permission
-- [ ] update_document
-- [ ] update_document_version
-- [ ] update_folder
-- [ ] update_user
-
-## workmail - 0% implemented
-- [ ] associate_delegate_to_resource
-- [ ] associate_member_to_group
-- [ ] create_alias
-- [ ] create_group
-- [ ] create_resource
-- [ ] create_user
-- [ ] delete_alias
-- [ ] delete_group
-- [ ] delete_mailbox_permissions
-- [ ] delete_resource
-- [ ] delete_user
-- [ ] deregister_from_work_mail
-- [ ] describe_group
-- [ ] describe_organization
-- [ ] describe_resource
-- [ ] describe_user
-- [ ] disassociate_delegate_from_resource
-- [ ] disassociate_member_from_group
-- [ ] list_aliases
-- [ ] list_group_members
-- [ ] list_groups
-- [ ] list_mailbox_permissions
-- [ ] list_organizations
-- [ ] list_resource_delegates
-- [ ] list_resources
-- [ ] list_users
-- [ ] put_mailbox_permissions
-- [ ] register_to_work_mail
-- [ ] reset_password
-- [ ] update_primary_email_address
-- [ ] update_resource
-
-## workspaces - 0% implemented
-- [ ] create_tags
-- [ ] create_workspaces
-- [ ] delete_tags
-- [ ] describe_tags
-- [ ] describe_workspace_bundles
-- [ ] describe_workspace_directories
-- [ ] describe_workspaces
-- [ ] describe_workspaces_connection_status
-- [ ] modify_workspace_properties
-- [ ] reboot_workspaces
-- [ ] rebuild_workspaces
-- [ ] start_workspaces
-- [ ] stop_workspaces
-- [ ] terminate_workspaces
-
-## xray - 0% implemented
-- [ ] batch_get_traces
-- [ ] get_service_graph
-- [ ] get_trace_graph
-- [ ] get_trace_summaries
-- [ ] put_telemetry_records
-- [ ] put_trace_segments
+
+## acm - 41% implemented
+- [X] add_tags_to_certificate
+- [X] delete_certificate
+- [ ] describe_certificate
+- [ ] export_certificate
+- [X] get_certificate
+- [ ] import_certificate
+- [ ] list_certificates
+- [ ] list_tags_for_certificate
+- [X] remove_tags_from_certificate
+- [X] request_certificate
+- [ ] resend_validation_email
+- [ ] update_certificate_options
+
+## acm-pca - 0% implemented
+- [ ]
create_certificate_authority +- [ ] create_certificate_authority_audit_report +- [ ] delete_certificate_authority +- [ ] describe_certificate_authority +- [ ] describe_certificate_authority_audit_report +- [ ] get_certificate +- [ ] get_certificate_authority_certificate +- [ ] get_certificate_authority_csr +- [ ] import_certificate_authority_certificate +- [ ] issue_certificate +- [ ] list_certificate_authorities +- [ ] list_tags +- [ ] restore_certificate_authority +- [ ] revoke_certificate +- [ ] tag_certificate_authority +- [ ] untag_certificate_authority +- [ ] update_certificate_authority + +## alexaforbusiness - 0% implemented +- [ ] associate_contact_with_address_book +- [ ] associate_device_with_room +- [ ] associate_skill_group_with_room +- [ ] create_address_book +- [ ] create_contact +- [ ] create_profile +- [ ] create_room +- [ ] create_skill_group +- [ ] create_user +- [ ] delete_address_book +- [ ] delete_contact +- [ ] delete_profile +- [ ] delete_room +- [ ] delete_room_skill_parameter +- [ ] delete_skill_group +- [ ] delete_user +- [ ] disassociate_contact_from_address_book +- [ ] disassociate_device_from_room +- [ ] disassociate_skill_group_from_room +- [ ] get_address_book +- [ ] get_contact +- [ ] get_device +- [ ] get_profile +- [ ] get_room +- [ ] get_room_skill_parameter +- [ ] get_skill_group +- [ ] list_device_events +- [ ] list_skills +- [ ] list_tags +- [ ] put_room_skill_parameter +- [ ] resolve_room +- [ ] revoke_invitation +- [ ] search_address_books +- [ ] search_contacts +- [ ] search_devices +- [ ] search_profiles +- [ ] search_rooms +- [ ] search_skill_groups +- [ ] search_users +- [ ] send_invitation +- [ ] start_device_sync +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_address_book +- [ ] update_contact +- [ ] update_device +- [ ] update_profile +- [ ] update_room +- [ ] update_skill_group + +## apigateway - 24% implemented +- [ ] create_api_key +- [ ] create_authorizer +- [ ] create_base_path_mapping +- [X] create_deployment +- [ ] create_documentation_part +- [ ] create_documentation_version +- [ ] create_domain_name +- [ ] create_model +- [ ] create_request_validator +- [X] create_resource +- [X] create_rest_api +- [X] create_stage +- [X] create_usage_plan +- [X] create_usage_plan_key +- [ ] create_vpc_link +- [ ] delete_api_key +- [ ] delete_authorizer +- [ ] delete_base_path_mapping +- [ ] delete_client_certificate +- [X] delete_deployment +- [ ] delete_documentation_part +- [ ] delete_documentation_version +- [ ] delete_domain_name +- [ ] delete_gateway_response +- [X] delete_integration +- [X] delete_integration_response +- [ ] delete_method +- [X] delete_method_response +- [ ] delete_model +- [ ] delete_request_validator +- [X] delete_resource +- [X] delete_rest_api +- [ ] delete_stage +- [X] delete_usage_plan +- [X] delete_usage_plan_key +- [ ] delete_vpc_link +- [ ] flush_stage_authorizers_cache +- [ ] flush_stage_cache +- [ ] generate_client_certificate +- [ ] get_account +- [ ] get_api_key +- [ ] get_api_keys +- [ ] get_authorizer +- [ ] get_authorizers +- [ ] get_base_path_mapping +- [ ] get_base_path_mappings +- [ ] get_client_certificate +- [ ] get_client_certificates +- [X] get_deployment +- [X] get_deployments +- [ ] get_documentation_part +- [ ] get_documentation_parts +- [ ] get_documentation_version +- [ ] get_documentation_versions +- [ ] get_domain_name +- [ ] get_domain_names +- [ ] get_export +- [ ] get_gateway_response +- [ ] get_gateway_responses +- [X] get_integration +- [X] get_integration_response +- [X] get_method 
+- [X] get_method_response +- [ ] get_model +- [ ] get_model_template +- [ ] get_models +- [ ] get_request_validator +- [ ] get_request_validators +- [X] get_resource +- [ ] get_resources +- [X] get_rest_api +- [ ] get_rest_apis +- [ ] get_sdk +- [ ] get_sdk_type +- [ ] get_sdk_types +- [X] get_stage +- [X] get_stages +- [ ] get_tags +- [ ] get_usage +- [X] get_usage_plan +- [X] get_usage_plan_key +- [X] get_usage_plan_keys +- [X] get_usage_plans +- [ ] get_vpc_link +- [ ] get_vpc_links +- [ ] import_api_keys +- [ ] import_documentation_parts +- [ ] import_rest_api +- [ ] put_gateway_response +- [ ] put_integration +- [ ] put_integration_response +- [ ] put_method +- [ ] put_method_response +- [ ] put_rest_api +- [ ] tag_resource +- [ ] test_invoke_authorizer +- [ ] test_invoke_method +- [ ] untag_resource +- [ ] update_account +- [ ] update_api_key +- [ ] update_authorizer +- [ ] update_base_path_mapping +- [ ] update_client_certificate +- [ ] update_deployment +- [ ] update_documentation_part +- [ ] update_documentation_version +- [ ] update_domain_name +- [ ] update_gateway_response +- [ ] update_integration +- [ ] update_integration_response +- [ ] update_method +- [ ] update_method_response +- [ ] update_model +- [ ] update_request_validator +- [ ] update_resource +- [ ] update_rest_api +- [X] update_stage +- [ ] update_usage +- [ ] update_usage_plan +- [ ] update_vpc_link + +## application-autoscaling - 0% implemented +- [ ] delete_scaling_policy +- [ ] delete_scheduled_action +- [ ] deregister_scalable_target +- [ ] describe_scalable_targets +- [ ] describe_scaling_activities +- [ ] describe_scaling_policies +- [ ] describe_scheduled_actions +- [ ] put_scaling_policy +- [ ] put_scheduled_action +- [ ] register_scalable_target + +## appstream - 0% implemented +- [ ] associate_fleet +- [ ] copy_image +- [ ] create_directory_config +- [ ] create_fleet +- [ ] create_image_builder +- [ ] create_image_builder_streaming_url +- [ ] create_stack +- [ ] create_streaming_url +- [ ] delete_directory_config +- [ ] delete_fleet +- [ ] delete_image +- [ ] delete_image_builder +- [ ] delete_image_permissions +- [ ] delete_stack +- [ ] describe_directory_configs +- [ ] describe_fleets +- [ ] describe_image_builders +- [ ] describe_image_permissions +- [ ] describe_images +- [ ] describe_sessions +- [ ] describe_stacks +- [ ] disassociate_fleet +- [ ] expire_session +- [ ] list_associated_fleets +- [ ] list_associated_stacks +- [ ] list_tags_for_resource +- [ ] start_fleet +- [ ] start_image_builder +- [ ] stop_fleet +- [ ] stop_image_builder +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_directory_config +- [ ] update_fleet +- [ ] update_image_permissions +- [ ] update_stack + +## appsync - 0% implemented +- [ ] create_api_key +- [ ] create_data_source +- [ ] create_graphql_api +- [ ] create_resolver +- [ ] create_type +- [ ] delete_api_key +- [ ] delete_data_source +- [ ] delete_graphql_api +- [ ] delete_resolver +- [ ] delete_type +- [ ] get_data_source +- [ ] get_graphql_api +- [ ] get_introspection_schema +- [ ] get_resolver +- [ ] get_schema_creation_status +- [ ] get_type +- [ ] list_api_keys +- [ ] list_data_sources +- [ ] list_graphql_apis +- [ ] list_resolvers +- [ ] list_types +- [ ] start_schema_creation +- [ ] update_api_key +- [ ] update_data_source +- [ ] update_graphql_api +- [ ] update_resolver +- [ ] update_type + +## athena - 0% implemented +- [ ] batch_get_named_query +- [ ] batch_get_query_execution +- [ ] create_named_query +- [ ] delete_named_query +- [ ] 
get_named_query +- [ ] get_query_execution +- [ ] get_query_results +- [ ] list_named_queries +- [ ] list_query_executions +- [ ] start_query_execution +- [ ] stop_query_execution + +## autoscaling - 42% implemented +- [X] attach_instances +- [X] attach_load_balancer_target_groups +- [X] attach_load_balancers +- [ ] batch_delete_scheduled_action +- [ ] batch_put_scheduled_update_group_action +- [ ] complete_lifecycle_action +- [X] create_auto_scaling_group +- [X] create_launch_configuration +- [X] create_or_update_tags +- [X] delete_auto_scaling_group +- [X] delete_launch_configuration +- [ ] delete_lifecycle_hook +- [ ] delete_notification_configuration +- [X] delete_policy +- [ ] delete_scheduled_action +- [ ] delete_tags +- [ ] describe_account_limits +- [ ] describe_adjustment_types +- [X] describe_auto_scaling_groups +- [X] describe_auto_scaling_instances +- [ ] describe_auto_scaling_notification_types +- [X] describe_launch_configurations +- [ ] describe_lifecycle_hook_types +- [ ] describe_lifecycle_hooks +- [X] describe_load_balancer_target_groups +- [X] describe_load_balancers +- [ ] describe_metric_collection_types +- [ ] describe_notification_configurations +- [X] describe_policies +- [ ] describe_scaling_activities +- [ ] describe_scaling_process_types +- [ ] describe_scheduled_actions +- [ ] describe_tags +- [ ] describe_termination_policy_types +- [X] detach_instances +- [X] detach_load_balancer_target_groups +- [X] detach_load_balancers +- [ ] disable_metrics_collection +- [ ] enable_metrics_collection +- [ ] enter_standby +- [X] execute_policy +- [ ] exit_standby +- [ ] put_lifecycle_hook +- [ ] put_notification_configuration +- [ ] put_scaling_policy +- [ ] put_scheduled_update_group_action +- [ ] record_lifecycle_action_heartbeat +- [ ] resume_processes +- [X] set_desired_capacity +- [X] set_instance_health +- [ ] set_instance_protection +- [X] suspend_processes +- [ ] terminate_instance_in_auto_scaling_group +- [X] update_auto_scaling_group + +## autoscaling-plans - 0% implemented +- [ ] create_scaling_plan +- [ ] delete_scaling_plan +- [ ] describe_scaling_plan_resources +- [ ] describe_scaling_plans +- [ ] update_scaling_plan + +## batch - 93% implemented +- [ ] cancel_job +- [X] create_compute_environment +- [X] create_job_queue +- [X] delete_compute_environment +- [X] delete_job_queue +- [X] deregister_job_definition +- [X] describe_compute_environments +- [X] describe_job_definitions +- [X] describe_job_queues +- [X] describe_jobs +- [X] list_jobs +- [X] register_job_definition +- [X] submit_job +- [X] terminate_job +- [X] update_compute_environment +- [X] update_job_queue + +## budgets - 0% implemented +- [ ] create_budget +- [ ] create_notification +- [ ] create_subscriber +- [ ] delete_budget +- [ ] delete_notification +- [ ] delete_subscriber +- [ ] describe_budget +- [ ] describe_budgets +- [ ] describe_notifications_for_budget +- [ ] describe_subscribers_for_notification +- [ ] update_budget +- [ ] update_notification +- [ ] update_subscriber + +## ce - 0% implemented +- [ ] get_cost_and_usage +- [ ] get_dimension_values +- [ ] get_reservation_coverage +- [ ] get_reservation_purchase_recommendation +- [ ] get_reservation_utilization +- [ ] get_tags + +## cloud9 - 0% implemented +- [ ] create_environment_ec2 +- [ ] create_environment_membership +- [ ] delete_environment +- [ ] delete_environment_membership +- [ ] describe_environment_memberships +- [ ] describe_environment_status +- [ ] describe_environments +- [ ] list_environments +- [ ] update_environment +- 
[ ] update_environment_membership + +## clouddirectory - 0% implemented +- [ ] add_facet_to_object +- [ ] apply_schema +- [ ] attach_object +- [ ] attach_policy +- [ ] attach_to_index +- [ ] attach_typed_link +- [ ] batch_read +- [ ] batch_write +- [ ] create_directory +- [ ] create_facet +- [ ] create_index +- [ ] create_object +- [ ] create_schema +- [ ] create_typed_link_facet +- [ ] delete_directory +- [ ] delete_facet +- [ ] delete_object +- [ ] delete_schema +- [ ] delete_typed_link_facet +- [ ] detach_from_index +- [ ] detach_object +- [ ] detach_policy +- [ ] detach_typed_link +- [ ] disable_directory +- [ ] enable_directory +- [ ] get_applied_schema_version +- [ ] get_directory +- [ ] get_facet +- [ ] get_link_attributes +- [ ] get_object_attributes +- [ ] get_object_information +- [ ] get_schema_as_json +- [ ] get_typed_link_facet_information +- [ ] list_applied_schema_arns +- [ ] list_attached_indices +- [ ] list_development_schema_arns +- [ ] list_directories +- [ ] list_facet_attributes +- [ ] list_facet_names +- [ ] list_incoming_typed_links +- [ ] list_index +- [ ] list_managed_schema_arns +- [ ] list_object_attributes +- [ ] list_object_children +- [ ] list_object_parent_paths +- [ ] list_object_parents +- [ ] list_object_policies +- [ ] list_outgoing_typed_links +- [ ] list_policy_attachments +- [ ] list_published_schema_arns +- [ ] list_tags_for_resource +- [ ] list_typed_link_facet_attributes +- [ ] list_typed_link_facet_names +- [ ] lookup_policy +- [ ] publish_schema +- [ ] put_schema_from_json +- [ ] remove_facet_from_object +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_facet +- [ ] update_link_attributes +- [ ] update_object_attributes +- [ ] update_schema +- [ ] update_typed_link_facet +- [ ] upgrade_applied_schema +- [ ] upgrade_published_schema + +## cloudformation - 21% implemented +- [ ] cancel_update_stack +- [ ] continue_update_rollback +- [X] create_change_set +- [X] create_stack +- [ ] create_stack_instances +- [ ] create_stack_set +- [ ] delete_change_set +- [X] delete_stack +- [ ] delete_stack_instances +- [ ] delete_stack_set +- [ ] describe_account_limits +- [ ] describe_change_set +- [ ] describe_stack_events +- [ ] describe_stack_instance +- [ ] describe_stack_resource +- [ ] describe_stack_resources +- [ ] describe_stack_set +- [ ] describe_stack_set_operation +- [X] describe_stacks +- [ ] estimate_template_cost +- [X] execute_change_set +- [ ] get_stack_policy +- [ ] get_template +- [ ] get_template_summary +- [ ] list_change_sets +- [X] list_exports +- [ ] list_imports +- [ ] list_stack_instances +- [X] list_stack_resources +- [ ] list_stack_set_operation_results +- [ ] list_stack_set_operations +- [ ] list_stack_sets +- [X] list_stacks +- [ ] set_stack_policy +- [ ] signal_resource +- [ ] stop_stack_set_operation +- [X] update_stack +- [ ] update_stack_instances +- [ ] update_stack_set +- [ ] update_termination_protection +- [ ] validate_template + +## cloudfront - 0% implemented +- [ ] create_cloud_front_origin_access_identity +- [ ] create_distribution +- [ ] create_distribution_with_tags +- [ ] create_field_level_encryption_config +- [ ] create_field_level_encryption_profile +- [ ] create_invalidation +- [ ] create_public_key +- [ ] create_streaming_distribution +- [ ] create_streaming_distribution_with_tags +- [ ] delete_cloud_front_origin_access_identity +- [ ] delete_distribution +- [ ] delete_field_level_encryption_config +- [ ] delete_field_level_encryption_profile +- [ ] delete_public_key +- [ ] delete_streaming_distribution +- [ 
] get_cloud_front_origin_access_identity +- [ ] get_cloud_front_origin_access_identity_config +- [ ] get_distribution +- [ ] get_distribution_config +- [ ] get_field_level_encryption +- [ ] get_field_level_encryption_config +- [ ] get_field_level_encryption_profile +- [ ] get_field_level_encryption_profile_config +- [ ] get_invalidation +- [ ] get_public_key +- [ ] get_public_key_config +- [ ] get_streaming_distribution +- [ ] get_streaming_distribution_config +- [ ] list_cloud_front_origin_access_identities +- [ ] list_distributions +- [ ] list_distributions_by_web_acl_id +- [ ] list_field_level_encryption_configs +- [ ] list_field_level_encryption_profiles +- [ ] list_invalidations +- [ ] list_public_keys +- [ ] list_streaming_distributions +- [ ] list_tags_for_resource +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_cloud_front_origin_access_identity +- [ ] update_distribution +- [ ] update_field_level_encryption_config +- [ ] update_field_level_encryption_profile +- [ ] update_public_key +- [ ] update_streaming_distribution + +## cloudhsm - 0% implemented +- [ ] add_tags_to_resource +- [ ] create_hapg +- [ ] create_hsm +- [ ] create_luna_client +- [ ] delete_hapg +- [ ] delete_hsm +- [ ] delete_luna_client +- [ ] describe_hapg +- [ ] describe_hsm +- [ ] describe_luna_client +- [ ] get_config +- [ ] list_available_zones +- [ ] list_hapgs +- [ ] list_hsms +- [ ] list_luna_clients +- [ ] list_tags_for_resource +- [ ] modify_hapg +- [ ] modify_hsm +- [ ] modify_luna_client +- [ ] remove_tags_from_resource + +## cloudhsmv2 - 0% implemented +- [ ] copy_backup_to_region +- [ ] create_cluster +- [ ] create_hsm +- [ ] delete_cluster +- [ ] delete_hsm +- [ ] describe_backups +- [ ] describe_clusters +- [ ] initialize_cluster +- [ ] list_tags +- [ ] tag_resource +- [ ] untag_resource + +## cloudsearch - 0% implemented +- [ ] build_suggesters +- [ ] create_domain +- [ ] define_analysis_scheme +- [ ] define_expression +- [ ] define_index_field +- [ ] define_suggester +- [ ] delete_analysis_scheme +- [ ] delete_domain +- [ ] delete_expression +- [ ] delete_index_field +- [ ] delete_suggester +- [ ] describe_analysis_schemes +- [ ] describe_availability_options +- [ ] describe_domains +- [ ] describe_expressions +- [ ] describe_index_fields +- [ ] describe_scaling_parameters +- [ ] describe_service_access_policies +- [ ] describe_suggesters +- [ ] index_documents +- [ ] list_domain_names +- [ ] update_availability_options +- [ ] update_scaling_parameters +- [ ] update_service_access_policies + +## cloudsearchdomain - 0% implemented +- [ ] search +- [ ] suggest +- [ ] upload_documents + +## cloudtrail - 0% implemented +- [ ] add_tags +- [ ] create_trail +- [ ] delete_trail +- [ ] describe_trails +- [ ] get_event_selectors +- [ ] get_trail_status +- [ ] list_public_keys +- [ ] list_tags +- [ ] lookup_events +- [ ] put_event_selectors +- [ ] remove_tags +- [ ] start_logging +- [ ] stop_logging +- [ ] update_trail + +## cloudwatch - 56% implemented +- [X] delete_alarms +- [X] delete_dashboards +- [ ] describe_alarm_history +- [ ] describe_alarms +- [ ] describe_alarms_for_metric +- [ ] disable_alarm_actions +- [ ] enable_alarm_actions +- [X] get_dashboard +- [ ] get_metric_data +- [X] get_metric_statistics +- [X] list_dashboards +- [ ] list_metrics +- [X] put_dashboard +- [X] put_metric_alarm +- [X] put_metric_data +- [X] set_alarm_state + +## codebuild - 0% implemented +- [ ] batch_delete_builds +- [ ] batch_get_builds +- [ ] batch_get_projects +- [ ] create_project +- [ ] create_webhook +- [ ] 
delete_project +- [ ] delete_webhook +- [ ] invalidate_project_cache +- [ ] list_builds +- [ ] list_builds_for_project +- [ ] list_curated_environment_images +- [ ] list_projects +- [ ] start_build +- [ ] stop_build +- [ ] update_project +- [ ] update_webhook + +## codecommit - 0% implemented +- [ ] batch_get_repositories +- [ ] create_branch +- [ ] create_pull_request +- [ ] create_repository +- [ ] delete_branch +- [ ] delete_comment_content +- [ ] delete_repository +- [ ] describe_pull_request_events +- [ ] get_blob +- [ ] get_branch +- [ ] get_comment +- [ ] get_comments_for_compared_commit +- [ ] get_comments_for_pull_request +- [ ] get_commit +- [ ] get_differences +- [ ] get_merge_conflicts +- [ ] get_pull_request +- [ ] get_repository +- [ ] get_repository_triggers +- [ ] list_branches +- [ ] list_pull_requests +- [ ] list_repositories +- [ ] merge_pull_request_by_fast_forward +- [ ] post_comment_for_compared_commit +- [ ] post_comment_for_pull_request +- [ ] post_comment_reply +- [ ] put_file +- [ ] put_repository_triggers +- [ ] test_repository_triggers +- [ ] update_comment +- [ ] update_default_branch +- [ ] update_pull_request_description +- [ ] update_pull_request_status +- [ ] update_pull_request_title +- [ ] update_repository_description +- [ ] update_repository_name + +## codedeploy - 0% implemented +- [ ] add_tags_to_on_premises_instances +- [ ] batch_get_application_revisions +- [ ] batch_get_applications +- [ ] batch_get_deployment_groups +- [ ] batch_get_deployment_instances +- [ ] batch_get_deployments +- [ ] batch_get_on_premises_instances +- [ ] continue_deployment +- [ ] create_application +- [ ] create_deployment +- [ ] create_deployment_config +- [ ] create_deployment_group +- [ ] delete_application +- [ ] delete_deployment_config +- [ ] delete_deployment_group +- [ ] delete_git_hub_account_token +- [ ] deregister_on_premises_instance +- [ ] get_application +- [ ] get_application_revision +- [ ] get_deployment +- [ ] get_deployment_config +- [ ] get_deployment_group +- [ ] get_deployment_instance +- [ ] get_on_premises_instance +- [ ] list_application_revisions +- [ ] list_applications +- [ ] list_deployment_configs +- [ ] list_deployment_groups +- [ ] list_deployment_instances +- [ ] list_deployments +- [ ] list_git_hub_account_token_names +- [ ] list_on_premises_instances +- [ ] put_lifecycle_event_hook_execution_status +- [ ] register_application_revision +- [ ] register_on_premises_instance +- [ ] remove_tags_from_on_premises_instances +- [ ] skip_wait_time_for_instance_termination +- [ ] stop_deployment +- [ ] update_application +- [ ] update_deployment_group + +## codepipeline - 0% implemented +- [ ] acknowledge_job +- [ ] acknowledge_third_party_job +- [ ] create_custom_action_type +- [ ] create_pipeline +- [ ] delete_custom_action_type +- [ ] delete_pipeline +- [ ] delete_webhook +- [ ] deregister_webhook_with_third_party +- [ ] disable_stage_transition +- [ ] enable_stage_transition +- [ ] get_job_details +- [ ] get_pipeline +- [ ] get_pipeline_execution +- [ ] get_pipeline_state +- [ ] get_third_party_job_details +- [ ] list_action_types +- [ ] list_pipeline_executions +- [ ] list_pipelines +- [ ] list_webhooks +- [ ] poll_for_jobs +- [ ] poll_for_third_party_jobs +- [ ] put_action_revision +- [ ] put_approval_result +- [ ] put_job_failure_result +- [ ] put_job_success_result +- [ ] put_third_party_job_failure_result +- [ ] put_third_party_job_success_result +- [ ] put_webhook +- [ ] register_webhook_with_third_party +- [ ] retry_stage_execution +- [ ] 
start_pipeline_execution +- [ ] update_pipeline + +## codestar - 0% implemented +- [ ] associate_team_member +- [ ] create_project +- [ ] create_user_profile +- [ ] delete_project +- [ ] delete_user_profile +- [ ] describe_project +- [ ] describe_user_profile +- [ ] disassociate_team_member +- [ ] list_projects +- [ ] list_resources +- [ ] list_tags_for_project +- [ ] list_team_members +- [ ] list_user_profiles +- [ ] tag_project +- [ ] untag_project +- [ ] update_project +- [ ] update_team_member +- [ ] update_user_profile + +## cognito-identity - 22% implemented +- [X] create_identity_pool +- [ ] delete_identities +- [ ] delete_identity_pool +- [ ] describe_identity +- [ ] describe_identity_pool +- [X] get_credentials_for_identity +- [X] get_id +- [ ] get_identity_pool_roles +- [ ] get_open_id_token +- [X] get_open_id_token_for_developer_identity +- [ ] list_identities +- [ ] list_identity_pools +- [ ] lookup_developer_identity +- [ ] merge_developer_identities +- [ ] set_identity_pool_roles +- [ ] unlink_developer_identity +- [ ] unlink_identity +- [ ] update_identity_pool + +## cognito-idp - 27% implemented +- [ ] add_custom_attributes +- [ ] admin_add_user_to_group +- [ ] admin_confirm_sign_up +- [X] admin_create_user +- [X] admin_delete_user +- [ ] admin_delete_user_attributes +- [ ] admin_disable_provider_for_user +- [X] admin_disable_user +- [X] admin_enable_user +- [ ] admin_forget_device +- [ ] admin_get_device +- [X] admin_get_user +- [X] admin_initiate_auth +- [ ] admin_link_provider_for_user +- [ ] admin_list_devices +- [ ] admin_list_groups_for_user +- [ ] admin_list_user_auth_events +- [ ] admin_remove_user_from_group +- [ ] admin_reset_user_password +- [ ] admin_respond_to_auth_challenge +- [ ] admin_set_user_mfa_preference +- [ ] admin_set_user_settings +- [ ] admin_update_auth_event_feedback +- [ ] admin_update_device_status +- [ ] admin_update_user_attributes +- [ ] admin_user_global_sign_out +- [ ] associate_software_token +- [X] change_password +- [ ] confirm_device +- [X] confirm_forgot_password +- [ ] confirm_sign_up +- [ ] create_group +- [X] create_identity_provider +- [ ] create_resource_server +- [ ] create_user_import_job +- [X] create_user_pool +- [X] create_user_pool_client +- [X] create_user_pool_domain +- [ ] delete_group +- [X] delete_identity_provider +- [ ] delete_resource_server +- [ ] delete_user +- [ ] delete_user_attributes +- [X] delete_user_pool +- [X] delete_user_pool_client +- [X] delete_user_pool_domain +- [X] describe_identity_provider +- [ ] describe_resource_server +- [ ] describe_risk_configuration +- [ ] describe_user_import_job +- [X] describe_user_pool +- [X] describe_user_pool_client +- [X] describe_user_pool_domain +- [ ] forget_device +- [ ] forgot_password +- [ ] get_csv_header +- [ ] get_device +- [ ] get_group +- [ ] get_identity_provider_by_identifier +- [ ] get_signing_certificate +- [ ] get_ui_customization +- [ ] get_user +- [ ] get_user_attribute_verification_code +- [ ] get_user_pool_mfa_config +- [ ] global_sign_out +- [ ] initiate_auth +- [ ] list_devices +- [ ] list_groups +- [X] list_identity_providers +- [ ] list_resource_servers +- [ ] list_user_import_jobs +- [X] list_user_pool_clients +- [X] list_user_pools +- [X] list_users +- [ ] list_users_in_group +- [ ] resend_confirmation_code +- [X] respond_to_auth_challenge +- [ ] set_risk_configuration +- [ ] set_ui_customization +- [ ] set_user_mfa_preference +- [ ] set_user_pool_mfa_config +- [ ] set_user_settings +- [ ] sign_up +- [ ] start_user_import_job +- [ ] 
stop_user_import_job +- [ ] update_auth_event_feedback +- [ ] update_device_status +- [ ] update_group +- [ ] update_identity_provider +- [ ] update_resource_server +- [ ] update_user_attributes +- [ ] update_user_pool +- [X] update_user_pool_client +- [ ] verify_software_token +- [ ] verify_user_attribute + +## cognito-sync - 0% implemented +- [ ] bulk_publish +- [ ] delete_dataset +- [ ] describe_dataset +- [ ] describe_identity_pool_usage +- [ ] describe_identity_usage +- [ ] get_bulk_publish_details +- [ ] get_cognito_events +- [ ] get_identity_pool_configuration +- [ ] list_datasets +- [ ] list_identity_pool_usage +- [ ] list_records +- [ ] register_device +- [ ] set_cognito_events +- [ ] set_identity_pool_configuration +- [ ] subscribe_to_dataset +- [ ] unsubscribe_from_dataset +- [ ] update_records + +## comprehend - 0% implemented +- [ ] batch_detect_dominant_language +- [ ] batch_detect_entities +- [ ] batch_detect_key_phrases +- [ ] batch_detect_sentiment +- [ ] batch_detect_syntax +- [ ] describe_dominant_language_detection_job +- [ ] describe_entities_detection_job +- [ ] describe_key_phrases_detection_job +- [ ] describe_sentiment_detection_job +- [ ] describe_topics_detection_job +- [ ] detect_dominant_language +- [ ] detect_entities +- [ ] detect_key_phrases +- [ ] detect_sentiment +- [ ] detect_syntax +- [ ] list_dominant_language_detection_jobs +- [ ] list_entities_detection_jobs +- [ ] list_key_phrases_detection_jobs +- [ ] list_sentiment_detection_jobs +- [ ] list_topics_detection_jobs +- [ ] start_dominant_language_detection_job +- [ ] start_entities_detection_job +- [ ] start_key_phrases_detection_job +- [ ] start_sentiment_detection_job +- [ ] start_topics_detection_job +- [ ] stop_dominant_language_detection_job +- [ ] stop_entities_detection_job +- [ ] stop_key_phrases_detection_job +- [ ] stop_sentiment_detection_job + +## config - 0% implemented +- [ ] batch_get_resource_config +- [ ] delete_aggregation_authorization +- [ ] delete_config_rule +- [ ] delete_configuration_aggregator +- [ ] delete_configuration_recorder +- [ ] delete_delivery_channel +- [ ] delete_evaluation_results +- [ ] delete_pending_aggregation_request +- [ ] delete_retention_configuration +- [ ] deliver_config_snapshot +- [ ] describe_aggregate_compliance_by_config_rules +- [ ] describe_aggregation_authorizations +- [ ] describe_compliance_by_config_rule +- [ ] describe_compliance_by_resource +- [ ] describe_config_rule_evaluation_status +- [ ] describe_config_rules +- [ ] describe_configuration_aggregator_sources_status +- [ ] describe_configuration_aggregators +- [ ] describe_configuration_recorder_status +- [ ] describe_configuration_recorders +- [ ] describe_delivery_channel_status +- [ ] describe_delivery_channels +- [ ] describe_pending_aggregation_requests +- [ ] describe_retention_configurations +- [ ] get_aggregate_compliance_details_by_config_rule +- [ ] get_aggregate_config_rule_compliance_summary +- [ ] get_compliance_details_by_config_rule +- [ ] get_compliance_details_by_resource +- [ ] get_compliance_summary_by_config_rule +- [ ] get_compliance_summary_by_resource_type +- [ ] get_discovered_resource_counts +- [ ] get_resource_config_history +- [ ] list_discovered_resources +- [ ] put_aggregation_authorization +- [ ] put_config_rule +- [ ] put_configuration_aggregator +- [ ] put_configuration_recorder +- [ ] put_delivery_channel +- [ ] put_evaluations +- [ ] put_retention_configuration +- [ ] start_config_rules_evaluation +- [ ] start_configuration_recorder +- [ ] 
stop_configuration_recorder + +## connect - 0% implemented +- [ ] create_user +- [ ] delete_user +- [ ] describe_user +- [ ] describe_user_hierarchy_group +- [ ] describe_user_hierarchy_structure +- [ ] get_federation_token +- [ ] list_routing_profiles +- [ ] list_security_profiles +- [ ] list_user_hierarchy_groups +- [ ] list_users +- [ ] start_outbound_voice_contact +- [ ] stop_contact +- [ ] update_user_hierarchy +- [ ] update_user_identity_info +- [ ] update_user_phone_config +- [ ] update_user_routing_profile +- [ ] update_user_security_profiles + +## cur - 0% implemented +- [ ] delete_report_definition +- [ ] describe_report_definitions +- [ ] put_report_definition + +## datapipeline - 42% implemented +- [X] activate_pipeline +- [ ] add_tags +- [X] create_pipeline +- [ ] deactivate_pipeline +- [X] delete_pipeline +- [X] describe_objects +- [X] describe_pipelines +- [ ] evaluate_expression +- [X] get_pipeline_definition +- [X] list_pipelines +- [ ] poll_for_task +- [X] put_pipeline_definition +- [ ] query_objects +- [ ] remove_tags +- [ ] report_task_progress +- [ ] report_task_runner_heartbeat +- [ ] set_status +- [ ] set_task_status +- [ ] validate_pipeline_definition + +## dax - 0% implemented +- [ ] create_cluster +- [ ] create_parameter_group +- [ ] create_subnet_group +- [ ] decrease_replication_factor +- [ ] delete_cluster +- [ ] delete_parameter_group +- [ ] delete_subnet_group +- [ ] describe_clusters +- [ ] describe_default_parameters +- [ ] describe_events +- [ ] describe_parameter_groups +- [ ] describe_parameters +- [ ] describe_subnet_groups +- [ ] increase_replication_factor +- [ ] list_tags +- [ ] reboot_node +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_cluster +- [ ] update_parameter_group +- [ ] update_subnet_group + +## devicefarm - 0% implemented +- [ ] create_device_pool +- [ ] create_instance_profile +- [ ] create_network_profile +- [ ] create_project +- [ ] create_remote_access_session +- [ ] create_upload +- [ ] create_vpce_configuration +- [ ] delete_device_pool +- [ ] delete_instance_profile +- [ ] delete_network_profile +- [ ] delete_project +- [ ] delete_remote_access_session +- [ ] delete_run +- [ ] delete_upload +- [ ] delete_vpce_configuration +- [ ] get_account_settings +- [ ] get_device +- [ ] get_device_instance +- [ ] get_device_pool +- [ ] get_device_pool_compatibility +- [ ] get_instance_profile +- [ ] get_job +- [ ] get_network_profile +- [ ] get_offering_status +- [ ] get_project +- [ ] get_remote_access_session +- [ ] get_run +- [ ] get_suite +- [ ] get_test +- [ ] get_upload +- [ ] get_vpce_configuration +- [ ] install_to_remote_access_session +- [ ] list_artifacts +- [ ] list_device_instances +- [ ] list_device_pools +- [ ] list_devices +- [ ] list_instance_profiles +- [ ] list_jobs +- [ ] list_network_profiles +- [ ] list_offering_promotions +- [ ] list_offering_transactions +- [ ] list_offerings +- [ ] list_projects +- [ ] list_remote_access_sessions +- [ ] list_runs +- [ ] list_samples +- [ ] list_suites +- [ ] list_tests +- [ ] list_unique_problems +- [ ] list_uploads +- [ ] list_vpce_configurations +- [ ] purchase_offering +- [ ] renew_offering +- [ ] schedule_run +- [ ] stop_job +- [ ] stop_remote_access_session +- [ ] stop_run +- [ ] update_device_instance +- [ ] update_device_pool +- [ ] update_instance_profile +- [ ] update_network_profile +- [ ] update_project +- [ ] update_upload +- [ ] update_vpce_configuration + +## directconnect - 0% implemented +- [ ] allocate_connection_on_interconnect +- [ ] 
allocate_hosted_connection +- [ ] allocate_private_virtual_interface +- [ ] allocate_public_virtual_interface +- [ ] associate_connection_with_lag +- [ ] associate_hosted_connection +- [ ] associate_virtual_interface +- [ ] confirm_connection +- [ ] confirm_private_virtual_interface +- [ ] confirm_public_virtual_interface +- [ ] create_bgp_peer +- [ ] create_connection +- [ ] create_direct_connect_gateway +- [ ] create_direct_connect_gateway_association +- [ ] create_interconnect +- [ ] create_lag +- [ ] create_private_virtual_interface +- [ ] create_public_virtual_interface +- [ ] delete_bgp_peer +- [ ] delete_connection +- [ ] delete_direct_connect_gateway +- [ ] delete_direct_connect_gateway_association +- [ ] delete_interconnect +- [ ] delete_lag +- [ ] delete_virtual_interface +- [ ] describe_connection_loa +- [ ] describe_connections +- [ ] describe_connections_on_interconnect +- [ ] describe_direct_connect_gateway_associations +- [ ] describe_direct_connect_gateway_attachments +- [ ] describe_direct_connect_gateways +- [ ] describe_hosted_connections +- [ ] describe_interconnect_loa +- [ ] describe_interconnects +- [ ] describe_lags +- [ ] describe_loa +- [ ] describe_locations +- [ ] describe_tags +- [ ] describe_virtual_gateways +- [ ] describe_virtual_interfaces +- [ ] disassociate_connection_from_lag +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_lag + +## discovery - 0% implemented +- [ ] associate_configuration_items_to_application +- [ ] create_application +- [ ] create_tags +- [ ] delete_applications +- [ ] delete_tags +- [ ] describe_agents +- [ ] describe_configurations +- [ ] describe_continuous_exports +- [ ] describe_export_configurations +- [ ] describe_export_tasks +- [ ] describe_tags +- [ ] disassociate_configuration_items_from_application +- [ ] export_configurations +- [ ] get_discovery_summary +- [ ] list_configurations +- [ ] list_server_neighbors +- [ ] start_continuous_export +- [ ] start_data_collection_by_agent_ids +- [ ] start_export_task +- [ ] stop_continuous_export +- [ ] stop_data_collection_by_agent_ids +- [ ] update_application + +## dlm - 0% implemented +- [ ] create_lifecycle_policy +- [ ] delete_lifecycle_policy +- [ ] get_lifecycle_policies +- [ ] get_lifecycle_policy +- [ ] update_lifecycle_policy + +## dms - 0% implemented +- [ ] add_tags_to_resource +- [ ] create_endpoint +- [ ] create_event_subscription +- [ ] create_replication_instance +- [ ] create_replication_subnet_group +- [ ] create_replication_task +- [ ] delete_certificate +- [ ] delete_endpoint +- [ ] delete_event_subscription +- [ ] delete_replication_instance +- [ ] delete_replication_subnet_group +- [ ] delete_replication_task +- [ ] describe_account_attributes +- [ ] describe_certificates +- [ ] describe_connections +- [ ] describe_endpoint_types +- [ ] describe_endpoints +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_orderable_replication_instances +- [ ] describe_refresh_schemas_status +- [ ] describe_replication_instance_task_logs +- [ ] describe_replication_instances +- [ ] describe_replication_subnet_groups +- [ ] describe_replication_task_assessment_results +- [ ] describe_replication_tasks +- [ ] describe_schemas +- [ ] describe_table_statistics +- [ ] import_certificate +- [ ] list_tags_for_resource +- [ ] modify_endpoint +- [ ] modify_event_subscription +- [ ] modify_replication_instance +- [ ] modify_replication_subnet_group +- [ ] modify_replication_task +- [ ] reboot_replication_instance +- [ ] 
refresh_schemas +- [ ] reload_tables +- [ ] remove_tags_from_resource +- [ ] start_replication_task +- [ ] start_replication_task_assessment +- [ ] stop_replication_task +- [ ] test_connection + +## ds - 0% implemented +- [ ] add_ip_routes +- [ ] add_tags_to_resource +- [ ] cancel_schema_extension +- [ ] connect_directory +- [ ] create_alias +- [ ] create_computer +- [ ] create_conditional_forwarder +- [ ] create_directory +- [ ] create_microsoft_ad +- [ ] create_snapshot +- [ ] create_trust +- [ ] delete_conditional_forwarder +- [ ] delete_directory +- [ ] delete_snapshot +- [ ] delete_trust +- [ ] deregister_event_topic +- [ ] describe_conditional_forwarders +- [ ] describe_directories +- [ ] describe_domain_controllers +- [ ] describe_event_topics +- [ ] describe_snapshots +- [ ] describe_trusts +- [ ] disable_radius +- [ ] disable_sso +- [ ] enable_radius +- [ ] enable_sso +- [ ] get_directory_limits +- [ ] get_snapshot_limits +- [ ] list_ip_routes +- [ ] list_schema_extensions +- [ ] list_tags_for_resource +- [ ] register_event_topic +- [ ] remove_ip_routes +- [ ] remove_tags_from_resource +- [ ] reset_user_password +- [ ] restore_from_snapshot +- [ ] start_schema_extension +- [ ] update_conditional_forwarder +- [ ] update_number_of_domain_controllers +- [ ] update_radius +- [ ] verify_trust + +## dynamodb - 21% implemented +- [ ] batch_get_item +- [ ] batch_write_item +- [ ] create_backup +- [ ] create_global_table +- [X] create_table +- [ ] delete_backup +- [X] delete_item +- [X] delete_table +- [ ] describe_backup +- [ ] describe_continuous_backups +- [ ] describe_global_table +- [ ] describe_global_table_settings +- [ ] describe_limits +- [ ] describe_table +- [ ] describe_time_to_live +- [X] get_item +- [ ] list_backups +- [ ] list_global_tables +- [ ] list_tables +- [ ] list_tags_of_resource +- [X] put_item +- [X] query +- [ ] restore_table_from_backup +- [ ] restore_table_to_point_in_time +- [X] scan +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_continuous_backups +- [ ] update_global_table +- [ ] update_global_table_settings +- [ ] update_item +- [ ] update_table +- [ ] update_time_to_live + +## dynamodbstreams - 0% implemented +- [ ] describe_stream +- [ ] get_records +- [ ] get_shard_iterator +- [ ] list_streams + +## ec2 - 36% implemented +- [ ] accept_reserved_instances_exchange_quote +- [ ] accept_vpc_endpoint_connections +- [X] accept_vpc_peering_connection +- [X] allocate_address +- [ ] allocate_hosts +- [ ] assign_ipv6_addresses +- [ ] assign_private_ip_addresses +- [X] associate_address +- [X] associate_dhcp_options +- [ ] associate_iam_instance_profile +- [X] associate_route_table +- [ ] associate_subnet_cidr_block +- [X] associate_vpc_cidr_block +- [ ] attach_classic_link_vpc +- [X] attach_internet_gateway +- [X] attach_network_interface +- [X] attach_volume +- [X] attach_vpn_gateway +- [X] authorize_security_group_egress +- [X] authorize_security_group_ingress +- [ ] bundle_instance +- [ ] cancel_bundle_task +- [ ] cancel_conversion_task +- [ ] cancel_export_task +- [ ] cancel_import_task +- [ ] cancel_reserved_instances_listing +- [X] cancel_spot_fleet_requests +- [X] cancel_spot_instance_requests +- [ ] confirm_product_instance +- [ ] copy_fpga_image +- [X] copy_image +- [X] copy_snapshot +- [X] create_customer_gateway +- [ ] create_default_subnet +- [ ] create_default_vpc +- [X] create_dhcp_options +- [ ] create_egress_only_internet_gateway +- [ ] create_fleet +- [ ] create_flow_logs +- [ ] create_fpga_image +- [X] create_image +- [ ] 
create_instance_export_task +- [X] create_internet_gateway +- [X] create_key_pair +- [ ] create_launch_template +- [ ] create_launch_template_version +- [X] create_nat_gateway +- [X] create_network_acl +- [X] create_network_acl_entry +- [X] create_network_interface +- [ ] create_network_interface_permission +- [ ] create_placement_group +- [ ] create_reserved_instances_listing +- [X] create_route +- [X] create_route_table +- [X] create_security_group +- [X] create_snapshot +- [ ] create_spot_datafeed_subscription +- [X] create_subnet +- [X] create_tags +- [X] create_volume +- [X] create_vpc +- [ ] create_vpc_endpoint +- [ ] create_vpc_endpoint_connection_notification +- [ ] create_vpc_endpoint_service_configuration +- [X] create_vpc_peering_connection +- [X] create_vpn_connection +- [ ] create_vpn_connection_route +- [X] create_vpn_gateway +- [X] delete_customer_gateway +- [ ] delete_dhcp_options +- [ ] delete_egress_only_internet_gateway +- [ ] delete_fleets +- [ ] delete_flow_logs +- [ ] delete_fpga_image +- [X] delete_internet_gateway +- [X] delete_key_pair +- [ ] delete_launch_template +- [ ] delete_launch_template_versions +- [X] delete_nat_gateway +- [X] delete_network_acl +- [X] delete_network_acl_entry +- [X] delete_network_interface +- [ ] delete_network_interface_permission +- [ ] delete_placement_group +- [X] delete_route +- [X] delete_route_table +- [X] delete_security_group +- [X] delete_snapshot +- [ ] delete_spot_datafeed_subscription +- [X] delete_subnet +- [X] delete_tags +- [X] delete_volume +- [X] delete_vpc +- [ ] delete_vpc_endpoint_connection_notifications +- [ ] delete_vpc_endpoint_service_configurations +- [ ] delete_vpc_endpoints +- [X] delete_vpc_peering_connection +- [X] delete_vpn_connection +- [ ] delete_vpn_connection_route +- [X] delete_vpn_gateway +- [X] deregister_image +- [ ] describe_account_attributes +- [X] describe_addresses +- [ ] describe_aggregate_id_format +- [X] describe_availability_zones +- [ ] describe_bundle_tasks +- [ ] describe_classic_link_instances +- [ ] describe_conversion_tasks +- [ ] describe_customer_gateways +- [X] describe_dhcp_options +- [ ] describe_egress_only_internet_gateways +- [ ] describe_elastic_gpus +- [ ] describe_export_tasks +- [ ] describe_fleet_history +- [ ] describe_fleet_instances +- [ ] describe_fleets +- [ ] describe_flow_logs +- [ ] describe_fpga_image_attribute +- [ ] describe_fpga_images +- [ ] describe_host_reservation_offerings +- [ ] describe_host_reservations +- [ ] describe_hosts +- [ ] describe_iam_instance_profile_associations +- [ ] describe_id_format +- [ ] describe_identity_id_format +- [ ] describe_image_attribute +- [X] describe_images +- [ ] describe_import_image_tasks +- [ ] describe_import_snapshot_tasks +- [X] describe_instance_attribute +- [ ] describe_instance_credit_specifications +- [ ] describe_instance_status +- [ ] describe_instances +- [X] describe_internet_gateways +- [X] describe_key_pairs +- [ ] describe_launch_template_versions +- [ ] describe_launch_templates +- [ ] describe_moving_addresses +- [ ] describe_nat_gateways +- [ ] describe_network_acls +- [ ] describe_network_interface_attribute +- [ ] describe_network_interface_permissions +- [X] describe_network_interfaces +- [ ] describe_placement_groups +- [ ] describe_prefix_lists +- [ ] describe_principal_id_format +- [X] describe_regions +- [ ] describe_reserved_instances +- [ ] describe_reserved_instances_listings +- [ ] describe_reserved_instances_modifications +- [ ] describe_reserved_instances_offerings +- [ ] 
describe_route_tables +- [ ] describe_scheduled_instance_availability +- [ ] describe_scheduled_instances +- [ ] describe_security_group_references +- [X] describe_security_groups +- [ ] describe_snapshot_attribute +- [X] describe_snapshots +- [ ] describe_spot_datafeed_subscription +- [X] describe_spot_fleet_instances +- [ ] describe_spot_fleet_request_history +- [X] describe_spot_fleet_requests +- [X] describe_spot_instance_requests +- [ ] describe_spot_price_history +- [ ] describe_stale_security_groups +- [ ] describe_subnets +- [X] describe_tags +- [ ] describe_volume_attribute +- [ ] describe_volume_status +- [X] describe_volumes +- [ ] describe_volumes_modifications +- [X] describe_vpc_attribute +- [ ] describe_vpc_classic_link +- [ ] describe_vpc_classic_link_dns_support +- [ ] describe_vpc_endpoint_connection_notifications +- [ ] describe_vpc_endpoint_connections +- [ ] describe_vpc_endpoint_service_configurations +- [ ] describe_vpc_endpoint_service_permissions +- [ ] describe_vpc_endpoint_services +- [ ] describe_vpc_endpoints +- [ ] describe_vpc_peering_connections +- [ ] describe_vpcs +- [X] describe_vpn_connections +- [ ] describe_vpn_gateways +- [ ] detach_classic_link_vpc +- [X] detach_internet_gateway +- [X] detach_network_interface +- [X] detach_volume +- [X] detach_vpn_gateway +- [ ] disable_vgw_route_propagation +- [ ] disable_vpc_classic_link +- [ ] disable_vpc_classic_link_dns_support +- [X] disassociate_address +- [ ] disassociate_iam_instance_profile +- [X] disassociate_route_table +- [ ] disassociate_subnet_cidr_block +- [X] disassociate_vpc_cidr_block +- [ ] enable_vgw_route_propagation +- [ ] enable_volume_io +- [ ] enable_vpc_classic_link +- [ ] enable_vpc_classic_link_dns_support +- [ ] get_console_output +- [ ] get_console_screenshot +- [ ] get_host_reservation_purchase_preview +- [ ] get_launch_template_data +- [ ] get_password_data +- [ ] get_reserved_instances_exchange_quote +- [ ] import_image +- [ ] import_instance +- [X] import_key_pair +- [ ] import_snapshot +- [ ] import_volume +- [ ] modify_fleet +- [ ] modify_fpga_image_attribute +- [ ] modify_hosts +- [ ] modify_id_format +- [ ] modify_identity_id_format +- [ ] modify_image_attribute +- [X] modify_instance_attribute +- [ ] modify_instance_credit_specification +- [ ] modify_instance_placement +- [ ] modify_launch_template +- [X] modify_network_interface_attribute +- [ ] modify_reserved_instances +- [ ] modify_snapshot_attribute +- [X] modify_spot_fleet_request +- [X] modify_subnet_attribute +- [ ] modify_volume +- [ ] modify_volume_attribute +- [X] modify_vpc_attribute +- [ ] modify_vpc_endpoint +- [ ] modify_vpc_endpoint_connection_notification +- [ ] modify_vpc_endpoint_service_configuration +- [ ] modify_vpc_endpoint_service_permissions +- [ ] modify_vpc_peering_connection_options +- [ ] modify_vpc_tenancy +- [ ] monitor_instances +- [ ] move_address_to_vpc +- [ ] purchase_host_reservation +- [ ] purchase_reserved_instances_offering +- [ ] purchase_scheduled_instances +- [X] reboot_instances +- [ ] register_image +- [ ] reject_vpc_endpoint_connections +- [X] reject_vpc_peering_connection +- [X] release_address +- [ ] release_hosts +- [ ] replace_iam_instance_profile_association +- [X] replace_network_acl_association +- [X] replace_network_acl_entry +- [X] replace_route +- [X] replace_route_table_association +- [ ] report_instance_status +- [X] request_spot_fleet +- [X] request_spot_instances +- [ ] reset_fpga_image_attribute +- [ ] reset_image_attribute +- [ ] reset_instance_attribute +- [ ] 
reset_network_interface_attribute +- [ ] reset_snapshot_attribute +- [ ] restore_address_to_classic +- [X] revoke_security_group_egress +- [X] revoke_security_group_ingress +- [ ] run_instances +- [ ] run_scheduled_instances +- [X] start_instances +- [X] stop_instances +- [X] terminate_instances +- [ ] unassign_ipv6_addresses +- [ ] unassign_private_ip_addresses +- [ ] unmonitor_instances +- [ ] update_security_group_rule_descriptions_egress +- [ ] update_security_group_rule_descriptions_ingress + +## ecr - 31% implemented +- [ ] batch_check_layer_availability +- [ ] batch_delete_image +- [X] batch_get_image +- [ ] complete_layer_upload +- [X] create_repository +- [ ] delete_lifecycle_policy +- [X] delete_repository +- [ ] delete_repository_policy +- [X] describe_images +- [X] describe_repositories +- [ ] get_authorization_token +- [ ] get_download_url_for_layer +- [ ] get_lifecycle_policy +- [ ] get_lifecycle_policy_preview +- [ ] get_repository_policy +- [ ] initiate_layer_upload +- [X] list_images +- [X] put_image +- [ ] put_lifecycle_policy +- [ ] set_repository_policy +- [ ] start_lifecycle_policy_preview +- [ ] upload_layer_part + +## ecs - 87% implemented +- [X] create_cluster +- [X] create_service +- [X] delete_attributes +- [X] delete_cluster +- [X] delete_service +- [X] deregister_container_instance +- [X] deregister_task_definition +- [X] describe_clusters +- [X] describe_container_instances +- [X] describe_services +- [X] describe_task_definition +- [X] describe_tasks +- [ ] discover_poll_endpoint +- [X] list_attributes +- [X] list_clusters +- [X] list_container_instances +- [X] list_services +- [X] list_task_definition_families +- [X] list_task_definitions +- [X] list_tasks +- [X] put_attributes +- [X] register_container_instance +- [X] register_task_definition +- [X] run_task +- [X] start_task +- [X] stop_task +- [ ] submit_container_state_change +- [ ] submit_task_state_change +- [ ] update_container_agent +- [X] update_container_instances_state +- [X] update_service + +## efs - 0% implemented +- [ ] create_file_system +- [ ] create_mount_target +- [ ] create_tags +- [ ] delete_file_system +- [ ] delete_mount_target +- [ ] delete_tags +- [ ] describe_file_systems +- [ ] describe_mount_target_security_groups +- [ ] describe_mount_targets +- [ ] describe_tags +- [ ] modify_mount_target_security_groups +- [ ] update_file_system + +## eks - 0% implemented +- [ ] create_cluster +- [ ] delete_cluster +- [ ] describe_cluster +- [ ] list_clusters + +## elasticache - 0% implemented +- [ ] add_tags_to_resource +- [ ] authorize_cache_security_group_ingress +- [ ] copy_snapshot +- [ ] create_cache_cluster +- [ ] create_cache_parameter_group +- [ ] create_cache_security_group +- [ ] create_cache_subnet_group +- [ ] create_replication_group +- [ ] create_snapshot +- [ ] delete_cache_cluster +- [ ] delete_cache_parameter_group +- [ ] delete_cache_security_group +- [ ] delete_cache_subnet_group +- [ ] delete_replication_group +- [ ] delete_snapshot +- [ ] describe_cache_clusters +- [ ] describe_cache_engine_versions +- [ ] describe_cache_parameter_groups +- [ ] describe_cache_parameters +- [ ] describe_cache_security_groups +- [ ] describe_cache_subnet_groups +- [ ] describe_engine_default_parameters +- [ ] describe_events +- [ ] describe_replication_groups +- [ ] describe_reserved_cache_nodes +- [ ] describe_reserved_cache_nodes_offerings +- [ ] describe_snapshots +- [ ] list_allowed_node_type_modifications +- [ ] list_tags_for_resource +- [ ] modify_cache_cluster +- [ ] 
modify_cache_parameter_group +- [ ] modify_cache_subnet_group +- [ ] modify_replication_group +- [ ] modify_replication_group_shard_configuration +- [ ] purchase_reserved_cache_nodes_offering +- [ ] reboot_cache_cluster +- [ ] remove_tags_from_resource +- [ ] reset_cache_parameter_group +- [ ] revoke_cache_security_group_ingress +- [ ] test_failover + +## elasticbeanstalk - 0% implemented +- [ ] abort_environment_update +- [ ] apply_environment_managed_action +- [ ] check_dns_availability +- [ ] compose_environments +- [ ] create_application +- [ ] create_application_version +- [ ] create_configuration_template +- [ ] create_environment +- [ ] create_platform_version +- [ ] create_storage_location +- [ ] delete_application +- [ ] delete_application_version +- [ ] delete_configuration_template +- [ ] delete_environment_configuration +- [ ] delete_platform_version +- [ ] describe_account_attributes +- [ ] describe_application_versions +- [ ] describe_applications +- [ ] describe_configuration_options +- [ ] describe_configuration_settings +- [ ] describe_environment_health +- [ ] describe_environment_managed_action_history +- [ ] describe_environment_managed_actions +- [ ] describe_environment_resources +- [ ] describe_environments +- [ ] describe_events +- [ ] describe_instances_health +- [ ] describe_platform_version +- [ ] list_available_solution_stacks +- [ ] list_platform_versions +- [ ] list_tags_for_resource +- [ ] rebuild_environment +- [ ] request_environment_info +- [ ] restart_app_server +- [ ] retrieve_environment_info +- [ ] swap_environment_cnames +- [ ] terminate_environment +- [ ] update_application +- [ ] update_application_resource_lifecycle +- [ ] update_application_version +- [ ] update_configuration_template +- [ ] update_environment +- [ ] update_tags_for_resource +- [ ] validate_configuration_settings + +## elastictranscoder - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] create_pipeline +- [ ] create_preset +- [ ] delete_pipeline +- [ ] delete_preset +- [ ] list_jobs_by_pipeline +- [ ] list_jobs_by_status +- [ ] list_pipelines +- [ ] list_presets +- [ ] read_job +- [ ] read_pipeline +- [ ] read_preset +- [ ] test_role +- [ ] update_pipeline +- [ ] update_pipeline_notifications +- [ ] update_pipeline_status + +## elb - 34% implemented +- [ ] add_tags +- [X] apply_security_groups_to_load_balancer +- [ ] attach_load_balancer_to_subnets +- [X] configure_health_check +- [X] create_app_cookie_stickiness_policy +- [X] create_lb_cookie_stickiness_policy +- [X] create_load_balancer +- [X] create_load_balancer_listeners +- [ ] create_load_balancer_policy +- [X] delete_load_balancer +- [X] delete_load_balancer_listeners +- [ ] delete_load_balancer_policy +- [ ] deregister_instances_from_load_balancer +- [ ] describe_account_limits +- [ ] describe_instance_health +- [ ] describe_load_balancer_attributes +- [ ] describe_load_balancer_policies +- [ ] describe_load_balancer_policy_types +- [X] describe_load_balancers +- [ ] describe_tags +- [ ] detach_load_balancer_from_subnets +- [ ] disable_availability_zones_for_load_balancer +- [ ] enable_availability_zones_for_load_balancer +- [ ] modify_load_balancer_attributes +- [ ] register_instances_with_load_balancer +- [ ] remove_tags +- [ ] set_load_balancer_listener_ssl_certificate +- [ ] set_load_balancer_policies_for_backend_server +- [X] set_load_balancer_policies_of_listener + +## elbv2 - 70% implemented +- [ ] add_listener_certificates +- [ ] add_tags +- [X] create_listener +- [X] create_load_balancer +- [X] 
+- [X] create_rule
+- [X] create_target_group
+- [X] delete_listener
+- [X] delete_load_balancer
+- [X] delete_rule
+- [X] delete_target_group
+- [X] deregister_targets
+- [ ] describe_account_limits
+- [ ] describe_listener_certificates
+- [X] describe_listeners
+- [X] describe_load_balancer_attributes
+- [X] describe_load_balancers
+- [X] describe_rules
+- [ ] describe_ssl_policies
+- [ ] describe_tags
+- [ ] describe_target_group_attributes
+- [X] describe_target_groups
+- [X] describe_target_health
+- [X] modify_listener
+- [X] modify_load_balancer_attributes
+- [X] modify_rule
+- [X] modify_target_group
+- [ ] modify_target_group_attributes
+- [X] register_targets
+- [ ] remove_listener_certificates
+- [ ] remove_tags
+- [X] set_ip_address_type
+- [X] set_rule_priorities
+- [X] set_security_groups
+- [X] set_subnets
+
+## emr - 55% implemented
+- [ ] add_instance_fleet
+- [X] add_instance_groups
+- [X] add_job_flow_steps
+- [X] add_tags
+- [ ] cancel_steps
+- [ ] create_security_configuration
+- [ ] delete_security_configuration
+- [ ] describe_cluster
+- [X] describe_job_flows
+- [ ] describe_security_configuration
+- [X] describe_step
+- [X] list_bootstrap_actions
+- [X] list_clusters
+- [ ] list_instance_fleets
+- [X] list_instance_groups
+- [ ] list_instances
+- [ ] list_security_configurations
+- [X] list_steps
+- [ ] modify_instance_fleet
+- [X] modify_instance_groups
+- [ ] put_auto_scaling_policy
+- [ ] remove_auto_scaling_policy
+- [X] remove_tags
+- [X] run_job_flow
+- [X] set_termination_protection
+- [X] set_visible_to_all_users
+- [X] terminate_job_flows
+
+## es - 0% implemented
+- [ ] add_tags
+- [ ] create_elasticsearch_domain
+- [ ] delete_elasticsearch_domain
+- [ ] delete_elasticsearch_service_role
+- [ ] describe_elasticsearch_domain
+- [ ] describe_elasticsearch_domain_config
+- [ ] describe_elasticsearch_domains
+- [ ] describe_elasticsearch_instance_type_limits
+- [ ] describe_reserved_elasticsearch_instance_offerings
+- [ ] describe_reserved_elasticsearch_instances
+- [ ] get_compatible_elasticsearch_versions
+- [ ] get_upgrade_history
+- [ ] get_upgrade_status
+- [ ] list_domain_names
+- [ ] list_elasticsearch_instance_types
+- [ ] list_elasticsearch_versions
+- [ ] list_tags
+- [ ] purchase_reserved_elasticsearch_instance_offering
+- [ ] remove_tags
+- [ ] update_elasticsearch_domain_config
+- [ ] upgrade_elasticsearch_domain
+
+## events - 100% implemented
+- [X] delete_rule
+- [X] describe_event_bus
+- [X] describe_rule
+- [X] disable_rule
+- [X] enable_rule
+- [X] list_rule_names_by_target
+- [X] list_rules
+- [X] list_targets_by_rule
+- [X] put_events
+- [X] put_permission
+- [X] put_rule
+- [X] put_targets
+- [X] remove_permission
+- [X] remove_targets
+- [X] test_event_pattern
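+
+Example (a minimal sketch of the mocked events backend; rule name and schedule are illustrative):
+
+```python
+import boto3
+from moto import mock_events
+
+
+@mock_events
+def test_put_and_describe_rule():
+    client = boto3.client("events", region_name="us-east-1")
+    client.put_rule(Name="my-rule", ScheduleExpression="rate(5 minutes)")
+    # describe_rule should echo back what was stored.
+    rule = client.describe_rule(Name="my-rule")
+    assert rule["ScheduleExpression"] == "rate(5 minutes)"
+```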
+
+## firehose - 0% implemented
+- [ ] create_delivery_stream
+- [ ] delete_delivery_stream
+- [ ] describe_delivery_stream
+- [ ] list_delivery_streams
+- [ ] list_tags_for_delivery_stream
+- [ ] put_record
+- [ ] put_record_batch
+- [ ] tag_delivery_stream
+- [ ] untag_delivery_stream
+- [ ] update_destination
+
+## fms - 0% implemented
+- [ ] associate_admin_account
+- [ ] delete_notification_channel
+- [ ] delete_policy
+- [ ] disassociate_admin_account
+- [ ] get_admin_account
+- [ ] get_compliance_detail
+- [ ] get_notification_channel
+- [ ] get_policy
+- [ ] list_compliance_status
+- [ ] list_policies
+- [ ] put_notification_channel
+- [ ] put_policy
+
+## gamelift - 0% implemented
+- [ ] accept_match
+- [ ] create_alias
+- [ ] create_build
+- [ ] create_fleet
+- [ ] create_game_session
+- [ ] create_game_session_queue
+- [ ] create_matchmaking_configuration
+- [ ] create_matchmaking_rule_set
+- [ ] create_player_session
+- [ ] create_player_sessions
+- [ ] create_vpc_peering_authorization
+- [ ] create_vpc_peering_connection
+- [ ] delete_alias
+- [ ] delete_build
+- [ ] delete_fleet
+- [ ] delete_game_session_queue
+- [ ] delete_matchmaking_configuration
+- [ ] delete_scaling_policy
+- [ ] delete_vpc_peering_authorization
+- [ ] delete_vpc_peering_connection
+- [ ] describe_alias
+- [ ] describe_build
+- [ ] describe_ec2_instance_limits
+- [ ] describe_fleet_attributes
+- [ ] describe_fleet_capacity
+- [ ] describe_fleet_events
+- [ ] describe_fleet_port_settings
+- [ ] describe_fleet_utilization
+- [ ] describe_game_session_details
+- [ ] describe_game_session_placement
+- [ ] describe_game_session_queues
+- [ ] describe_game_sessions
+- [ ] describe_instances
+- [ ] describe_matchmaking
+- [ ] describe_matchmaking_configurations
+- [ ] describe_matchmaking_rule_sets
+- [ ] describe_player_sessions
+- [ ] describe_runtime_configuration
+- [ ] describe_scaling_policies
+- [ ] describe_vpc_peering_authorizations
+- [ ] describe_vpc_peering_connections
+- [ ] get_game_session_log_url
+- [ ] get_instance_access
+- [ ] list_aliases
+- [ ] list_builds
+- [ ] list_fleets
+- [ ] put_scaling_policy
+- [ ] request_upload_credentials
+- [ ] resolve_alias
+- [ ] search_game_sessions
+- [ ] start_fleet_actions
+- [ ] start_game_session_placement
+- [ ] start_match_backfill
+- [ ] start_matchmaking
+- [ ] stop_fleet_actions
+- [ ] stop_game_session_placement
+- [ ] stop_matchmaking
+- [ ] update_alias
+- [ ] update_build
+- [ ] update_fleet_attributes
+- [ ] update_fleet_capacity
+- [ ] update_fleet_port_settings
+- [ ] update_game_session
+- [ ] update_game_session_queue
+- [ ] update_matchmaking_configuration
+- [ ] update_runtime_configuration
+- [ ] validate_matchmaking_rule_set
+
+## glacier - 12% implemented
+- [ ] abort_multipart_upload
+- [ ] abort_vault_lock
+- [ ] add_tags_to_vault
+- [ ] complete_multipart_upload
+- [ ] complete_vault_lock
+- [X] create_vault
+- [ ] delete_archive
+- [X] delete_vault
+- [ ] delete_vault_access_policy
+- [ ] delete_vault_notifications
+- [ ] describe_job
+- [ ] describe_vault
+- [ ] get_data_retrieval_policy
+- [ ] get_job_output
+- [ ] get_vault_access_policy
+- [ ] get_vault_lock
+- [ ] get_vault_notifications
+- [X] initiate_job
+- [ ] initiate_multipart_upload
+- [ ] initiate_vault_lock
+- [X] list_jobs
+- [ ] list_multipart_uploads
+- [ ] list_parts
+- [ ] list_provisioned_capacity
+- [ ] list_tags_for_vault
+- [ ] list_vaults
+- [ ] purchase_provisioned_capacity
+- [ ] remove_tags_from_vault
+- [ ] set_data_retrieval_policy
+- [ ] set_vault_access_policy
+- [ ] set_vault_notifications
+- [ ] upload_archive
+- [ ] upload_multipart_part
+
+## glue - 6% implemented
+- [ ] batch_create_partition
+- [ ] batch_delete_connection
+- [ ] batch_delete_partition
+- [ ] batch_delete_table
+- [ ] batch_delete_table_version
+- [ ] batch_get_partition
+- [ ] batch_stop_job_run
+- [ ] create_classifier
+- [ ] create_connection
+- [ ] create_crawler
+- [X] create_database
+- [ ] create_dev_endpoint
+- [ ] create_job
+- [ ] create_partition
+- [ ] create_script
+- [X] create_table
+- [ ] create_trigger
+- [ ] create_user_defined_function
+- [ ] delete_classifier
+- [ ] delete_connection
+- [ ] delete_crawler
+- [ ] delete_database
+- [ ] delete_dev_endpoint
+- [ ] delete_job
+- [ ] delete_partition
+- [ ] delete_table
+- [ ] delete_table_version
+- [ ] delete_trigger
+- [ ] delete_user_defined_function
+- [ ] get_catalog_import_status
+- [ ] get_classifier
+- [ ] get_classifiers
+- [ ] get_connection
+- [ ] get_connections
+- [ ] get_crawler
+- [ ] get_crawler_metrics
+- [ ] get_crawlers
+- [X] get_database
+- [ ] get_databases
+- [ ] get_dataflow_graph
+- [ ] get_dev_endpoint
+- [ ] get_dev_endpoints
+- [ ] get_job
+- [ ] get_job_run
+- [ ] get_job_runs
+- [ ] get_jobs
+- [ ] get_mapping
+- [ ] get_partition
+- [ ] get_partitions
+- [ ] get_plan
+- [X] get_table
+- [ ] get_table_version
+- [ ] get_table_versions
+- [X] get_tables
+- [ ] get_trigger
+- [ ] get_triggers
+- [ ] get_user_defined_function
+- [ ] get_user_defined_functions
+- [ ] import_catalog_to_glue
+- [ ] reset_job_bookmark
+- [ ] start_crawler
+- [ ] start_crawler_schedule
+- [ ] start_job_run
+- [ ] start_trigger
+- [ ] stop_crawler
+- [ ] stop_crawler_schedule
+- [ ] stop_trigger
+- [ ] update_classifier
+- [ ] update_connection
+- [ ] update_crawler
+- [ ] update_crawler_schedule
+- [ ] update_database
+- [ ] update_dev_endpoint
+- [ ] update_job
+- [ ] update_partition
+- [ ] update_table
+- [ ] update_trigger
+- [ ] update_user_defined_function
+
+## greengrass - 0% implemented
+- [ ] associate_role_to_group
+- [ ] associate_service_role_to_account
+- [ ] create_core_definition
+- [ ] create_core_definition_version
+- [ ] create_deployment
+- [ ] create_device_definition
+- [ ] create_device_definition_version
+- [ ] create_function_definition
+- [ ] create_function_definition_version
+- [ ] create_group
+- [ ] create_group_certificate_authority
+- [ ] create_group_version
+- [ ] create_logger_definition
+- [ ] create_logger_definition_version
+- [ ] create_resource_definition
+- [ ] create_resource_definition_version
+- [ ] create_software_update_job
+- [ ] create_subscription_definition
+- [ ] create_subscription_definition_version
+- [ ] delete_core_definition
+- [ ] delete_device_definition
+- [ ] delete_function_definition
+- [ ] delete_group
+- [ ] delete_logger_definition
+- [ ] delete_resource_definition
+- [ ] delete_subscription_definition
+- [ ] disassociate_role_from_group
+- [ ] disassociate_service_role_from_account
+- [ ] get_associated_role
+- [ ] get_connectivity_info
+- [ ] get_core_definition
+- [ ] get_core_definition_version
+- [ ] get_deployment_status
+- [ ] get_device_definition
+- [ ] get_device_definition_version
+- [ ] get_function_definition
+- [ ] get_function_definition_version
+- [ ] get_group
+- [ ] get_group_certificate_authority
+- [ ] get_group_certificate_configuration
+- [ ] get_group_version
+- [ ] get_logger_definition
+- [ ] get_logger_definition_version
+- [ ] get_resource_definition
+- [ ] get_resource_definition_version
+- [ ] get_service_role_for_account
+- [ ] get_subscription_definition
+- [ ] get_subscription_definition_version
+- [ ] list_core_definition_versions
+- [ ] list_core_definitions
+- [ ] list_deployments
+- [ ] list_device_definition_versions
+- [ ] list_device_definitions
+- [ ] list_function_definition_versions
+- [ ] list_function_definitions
+- [ ] list_group_certificate_authorities
+- [ ] list_group_versions
+- [ ] list_groups
+- [ ] list_logger_definition_versions
+- [ ] list_logger_definitions
+- [ ] list_resource_definition_versions
+- [ ] list_resource_definitions
+- [ ] list_subscription_definition_versions
+- [ ] list_subscription_definitions
+- [ ] reset_deployments
+- [ ] update_connectivity_info
+- [ ] update_core_definition
+- [ ] update_device_definition
+- [ ] update_function_definition
+- [ ] update_group
+- [ ] update_group_certificate_configuration
+- [ ] update_logger_definition
+- [ ] update_resource_definition
+- [ ] update_subscription_definition
+
+## guardduty - 0% implemented
+- [ ] accept_invitation
+- [ ] archive_findings
+- [ ] create_detector
+- [ ] create_filter
+- [ ] create_ip_set
+- [ ] create_members
+- [ ] create_sample_findings
+- [ ] create_threat_intel_set
+- [ ] decline_invitations
+- [ ] delete_detector
+- [ ] delete_filter
+- [ ] delete_invitations
+- [ ] delete_ip_set
+- [ ] delete_members
+- [ ] delete_threat_intel_set
+- [ ] disassociate_from_master_account
+- [ ] disassociate_members
+- [ ] get_detector
+- [ ] get_filter
+- [ ] get_findings
+- [ ] get_findings_statistics
+- [ ] get_invitations_count
+- [ ] get_ip_set
+- [ ] get_master_account
+- [ ] get_members
+- [ ] get_threat_intel_set
+- [ ] invite_members
+- [ ] list_detectors
+- [ ] list_filters
+- [ ] list_findings
+- [ ] list_invitations
+- [ ] list_ip_sets
+- [ ] list_members
+- [ ] list_threat_intel_sets
+- [ ] start_monitoring_members
+- [ ] stop_monitoring_members
+- [ ] unarchive_findings
+- [ ] update_detector
+- [ ] update_filter
+- [ ] update_findings_feedback
+- [ ] update_ip_set
+- [ ] update_threat_intel_set
+
+## health - 0% implemented
+- [ ] describe_affected_entities
+- [ ] describe_entity_aggregates
+- [ ] describe_event_aggregates
+- [ ] describe_event_details
+- [ ] describe_event_types
+- [ ] describe_events
+
+## iam - 47% implemented
+- [ ] add_client_id_to_open_id_connect_provider
+- [X] add_role_to_instance_profile
+- [X] add_user_to_group
+- [X] attach_group_policy
+- [X] attach_role_policy
+- [X] attach_user_policy
+- [ ] change_password
+- [X] create_access_key
+- [X] create_account_alias
+- [X] create_group
+- [X] create_instance_profile
+- [X] create_login_profile
+- [ ] create_open_id_connect_provider
+- [X] create_policy
+- [X] create_policy_version
+- [X] create_role
+- [ ] create_saml_provider
+- [ ] create_service_linked_role
+- [ ] create_service_specific_credential
+- [X] create_user
+- [ ] create_virtual_mfa_device
+- [X] deactivate_mfa_device
+- [X] delete_access_key
+- [X] delete_account_alias
+- [ ] delete_account_password_policy
+- [ ] delete_group
+- [ ] delete_group_policy
+- [ ] delete_instance_profile
+- [X] delete_login_profile
+- [ ] delete_open_id_connect_provider
+- [ ] delete_policy
+- [X] delete_policy_version
+- [X] delete_role
+- [ ] delete_role_permissions_boundary
+- [X] delete_role_policy
+- [ ] delete_saml_provider
+- [X] delete_server_certificate
+- [ ] delete_service_linked_role
+- [ ] delete_service_specific_credential
+- [ ] delete_signing_certificate
+- [ ] delete_ssh_public_key
+- [X] delete_user
+- [ ] delete_user_permissions_boundary
+- [X] delete_user_policy
+- [ ] delete_virtual_mfa_device
+- [X] detach_group_policy
+- [X] detach_role_policy
+- [X] detach_user_policy
+- [X] enable_mfa_device
+- [ ] generate_credential_report
+- [ ] get_access_key_last_used
+- [X] get_account_authorization_details
+- [ ] get_account_password_policy
+- [ ] get_account_summary
+- [ ] get_context_keys_for_custom_policy
+- [ ] get_context_keys_for_principal_policy
+- [X] get_credential_report
+- [X] get_group
+- [X] get_group_policy
+- [X] get_instance_profile
+- [X] get_login_profile
+- [ ] get_open_id_connect_provider
+- [X] get_policy
+- [X] get_policy_version
+- [X] get_role
+- [X] get_role_policy
+- [ ] get_saml_provider
+- [X] get_server_certificate
+- [ ] get_service_linked_role_deletion_status
+- [ ] get_ssh_public_key
+- [X] get_user
+- [X] get_user_policy
+- [ ] list_access_keys
+- [X] list_account_aliases
+- [X] list_attached_group_policies
+- [X] list_attached_role_policies
+- [X] list_attached_user_policies
+- [ ] list_entities_for_policy
+- [X] list_group_policies
+- [X] list_groups
+- [ ] list_groups_for_user
+- [ ] list_instance_profiles
+- [ ] list_instance_profiles_for_role
+- [X] list_mfa_devices
+- [ ] list_open_id_connect_providers
+- [X] list_policies
+- [X] list_policy_versions
+- [X] list_role_policies
+- [ ] list_roles
+- [ ] list_saml_providers
+- [ ] list_server_certificates
+- [ ] list_service_specific_credentials
+- [ ] list_signing_certificates
+- [ ] list_ssh_public_keys
+- [X] list_user_policies
+- [X] list_users
+- [ ] list_virtual_mfa_devices
+- [X] put_group_policy
+- [ ] put_role_permissions_boundary
+- [X] put_role_policy
+- [ ] put_user_permissions_boundary
+- [X] put_user_policy
+- [ ] remove_client_id_from_open_id_connect_provider
+- [X] remove_role_from_instance_profile
+- [X] remove_user_from_group
+- [ ] reset_service_specific_credential
+- [ ] resync_mfa_device
+- [ ] set_default_policy_version
+- [ ] simulate_custom_policy
+- [ ] simulate_principal_policy
+- [X] update_access_key
+- [ ] update_account_password_policy
+- [ ] update_assume_role_policy
+- [ ] update_group
+- [X] update_login_profile
+- [ ] update_open_id_connect_provider_thumbprint
+- [ ] update_role
+- [ ] update_role_description
+- [ ] update_saml_provider
+- [ ] update_server_certificate
+- [ ] update_service_specific_credential
+- [ ] update_signing_certificate
+- [ ] update_ssh_public_key
+- [ ] update_user
+- [ ] upload_server_certificate
+- [ ] upload_signing_certificate
+- [ ] upload_ssh_public_key
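+
+Example (a minimal sketch of the mocked IAM backend; the role name is illustrative, and moto is typically lenient about the policy document contents):
+
+```python
+import boto3
+from moto import mock_iam
+
+
+@mock_iam
+def test_create_and_get_role():
+    client = boto3.client("iam", region_name="us-east-1")
+    # An empty JSON object stands in for a real trust policy here.
+    client.create_role(RoleName="my-role", AssumeRolePolicyDocument="{}")
+    role = client.get_role(RoleName="my-role")["Role"]
+    assert role["RoleName"] == "my-role"
+```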
+
+## importexport - 0% implemented
+- [ ] cancel_job
+- [ ] create_job
+- [ ] get_shipping_label
+- [ ] get_status
+- [ ] list_jobs
+- [ ] update_job
+
+## inspector - 0% implemented
+- [ ] add_attributes_to_findings
+- [ ] create_assessment_target
+- [ ] create_assessment_template
+- [ ] create_exclusions_preview
+- [ ] create_resource_group
+- [ ] delete_assessment_run
+- [ ] delete_assessment_target
+- [ ] delete_assessment_template
+- [ ] describe_assessment_runs
+- [ ] describe_assessment_targets
+- [ ] describe_assessment_templates
+- [ ] describe_cross_account_access_role
+- [ ] describe_exclusions
+- [ ] describe_findings
+- [ ] describe_resource_groups
+- [ ] describe_rules_packages
+- [ ] get_assessment_report
+- [ ] get_exclusions_preview
+- [ ] get_telemetry_metadata
+- [ ] list_assessment_run_agents
+- [ ] list_assessment_runs
+- [ ] list_assessment_targets
+- [ ] list_assessment_templates
+- [ ] list_event_subscriptions
+- [ ] list_exclusions
+- [ ] list_findings
+- [ ] list_rules_packages
+- [ ] list_tags_for_resource
+- [ ] preview_agents
+- [ ] register_cross_account_access_role
+- [ ] remove_attributes_from_findings
+- [ ] set_tags_for_resource
+- [ ] start_assessment_run
+- [ ] stop_assessment_run
+- [ ] subscribe_to_event
+- [ ] unsubscribe_from_event
+- [ ] update_assessment_target
+
+## iot - 31% implemented
+- [ ] accept_certificate_transfer
+- [X] add_thing_to_thing_group
+- [ ] associate_targets_with_job
+- [X] attach_policy
+- [X] attach_principal_policy
+- [ ] attach_security_profile
+- [X] attach_thing_principal
+- [ ] cancel_audit_task
+- [ ] cancel_certificate_transfer
+- [ ] cancel_job
+- [ ] cancel_job_execution
+- [ ] clear_default_authorizer
+- [ ] create_authorizer
+- [ ] create_certificate_from_csr
+- [X] create_job
+- [X] create_keys_and_certificate
+- [ ] create_ota_update
+- [X] create_policy
+- [X] create_policy_version
+- [ ] create_role_alias
+- [ ] create_scheduled_audit
+- [ ] create_security_profile
+- [ ] create_stream
+- [X] create_thing
+- [X] create_thing_group
+- [X] create_thing_type
+- [ ] create_topic_rule
+- [ ] delete_account_audit_configuration
+- [ ] delete_authorizer
+- [ ] delete_ca_certificate
+- [X] delete_certificate
+- [ ] delete_job
+- [ ] delete_job_execution
+- [ ] delete_ota_update
+- [X] delete_policy
+- [X] delete_policy_version
+- [ ] delete_registration_code
+- [ ] delete_role_alias
+- [ ] delete_scheduled_audit
+- [ ] delete_security_profile
+- [ ] delete_stream
+- [X] delete_thing
+- [X] delete_thing_group
+- [X] delete_thing_type
+- [ ] delete_topic_rule
+- [ ] delete_v2_logging_level
+- [ ] deprecate_thing_type
+- [ ] describe_account_audit_configuration
+- [ ] describe_audit_task
+- [ ] describe_authorizer
+- [ ] describe_ca_certificate
+- [X] describe_certificate
+- [ ] describe_default_authorizer
+- [ ] describe_endpoint
+- [ ] describe_event_configurations
+- [ ] describe_index
+- [X] describe_job
+- [ ] describe_job_execution
+- [ ] describe_role_alias
+- [ ] describe_scheduled_audit
+- [ ] describe_security_profile
+- [ ] describe_stream
+- [X] describe_thing
+- [X] describe_thing_group
+- [ ] describe_thing_registration_task
+- [X] describe_thing_type
+- [X] detach_policy
+- [X] detach_principal_policy
+- [ ] detach_security_profile
+- [X] detach_thing_principal
+- [ ] disable_topic_rule
+- [ ] enable_topic_rule
+- [ ] get_effective_policies
+- [ ] get_indexing_configuration
+- [X] get_job_document
+- [ ] get_logging_options
+- [ ] get_ota_update
+- [X] get_policy
+- [X] get_policy_version
+- [ ] get_registration_code
+- [ ] get_topic_rule
+- [ ] get_v2_logging_options
+- [ ] list_active_violations
+- [X] list_attached_policies
+- [ ] list_audit_findings
+- [ ] list_audit_tasks
+- [ ] list_authorizers
+- [ ] list_ca_certificates
+- [X] list_certificates
+- [ ] list_certificates_by_ca
+- [ ] list_indices
+- [ ] list_job_executions_for_job
+- [ ] list_job_executions_for_thing
+- [ ] list_jobs
+- [ ] list_ota_updates
+- [ ] list_outgoing_certificates
+- [X] list_policies
+- [X] list_policy_principals
+- [X] list_policy_versions
+- [X] list_principal_policies
+- [X] list_principal_things
+- [ ] list_role_aliases
+- [ ] list_scheduled_audits
+- [ ] list_security_profiles
+- [ ] list_security_profiles_for_target
+- [ ] list_streams
+- [ ] list_targets_for_policy
+- [ ] list_targets_for_security_profile
+- [X] list_thing_groups
+- [X] list_thing_groups_for_thing
+- [X] list_thing_principals
+- [ ] list_thing_registration_task_reports
+- [ ] list_thing_registration_tasks
+- [X] list_thing_types
+- [X] list_things
+- [X] list_things_in_thing_group
+- [ ] list_topic_rules
+- [ ] list_v2_logging_levels
+- [ ] list_violation_events
+- [ ] register_ca_certificate
+- [ ] register_certificate
+- [ ] register_thing
+- [ ] reject_certificate_transfer
+- [X] remove_thing_from_thing_group
+- [ ] replace_topic_rule
+- [ ] search_index
+- [ ] set_default_authorizer
+- [X] set_default_policy_version
+- [ ] set_logging_options
+- [ ] set_v2_logging_level
+- [ ] set_v2_logging_options
+- [ ] start_on_demand_audit_task
+- [ ] start_thing_registration_task
+- [ ] stop_thing_registration_task
+- [ ] test_authorization
+- [ ] test_invoke_authorizer
+- [ ] transfer_certificate
+- [ ] update_account_audit_configuration
+- [ ] update_authorizer
+- [ ] update_ca_certificate
+- [X] update_certificate
+- [ ] update_event_configurations
+- [ ] update_indexing_configuration
+- [ ] update_role_alias
+- [ ] update_scheduled_audit
+- [ ] update_security_profile
+- [ ] update_stream
+- [X] update_thing
+- [X] update_thing_group
+- [X] update_thing_groups_for_thing
+- [ ] validate_security_profile_behaviors
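+
+Example (a minimal sketch, in the spirit of the tests accompanying this patch, of the newly implemented get_job_document; thing and job names are illustrative):
+
+```python
+import json
+
+import boto3
+from moto import mock_iot
+
+
+@mock_iot
+def test_get_job_document():
+    client = boto3.client("iot", region_name="eu-west-1")
+    thing = client.create_thing(thingName="my-thing")
+    job_document = {"field": "value"}
+    client.create_job(
+        jobId="my-job",
+        targets=[thing["thingArn"]],
+        document=json.dumps(job_document),
+    )
+    # The backend should return the document exactly as it was stored.
+    resp = client.get_job_document(jobId="my-job")
+    assert json.loads(resp["document"]) == job_document
+```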
+
+## iot-data - 100% implemented
+- [X] delete_thing_shadow
+- [X] get_thing_shadow
+- [X] publish
+- [X] update_thing_shadow
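+
+Example (a minimal sketch of a thing-shadow round trip, assuming the mock echoes the desired state back; names and payload are illustrative):
+
+```python
+import json
+
+import boto3
+from moto import mock_iotdata
+
+
+@mock_iotdata
+def test_thing_shadow_roundtrip():
+    client = boto3.client("iot-data", region_name="us-east-1")
+    payload = json.dumps({"state": {"desired": {"led": "on"}}})
+    client.update_thing_shadow(thingName="my-thing", payload=payload)
+    shadow = client.get_thing_shadow(thingName="my-thing")
+    # get_thing_shadow returns the document as a streaming body.
+    document = json.loads(shadow["payload"].read())
+    assert document["state"]["desired"] == {"led": "on"}
+```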
+
+## iot-jobs-data - 0% implemented
+- [ ] describe_job_execution
+- [ ] get_pending_job_executions
+- [ ] start_next_pending_job_execution
+- [ ] update_job_execution
+
+## iot1click-devices - 0% implemented
+- [ ] claim_devices_by_claim_code
+- [ ] describe_device
+- [ ] finalize_device_claim
+- [ ] get_device_methods
+- [ ] initiate_device_claim
+- [ ] invoke_device_method
+- [ ] list_device_events
+- [ ] list_devices
+- [ ] unclaim_device
+- [ ] update_device_state
+
+## iot1click-projects - 0% implemented
+- [ ] associate_device_with_placement
+- [ ] create_placement
+- [ ] create_project
+- [ ] delete_placement
+- [ ] delete_project
+- [ ] describe_placement
+- [ ] describe_project
+- [ ] disassociate_device_from_placement
+- [ ] get_devices_in_placement
+- [ ] list_placements
+- [ ] list_projects
+- [ ] update_placement
+- [ ] update_project
+
+## iotanalytics - 0% implemented
+- [ ] batch_put_message
+- [ ] cancel_pipeline_reprocessing
+- [ ] create_channel
+- [ ] create_dataset
+- [ ] create_dataset_content
+- [ ] create_datastore
+- [ ] create_pipeline
+- [ ] delete_channel
+- [ ] delete_dataset
+- [ ] delete_dataset_content
+- [ ] delete_datastore
+- [ ] delete_pipeline
+- [ ] describe_channel
+- [ ] describe_dataset
+- [ ] describe_datastore
+- [ ] describe_logging_options
+- [ ] describe_pipeline
+- [ ] get_dataset_content
+- [ ] list_channels
+- [ ] list_datasets
+- [ ] list_datastores
+- [ ] list_pipelines
+- [ ] list_tags_for_resource
+- [ ] put_logging_options
+- [ ] run_pipeline_activity
+- [ ] sample_channel_data
+- [ ] start_pipeline_reprocessing
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_channel
+- [ ] update_dataset
+- [ ] update_datastore
+- [ ] update_pipeline
+
+## kinesis - 46% implemented
+- [X] add_tags_to_stream
+- [X] create_stream
+- [ ] decrease_stream_retention_period
+- [X] delete_stream
+- [ ] deregister_stream_consumer
+- [ ] describe_limits
+- [X] describe_stream
+- [ ] describe_stream_consumer
+- [ ] describe_stream_summary
+- [ ] disable_enhanced_monitoring
+- [ ] enable_enhanced_monitoring
+- [X] get_records
+- [X] get_shard_iterator
+- [ ] increase_stream_retention_period
+- [ ] list_shards
+- [ ] list_stream_consumers
+- [X] list_streams
+- [X] list_tags_for_stream
+- [X] merge_shards
+- [X] put_record
+- [X] put_records
+- [ ] register_stream_consumer
+- [X] remove_tags_from_stream
+- [X] split_shard
+- [ ] start_stream_encryption
+- [ ] stop_stream_encryption
+- [ ] subscribe_to_shard
+- [ ] update_shard_count
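+
+Example (a minimal sketch of writing and reading a record through the mocked Kinesis backend; stream name and data are illustrative):
+
+```python
+import boto3
+from moto import mock_kinesis
+
+
+@mock_kinesis
+def test_put_and_get_records():
+    client = boto3.client("kinesis", region_name="us-east-1")
+    client.create_stream(StreamName="my-stream", ShardCount=1)
+    client.put_record(StreamName="my-stream", Data=b"payload", PartitionKey="pk")
+    description = client.describe_stream(StreamName="my-stream")["StreamDescription"]
+    # Read from the beginning of the single shard.
+    iterator = client.get_shard_iterator(
+        StreamName="my-stream",
+        ShardId=description["Shards"][0]["ShardId"],
+        ShardIteratorType="TRIM_HORIZON",
+    )["ShardIterator"]
+    records = client.get_records(ShardIterator=iterator)["Records"]
+    assert records[0]["Data"] == b"payload"
+```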
+
+## kinesis-video-archived-media - 0% implemented
+- [ ] get_hls_streaming_session_url
+- [ ] get_media_for_fragment_list
+- [ ] list_fragments
+
+## kinesis-video-media - 0% implemented
+- [ ] get_media
+
+## kinesisanalytics - 0% implemented
+- [ ] add_application_cloud_watch_logging_option
+- [ ] add_application_input
+- [ ] add_application_input_processing_configuration
+- [ ] add_application_output
+- [ ] add_application_reference_data_source
+- [ ] create_application
+- [ ] delete_application
+- [ ] delete_application_cloud_watch_logging_option
+- [ ] delete_application_input_processing_configuration
+- [ ] delete_application_output
+- [ ] delete_application_reference_data_source
+- [ ] describe_application
+- [ ] discover_input_schema
+- [ ] list_applications
+- [ ] start_application
+- [ ] stop_application
+- [ ] update_application
+
+## kinesisvideo - 0% implemented
+- [ ] create_stream
+- [ ] delete_stream
+- [ ] describe_stream
+- [ ] get_data_endpoint
+- [ ] list_streams
+- [ ] list_tags_for_stream
+- [ ] tag_stream
+- [ ] untag_stream
+- [ ] update_data_retention
+- [ ] update_stream
+
+## kms - 37% implemented
+- [X] cancel_key_deletion
+- [ ] create_alias
+- [ ] create_grant
+- [X] create_key
+- [ ] decrypt
+- [X] delete_alias
+- [ ] delete_imported_key_material
+- [X] describe_key
+- [X] disable_key
+- [X] disable_key_rotation
+- [X] enable_key
+- [X] enable_key_rotation
+- [ ] encrypt
+- [ ] generate_data_key
+- [ ] generate_data_key_without_plaintext
+- [ ] generate_random
+- [X] get_key_policy
+- [X] get_key_rotation_status
+- [ ] get_parameters_for_import
+- [ ] import_key_material
+- [ ] list_aliases
+- [ ] list_grants
+- [ ] list_key_policies
+- [X] list_keys
+- [ ] list_resource_tags
+- [ ] list_retirable_grants
+- [X] put_key_policy
+- [ ] re_encrypt
+- [ ] retire_grant
+- [ ] revoke_grant
+- [X] schedule_key_deletion
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_alias
+- [ ] update_key_description
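+
+Example (a minimal sketch of the mocked KMS backend; the key description is illustrative):
+
+```python
+import boto3
+from moto import mock_kms
+
+
+@mock_kms
+def test_create_and_describe_key():
+    client = boto3.client("kms", region_name="us-east-1")
+    key_id = client.create_key(Description="my key")["KeyMetadata"]["KeyId"]
+    # describe_key should return the metadata stored at creation time.
+    metadata = client.describe_key(KeyId=key_id)["KeyMetadata"]
+    assert metadata["Description"] == "my key"
+```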
+
+## lambda - 0% implemented
+- [ ] add_permission
+- [ ] create_alias
+- [ ] create_event_source_mapping
+- [ ] create_function
+- [ ] delete_alias
+- [ ] delete_event_source_mapping
+- [ ] delete_function
+- [ ] delete_function_concurrency
+- [ ] get_account_settings
+- [ ] get_alias
+- [ ] get_event_source_mapping
+- [ ] get_function
+- [ ] get_function_configuration
+- [ ] get_policy
+- [ ] invoke
+- [ ] invoke_async
+- [ ] list_aliases
+- [ ] list_event_source_mappings
+- [ ] list_functions
+- [ ] list_tags
+- [ ] list_versions_by_function
+- [ ] publish_version
+- [ ] put_function_concurrency
+- [ ] remove_permission
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_alias
+- [ ] update_event_source_mapping
+- [ ] update_function_code
+- [ ] update_function_configuration
+
+## lex-models - 0% implemented
+- [ ] create_bot_version
+- [ ] create_intent_version
+- [ ] create_slot_type_version
+- [ ] delete_bot
+- [ ] delete_bot_alias
+- [ ] delete_bot_channel_association
+- [ ] delete_bot_version
+- [ ] delete_intent
+- [ ] delete_intent_version
+- [ ] delete_slot_type
+- [ ] delete_slot_type_version
+- [ ] delete_utterances
+- [ ] get_bot
+- [ ] get_bot_alias
+- [ ] get_bot_aliases
+- [ ] get_bot_channel_association
+- [ ] get_bot_channel_associations
+- [ ] get_bot_versions
+- [ ] get_bots
+- [ ] get_builtin_intent
+- [ ] get_builtin_intents
+- [ ] get_builtin_slot_types
+- [ ] get_export
+- [ ] get_import
+- [ ] get_intent
+- [ ] get_intent_versions
+- [ ] get_intents
+- [ ] get_slot_type
+- [ ] get_slot_type_versions
+- [ ] get_slot_types
+- [ ] get_utterances_view
+- [ ] put_bot
+- [ ] put_bot_alias
+- [ ] put_intent
+- [ ] put_slot_type
+- [ ] start_import
+
+## lex-runtime - 0% implemented
+- [ ] post_content
+- [ ] post_text
+
+## lightsail - 0% implemented
+- [ ] allocate_static_ip
+- [ ] attach_disk
+- [ ] attach_instances_to_load_balancer
+- [ ] attach_load_balancer_tls_certificate
+- [ ] attach_static_ip
+- [ ] close_instance_public_ports
+- [ ] create_disk
+- [ ] create_disk_from_snapshot
+- [ ] create_disk_snapshot
+- [ ] create_domain
+- [ ] create_domain_entry
+- [ ] create_instance_snapshot
+- [ ] create_instances
+- [ ] create_instances_from_snapshot
+- [ ] create_key_pair
+- [ ] create_load_balancer
+- [ ] create_load_balancer_tls_certificate
+- [ ] delete_disk
+- [ ] delete_disk_snapshot
+- [ ] delete_domain
+- [ ] delete_domain_entry
+- [ ] delete_instance
+- [ ] delete_instance_snapshot
+- [ ] delete_key_pair
+- [ ] delete_load_balancer
+- [ ] delete_load_balancer_tls_certificate
+- [ ] detach_disk
+- [ ] detach_instances_from_load_balancer
+- [ ] detach_static_ip
+- [ ] download_default_key_pair
+- [ ] get_active_names
+- [ ] get_blueprints
+- [ ] get_bundles
+- [ ] get_disk
+- [ ] get_disk_snapshot
+- [ ] get_disk_snapshots
+- [ ] get_disks
+- [ ] get_domain
+- [ ] get_domains
+- [ ] get_instance
+- [ ] get_instance_access_details
+- [ ] get_instance_metric_data
+- [ ] get_instance_port_states
+- [ ] get_instance_snapshot
+- [ ] get_instance_snapshots
+- [ ] get_instance_state
+- [ ] get_instances
+- [ ] get_key_pair
+- [ ] get_key_pairs
+- [ ] get_load_balancer
+- [ ] get_load_balancer_metric_data
+- [ ] get_load_balancer_tls_certificates
+- [ ] get_load_balancers
+- [ ] get_operation
+- [ ] get_operations
+- [ ] get_operations_for_resource
+- [ ] get_regions
+- [ ] get_static_ip
+- [ ] get_static_ips
+- [ ] import_key_pair
+- [ ] is_vpc_peered
+- [ ] open_instance_public_ports
+- [ ] peer_vpc
+- [ ] put_instance_public_ports
+- [ ] reboot_instance
+- [ ] release_static_ip
+- [ ] start_instance
+- [ ] stop_instance
+- [ ] unpeer_vpc
+- [ ] update_domain_entry
+- [ ] update_load_balancer_attribute
+
+## logs - 27% implemented
+- [ ] associate_kms_key
+- [ ] cancel_export_task
+- [ ] create_export_task
+- [X] create_log_group
+- [X] create_log_stream
+- [ ] delete_destination
+- [X] delete_log_group
+- [X] delete_log_stream
+- [ ] delete_metric_filter
+- [ ] delete_resource_policy
+- [ ] delete_retention_policy
+- [ ] delete_subscription_filter
+- [ ] describe_destinations
+- [ ] describe_export_tasks
+- [X] describe_log_groups
+- [X] describe_log_streams
+- [ ] describe_metric_filters
+- [ ] describe_resource_policies
+- [ ] describe_subscription_filters
+- [ ] disassociate_kms_key
+- [X] filter_log_events
+- [X] get_log_events
+- [ ] list_tags_log_group
+- [ ] put_destination
+- [ ] put_destination_policy
+- [X] put_log_events
+- [ ] put_metric_filter
+- [ ] put_resource_policy
+- [ ] put_retention_policy
+- [ ] put_subscription_filter
+- [ ] tag_log_group
+- [ ] test_metric_filter
+- [ ] untag_log_group
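+
+Example (a minimal sketch of writing and reading log events through the mocked logs backend; group/stream names and the zero timestamp are illustrative):
+
+```python
+import boto3
+from moto import mock_logs
+
+
+@mock_logs
+def test_put_and_get_log_events():
+    client = boto3.client("logs", region_name="us-east-1")
+    client.create_log_group(logGroupName="my-group")
+    client.create_log_stream(logGroupName="my-group", logStreamName="my-stream")
+    client.put_log_events(
+        logGroupName="my-group",
+        logStreamName="my-stream",
+        logEvents=[{"timestamp": 0, "message": "hello"}],
+    )
+    events = client.get_log_events(
+        logGroupName="my-group", logStreamName="my-stream"
+    )["events"]
+    assert events[0]["message"] == "hello"
+```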
+
+## machinelearning - 0% implemented
+- [ ] add_tags
+- [ ] create_batch_prediction
+- [ ] create_data_source_from_rds
+- [ ] create_data_source_from_redshift
+- [ ] create_data_source_from_s3
+- [ ] create_evaluation
+- [ ] create_ml_model
+- [ ] create_realtime_endpoint
+- [ ] delete_batch_prediction
+- [ ] delete_data_source
+- [ ] delete_evaluation
+- [ ] delete_ml_model
+- [ ] delete_realtime_endpoint
+- [ ] delete_tags
+- [ ] describe_batch_predictions
+- [ ] describe_data_sources
+- [ ] describe_evaluations
+- [ ] describe_ml_models
+- [ ] describe_tags
+- [ ] get_batch_prediction
+- [ ] get_data_source
+- [ ] get_evaluation
+- [ ] get_ml_model
+- [ ] predict
+- [ ] update_batch_prediction
+- [ ] update_data_source
+- [ ] update_evaluation
+- [ ] update_ml_model
+
+## macie - 0% implemented
+- [ ] associate_member_account
+- [ ] associate_s3_resources
+- [ ] disassociate_member_account
+- [ ] disassociate_s3_resources
+- [ ] list_member_accounts
+- [ ] list_s3_resources
+- [ ] update_s3_resources
+
+## marketplace-entitlement - 0% implemented
+- [ ] get_entitlements
+
+## marketplacecommerceanalytics - 0% implemented
+- [ ] generate_data_set
+- [ ] start_support_data_export
+
+## mediaconvert - 0% implemented
+- [ ] cancel_job
+- [ ] create_job
+- [ ] create_job_template
+- [ ] create_preset
+- [ ] create_queue
+- [ ] delete_job_template
+- [ ] delete_preset
+- [ ] delete_queue
+- [ ] describe_endpoints
+- [ ] get_job
+- [ ] get_job_template
+- [ ] get_preset
+- [ ] get_queue
+- [ ] list_job_templates
+- [ ] list_jobs
+- [ ] list_presets
+- [ ] list_queues
+- [ ] list_tags_for_resource
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_job_template
+- [ ] update_preset
+- [ ] update_queue
+
+## medialive - 0% implemented
+- [ ] batch_update_schedule
+- [ ] create_channel
+- [ ] create_input
+- [ ] create_input_security_group
+- [ ] delete_channel
+- [ ] delete_input
+- [ ] delete_input_security_group
+- [ ] delete_reservation
+- [ ] describe_channel
+- [ ] describe_input
+- [ ] describe_input_security_group
+- [ ] describe_offering
+- [ ] describe_reservation
+- [ ] describe_schedule
+- [ ] list_channels
+- [ ] list_input_security_groups
+- [ ] list_inputs
+- [ ] list_offerings
+- [ ] list_reservations
+- [ ] purchase_offering
+- [ ] start_channel
+- [ ] stop_channel
+- [ ] update_channel
+- [ ] update_input
+- [ ] update_input_security_group
+
+## mediapackage - 0% implemented
+- [ ] create_channel
+- [ ] create_origin_endpoint
+- [ ] delete_channel
+- [ ] delete_origin_endpoint
+- [ ] describe_channel
+- [ ] describe_origin_endpoint
+- [ ] list_channels
+- [ ] list_origin_endpoints
+- [ ] rotate_channel_credentials
+- [ ] update_channel
+- [ ] update_origin_endpoint
+
+## mediastore - 0% implemented
+- [ ] create_container
+- [ ] delete_container
+- [ ] delete_container_policy
+- [ ] delete_cors_policy
+- [ ] describe_container
+- [ ] get_container_policy
+- [ ] get_cors_policy
+- [ ] list_containers
+- [ ] put_container_policy
+- [ ] put_cors_policy
+
+## mediastore-data - 0% implemented
+- [ ] delete_object
+- [ ] describe_object
+- [ ] get_object
+- [ ] list_items
+- [ ] put_object
+
+## mediatailor - 0% implemented
+- [ ] delete_playback_configuration
+- [ ] get_playback_configuration
+- [ ] list_playback_configurations
+- [ ] put_playback_configuration
+
+## meteringmarketplace - 0% implemented
+- [ ] batch_meter_usage
+- [ ] meter_usage
+- [ ] resolve_customer
+
+## mgh - 0% implemented
+- [ ] associate_created_artifact
+- [ ] associate_discovered_resource
+- [ ] create_progress_update_stream
+- [ ] delete_progress_update_stream
+- [ ] describe_application_state
+- [ ] describe_migration_task
+- [ ] disassociate_created_artifact
+- [ ] disassociate_discovered_resource
+- [ ] import_migration_task
+- [ ] list_created_artifacts
+- [ ] list_discovered_resources
+- [ ] list_migration_tasks
+- [ ] list_progress_update_streams
+- [ ] notify_application_state
+- [ ] notify_migration_task_state
+- [ ] put_resource_attributes
+
+## mobile - 0% implemented
+- [ ] create_project
+- [ ] delete_project
+- [ ] describe_bundle
+- [ ] describe_project
+- [ ] export_bundle
+- [ ] export_project
+- [ ] list_bundles
+- [ ] list_projects
+- [ ] update_project
+
+## mq - 0% implemented
+- [ ] create_broker
+- [ ] create_configuration
+- [ ] create_user
+- [ ] delete_broker
+- [ ] delete_user
+- [ ] describe_broker
+- [ ] describe_configuration
+- [ ] describe_configuration_revision
+- [ ] describe_user
+- [ ] list_brokers
+- [ ] list_configuration_revisions
+- [ ] list_configurations
+- [ ] list_users
+- [ ] reboot_broker
+- [ ] update_broker
+- [ ] update_configuration
+- [ ] update_user
+
+## mturk - 0% implemented
+- [ ] accept_qualification_request
+- [ ] approve_assignment
+- [ ] associate_qualification_with_worker
+- [ ] create_additional_assignments_for_hit
+- [ ] create_hit
+- [ ] create_hit_type
+- [ ] create_hit_with_hit_type
+- [ ] create_qualification_type
+- [ ] create_worker_block
+- [ ] delete_hit
+- [ ] delete_qualification_type
+- [ ] delete_worker_block
+- [ ] disassociate_qualification_from_worker
+- [ ] get_account_balance
+- [ ] get_assignment
+- [ ] get_file_upload_url
+- [ ] get_hit
+- [ ] get_qualification_score
+- [ ] get_qualification_type
+- [ ] list_assignments_for_hit
+- [ ] list_bonus_payments
+- [ ] list_hits
+- [ ] list_hits_for_qualification_type
+- [ ] list_qualification_requests
+- [ ] list_qualification_types
+- [ ] list_review_policy_results_for_hit
+- [ ] list_reviewable_hits
+- [ ] list_worker_blocks
+- [ ] list_workers_with_qualification_type
+- [ ] notify_workers
+- [ ] reject_assignment
+- [ ] reject_qualification_request
+- [ ] send_bonus
+- [ ] send_test_event_notification
+- [ ] update_expiration_for_hit
+- [ ] update_hit_review_status
+- [ ] update_hit_type_of_hit
+- [ ] update_notification_settings
+- [ ] update_qualification_type
+
+## neptune - 0% implemented
+- [ ] add_role_to_db_cluster
+- [ ] add_source_identifier_to_subscription
+- [ ] add_tags_to_resource
+- [ ] apply_pending_maintenance_action
+- [ ] copy_db_cluster_parameter_group
+- [ ] copy_db_cluster_snapshot
+- [ ] copy_db_parameter_group
+- [ ] create_db_cluster
+- [ ] create_db_cluster_parameter_group
+- [ ] create_db_cluster_snapshot
+- [ ] create_db_instance
+- [ ] create_db_parameter_group
+- [ ] create_db_subnet_group
+- [ ] create_event_subscription
+- [ ] delete_db_cluster
+- [ ] delete_db_cluster_parameter_group
+- [ ] delete_db_cluster_snapshot
+- [ ] delete_db_instance
+- [ ] delete_db_parameter_group
+- [ ] delete_db_subnet_group
+- [ ] delete_event_subscription
+- [ ] describe_db_cluster_parameter_groups
+- [ ] describe_db_cluster_parameters
+- [ ] describe_db_cluster_snapshot_attributes
+- [ ] describe_db_cluster_snapshots
+- [ ] describe_db_clusters
+- [ ] describe_db_engine_versions
+- [ ] describe_db_instances
+- [ ] describe_db_parameter_groups
+- [ ] describe_db_parameters
+- [ ] describe_db_subnet_groups
+- [ ] describe_engine_default_cluster_parameters
+- [ ] describe_engine_default_parameters
+- [ ] describe_event_categories
+- [ ] describe_event_subscriptions
+- [ ] describe_events
+- [ ] describe_orderable_db_instance_options
+- [ ] describe_pending_maintenance_actions
+- [ ] describe_valid_db_instance_modifications
+- [ ] failover_db_cluster
+- [ ] list_tags_for_resource
+- [ ] modify_db_cluster
+- [ ] modify_db_cluster_parameter_group
+- [ ] modify_db_cluster_snapshot_attribute
+- [ ] modify_db_instance
+- [ ] modify_db_parameter_group
+- [ ] modify_db_subnet_group
+- [ ] modify_event_subscription
+- [ ] promote_read_replica_db_cluster
+- [ ] reboot_db_instance
+- [ ] remove_role_from_db_cluster
+- [ ] remove_source_identifier_from_subscription
+- [ ] remove_tags_from_resource
+- [ ] reset_db_cluster_parameter_group
+- [ ] reset_db_parameter_group
+- [ ] restore_db_cluster_from_snapshot
+- [ ] restore_db_cluster_to_point_in_time
+
+## opsworks - 12% implemented
+- [ ] assign_instance
+- [ ] assign_volume
+- [ ] associate_elastic_ip
+- [ ] attach_elastic_load_balancer
+- [ ] clone_stack
+- [X] create_app
+- [ ] create_deployment
+- [X] create_instance
+- [X] create_layer
+- [X] create_stack
+- [ ] create_user_profile
+- [ ] delete_app
+- [ ] delete_instance
+- [ ] delete_layer
+- [ ] delete_stack
+- [ ] delete_user_profile
+- [ ] deregister_ecs_cluster
+- [ ] deregister_elastic_ip
+- [ ] deregister_instance
+- [ ] deregister_rds_db_instance
+- [ ] deregister_volume
+- [ ] describe_agent_versions
+- [X] describe_apps
+- [ ] describe_commands
+- [ ] describe_deployments
+- [ ] describe_ecs_clusters
+- [ ] describe_elastic_ips
+- [ ] describe_elastic_load_balancers
+- [X] describe_instances
+- [X] describe_layers
+- [ ] describe_load_based_auto_scaling
+- [ ] describe_my_user_profile
+- [ ] describe_operating_systems
+- [ ] describe_permissions
+- [ ] describe_raid_arrays
+- [ ] describe_rds_db_instances
+- [ ] describe_service_errors
+- [ ] describe_stack_provisioning_parameters
+- [ ] describe_stack_summary
+- [X] describe_stacks
+- [ ] describe_time_based_auto_scaling
+- [ ] describe_user_profiles
+- [ ] describe_volumes
+- [ ] detach_elastic_load_balancer
+- [ ] disassociate_elastic_ip
+- [ ] get_hostname_suggestion
+- [ ] grant_access
+- [ ] list_tags
+- [ ] reboot_instance
+- [ ] register_ecs_cluster
+- [ ] register_elastic_ip
+- [ ] register_instance
+- [ ] register_rds_db_instance
+- [ ] register_volume
+- [ ] set_load_based_auto_scaling
+- [ ] set_permission
+- [ ] set_time_based_auto_scaling
+- [X] start_instance
+- [ ] start_stack
+- [ ] stop_instance
+- [ ] stop_stack
+- [ ] tag_resource
+- [ ] unassign_instance
+- [ ] unassign_volume
+- [ ] untag_resource
+- [ ] update_app
+- [ ] update_elastic_ip
+- [ ] update_instance
+- [ ] update_layer
+- [ ] update_my_user_profile
+- [ ] update_rds_db_instance
+- [ ] update_stack
+- [ ] update_user_profile
+- [ ] update_volume
+
+## opsworkscm - 0% implemented
+- [ ] associate_node
+- [ ] create_backup
+- [ ] create_server
+- [ ] delete_backup
+- [ ] delete_server
+- [ ] describe_account_attributes
+- [ ] describe_backups
+- [ ] describe_events
+- [ ] describe_node_association_status
+- [ ] describe_servers
+- [ ] disassociate_node
+- [ ] restore_server
+- [ ] start_maintenance
+- [ ] update_server
+- [ ] update_server_engine_attributes
+
+## organizations - 30% implemented
+- [ ] accept_handshake
+- [ ] attach_policy
+- [ ] cancel_handshake
+- [X] create_account
+- [X] create_organization
+- [X] create_organizational_unit
+- [ ] create_policy
+- [ ] decline_handshake
+- [ ] delete_organization
+- [ ] delete_organizational_unit
+- [ ] delete_policy
+- [X] describe_account
+- [ ] describe_create_account_status
+- [ ] describe_handshake
+- [X] describe_organization
+- [X] describe_organizational_unit
+- [ ] describe_policy
+- [ ] detach_policy
+- [ ] disable_aws_service_access
+- [ ] disable_policy_type
+- [ ] enable_all_features
+- [ ] enable_aws_service_access
+- [ ] enable_policy_type
+- [ ] invite_account_to_organization
+- [ ] leave_organization
+- [X] list_accounts
+- [X] list_accounts_for_parent
+- [ ] list_aws_service_access_for_organization
+- [X] list_children
+- [ ] list_create_account_status
+- [ ] list_handshakes_for_account
+- [ ] list_handshakes_for_organization
+- [X] list_organizational_units_for_parent
+- [X] list_parents
+- [ ] list_policies
+- [ ] list_policies_for_target
+- [X] list_roots
+- [ ] list_targets_for_policy
+- [X] move_account
+- [ ] remove_account_from_organization
+- [ ] update_organizational_unit
+- [ ] update_policy
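+
+Example (a minimal sketch of the mocked organizations backend; FeatureSet value follows the AWS API):
+
+```python
+import boto3
+from moto import mock_organizations
+
+
+@mock_organizations
+def test_create_organization():
+    client = boto3.client("organizations", region_name="us-east-1")
+    client.create_organization(FeatureSet="ALL")
+    # describe_organization should reflect the feature set chosen above.
+    org = client.describe_organization()["Organization"]
+    assert org["FeatureSet"] == "ALL"
+```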
+
+## pi - 0% implemented
+- [ ] describe_dimension_keys
+- [ ] get_resource_metrics
+
+## pinpoint - 0% implemented
+- [ ] create_app
+- [ ] create_campaign
+- [ ] create_export_job
+- [ ] create_import_job
+- [ ] create_segment
+- [ ] delete_adm_channel
+- [ ] delete_apns_channel
+- [ ] delete_apns_sandbox_channel
+- [ ] delete_apns_voip_channel
+- [ ] delete_apns_voip_sandbox_channel
+- [ ] delete_app
+- [ ] delete_baidu_channel
+- [ ] delete_campaign
+- [ ] delete_email_channel
+- [ ] delete_endpoint
+- [ ] delete_event_stream
+- [ ] delete_gcm_channel
+- [ ] delete_segment
+- [ ] delete_sms_channel
+- [ ] delete_user_endpoints
+- [ ] get_adm_channel
+- [ ] get_apns_channel
+- [ ] get_apns_sandbox_channel
+- [ ] get_apns_voip_channel
+- [ ] get_apns_voip_sandbox_channel
+- [ ] get_app
+- [ ] get_application_settings
+- [ ] get_apps
+- [ ] get_baidu_channel
+- [ ] get_campaign
+- [ ] get_campaign_activities
+- [ ] get_campaign_version
+- [ ] get_campaign_versions
+- [ ] get_campaigns
+- [ ] get_channels
+- [ ] get_email_channel
+- [ ] get_endpoint
+- [ ] get_event_stream
+- [ ] get_export_job
+- [ ] get_export_jobs
+- [ ] get_gcm_channel
+- [ ] get_import_job
+- [ ] get_import_jobs
+- [ ] get_segment
+- [ ] get_segment_export_jobs
+- [ ] get_segment_import_jobs
+- [ ] get_segment_version
+- [ ] get_segment_versions
+- [ ] get_segments
+- [ ] get_sms_channel
+- [ ] get_user_endpoints
+- [ ] phone_number_validate
+- [ ] put_event_stream
+- [ ] put_events
+- [ ] remove_attributes
+- [ ] send_messages
+- [ ] send_users_messages
+- [ ] update_adm_channel
+- [ ] update_apns_channel
+- [ ] update_apns_sandbox_channel
+- [ ] update_apns_voip_channel
+- [ ] update_apns_voip_sandbox_channel
+- [ ] update_application_settings
+- [ ] update_baidu_channel
+- [ ] update_campaign
+- [ ] update_email_channel
+- [ ] update_endpoint
+- [ ] update_endpoints_batch
+- [ ] update_gcm_channel
+- [ ] update_segment
+- [ ] update_sms_channel
+
+## polly - 55% implemented
+- [X] delete_lexicon
+- [X] describe_voices
+- [X] get_lexicon
+- [ ] get_speech_synthesis_task
+- [X] list_lexicons
+- [ ] list_speech_synthesis_tasks
+- [X] put_lexicon
+- [ ] start_speech_synthesis_task
+- [ ] synthesize_speech
+
+## pricing - 0% implemented
+- [ ] describe_services
+- [ ] get_attribute_values
+- [ ] get_products
+
+## rds - 0% implemented
+- [ ] add_role_to_db_cluster
+- [ ] add_source_identifier_to_subscription
+- [ ] add_tags_to_resource
+- [ ] apply_pending_maintenance_action
+- [ ] authorize_db_security_group_ingress
+- [ ] backtrack_db_cluster
+- [ ] copy_db_cluster_parameter_group
+- [ ] copy_db_cluster_snapshot
+- [ ] copy_db_parameter_group
+- [ ] copy_db_snapshot
+- [ ] copy_option_group
+- [ ] create_db_cluster
+- [ ] create_db_cluster_parameter_group
+- [ ] create_db_cluster_snapshot
+- [ ] create_db_instance
+- [ ] create_db_instance_read_replica
+- [ ] create_db_parameter_group
+- [ ] create_db_security_group
+- [ ] create_db_snapshot
+- [ ] create_db_subnet_group
+- [ ] create_event_subscription
+- [ ] create_option_group
+- [ ] delete_db_cluster
+- [ ] delete_db_cluster_parameter_group
+- [ ] delete_db_cluster_snapshot
+- [ ] delete_db_instance
+- [ ] delete_db_parameter_group
+- [ ] delete_db_security_group
+- [ ] delete_db_snapshot
+- [ ] delete_db_subnet_group
+- [ ] delete_event_subscription
+- [ ] delete_option_group
+- [ ] describe_account_attributes
+- [ ] describe_certificates
+- [ ] describe_db_cluster_backtracks
+- [ ] describe_db_cluster_parameter_groups
+- [ ] describe_db_cluster_parameters
+- [ ] describe_db_cluster_snapshot_attributes
+- [ ] describe_db_cluster_snapshots
+- [ ] describe_db_clusters
+- [ ] describe_db_engine_versions
+- [ ] describe_db_instances
+- [ ] describe_db_log_files
+- [ ] describe_db_parameter_groups
+- [ ] describe_db_parameters
+- [ ] describe_db_security_groups
+- [ ] describe_db_snapshot_attributes
+- [ ] describe_db_snapshots
+- [ ] describe_db_subnet_groups
+- [ ] describe_engine_default_cluster_parameters
+- [ ] describe_engine_default_parameters
+- [ ] describe_event_categories
+- [ ] describe_event_subscriptions
+- [ ] describe_events
+- [ ] describe_option_group_options
+- [ ] describe_option_groups
+- [ ] describe_orderable_db_instance_options
+- [ ] describe_pending_maintenance_actions
+- [ ] describe_reserved_db_instances
+- [ ] describe_reserved_db_instances_offerings
+- [ ] describe_source_regions
+- [ ] describe_valid_db_instance_modifications
+- [ ] download_db_log_file_portion
+- [ ] failover_db_cluster
+- [ ] list_tags_for_resource
+- [ ] modify_current_db_cluster_capacity
+- [ ] modify_db_cluster
+- [ ] modify_db_cluster_parameter_group
+- [ ] modify_db_cluster_snapshot_attribute
+- [ ] modify_db_instance
+- [ ] modify_db_parameter_group
+- [ ] modify_db_snapshot
+- [ ] modify_db_snapshot_attribute
+- [ ] modify_db_subnet_group
+- [ ] modify_event_subscription
+- [ ] modify_option_group
+- [ ] promote_read_replica
+- [ ] promote_read_replica_db_cluster
+- [ ] purchase_reserved_db_instances_offering
+- [ ] reboot_db_instance
+- [ ] remove_role_from_db_cluster
+- [ ] remove_source_identifier_from_subscription
+- [ ] remove_tags_from_resource
+- [ ] reset_db_cluster_parameter_group
+- [ ] reset_db_parameter_group
+- [ ] restore_db_cluster_from_s3
+- [ ] restore_db_cluster_from_snapshot
+- [ ] restore_db_cluster_to_point_in_time
+- [ ] restore_db_instance_from_db_snapshot
+- [ ] restore_db_instance_from_s3
+- [ ] restore_db_instance_to_point_in_time
+- [ ] revoke_db_security_group_ingress
+- [ ] start_db_instance
+- [ ] stop_db_instance
+
+## redshift - 37% implemented
+- [ ] accept_reserved_node_exchange
+- [ ] authorize_cluster_security_group_ingress
+- [ ] authorize_snapshot_access
+- [ ] copy_cluster_snapshot
+- [X] create_cluster
+- [X] create_cluster_parameter_group
+- [X] create_cluster_security_group
+- [X] create_cluster_snapshot
+- [X] create_cluster_subnet_group
+- [ ] create_event_subscription
+- [ ] create_hsm_client_certificate
+- [ ] create_hsm_configuration
+- [X] create_snapshot_copy_grant
+- [X] create_tags
+- [X] delete_cluster
+- [X] delete_cluster_parameter_group
+- [X] delete_cluster_security_group
+- [X] delete_cluster_snapshot
+- [X] delete_cluster_subnet_group
+- [ ] delete_event_subscription
+- [ ] delete_hsm_client_certificate
+- [ ] delete_hsm_configuration
+- [X] delete_snapshot_copy_grant
+- [X] delete_tags
+- [ ] describe_cluster_db_revisions
+- [X] describe_cluster_parameter_groups
+- [ ] describe_cluster_parameters
+- [X] describe_cluster_security_groups
+- [X] describe_cluster_snapshots
+- [X] describe_cluster_subnet_groups
+- [ ] describe_cluster_tracks
+- [ ] describe_cluster_versions
+- [X] describe_clusters
+- [ ] describe_default_cluster_parameters
+- [ ] describe_event_categories
+- [ ] describe_event_subscriptions
+- [ ] describe_events
+- [ ] describe_hsm_client_certificates
+- [ ] describe_hsm_configurations
+- [ ] describe_logging_status
+- [ ] describe_orderable_cluster_options
+- [ ] describe_reserved_node_offerings
+- [ ] describe_reserved_nodes
+- [ ] describe_resize
+- [X] describe_snapshot_copy_grants
+- [ ] describe_table_restore_status
+- [X] describe_tags
+- [ ] disable_logging
+- [X] disable_snapshot_copy
+- [ ] enable_logging
+- [X] enable_snapshot_copy
+- [ ] get_cluster_credentials
+- [ ] get_reserved_node_exchange_offerings
+- [X] modify_cluster
+- [ ] modify_cluster_db_revision
+- [ ] modify_cluster_iam_roles
+- [ ] modify_cluster_parameter_group
+- [ ] modify_cluster_subnet_group
+- [ ] modify_event_subscription
+- [X] modify_snapshot_copy_retention_period
+- [ ] purchase_reserved_node_offering
+- [ ] reboot_cluster
+- [ ] reset_cluster_parameter_group
+- [ ] resize_cluster
+- [X] restore_from_cluster_snapshot
+- [ ] restore_table_from_cluster_snapshot
+- [ ] revoke_cluster_security_group_ingress
+- [ ] revoke_snapshot_access
+- [ ] rotate_encryption_key
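+
+Example (a minimal sketch of the mocked Redshift backend; identifier, node type, and credentials are illustrative):
+
+```python
+import boto3
+from moto import mock_redshift
+
+
+@mock_redshift
+def test_create_and_describe_cluster():
+    client = boto3.client("redshift", region_name="us-east-1")
+    client.create_cluster(
+        ClusterIdentifier="my-cluster",
+        NodeType="ds2.xlarge",
+        MasterUsername="admin",
+        MasterUserPassword="Password1",
+    )
+    clusters = client.describe_clusters()["Clusters"]
+    assert clusters[0]["ClusterIdentifier"] == "my-cluster"
+```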
+
+## rekognition - 0% implemented
+- [ ] compare_faces
+- [ ] create_collection
+- [ ] create_stream_processor
+- [ ] delete_collection
+- [ ] delete_faces
+- [ ] delete_stream_processor
+- [ ] describe_collection
+- [ ] describe_stream_processor
+- [ ] detect_faces
+- [ ] detect_labels
+- [ ] detect_moderation_labels
+- [ ] detect_text
+- [ ] get_celebrity_info
+- [ ] get_celebrity_recognition
+- [ ] get_content_moderation
+- [ ] get_face_detection
+- [ ] get_face_search
+- [ ] get_label_detection
+- [ ] get_person_tracking
+- [ ] index_faces
+- [ ] list_collections
+- [ ] list_faces
+- [ ] list_stream_processors
+- [ ] recognize_celebrities
+- [ ] search_faces
+- [ ] search_faces_by_image
+- [ ] start_celebrity_recognition
+- [ ] start_content_moderation
+- [ ] start_face_detection
+- [ ] start_face_search
+- [ ] start_label_detection
+- [ ] start_person_tracking
+- [ ] start_stream_processor
+- [ ] stop_stream_processor
+
+## resource-groups - 0% implemented
+- [ ] create_group
+- [ ] delete_group
+- [ ] get_group
+- [ ] get_group_query
+- [ ] get_tags
+- [ ] list_group_resources
+- [ ] list_groups
+- [ ] search_resources
+- [ ] tag
+- [ ] untag
+- [ ] update_group
+- [ ] update_group_query
+
+## resourcegroupstaggingapi - 60% implemented
+- [X] get_resources
+- [X] get_tag_keys
+- [X] get_tag_values
+- [ ] tag_resources
+- [ ] untag_resources
+
+## route53 - 12% implemented
+- [ ] associate_vpc_with_hosted_zone
+- [ ] change_resource_record_sets
+- [X] change_tags_for_resource
+- [X] create_health_check
+- [X] create_hosted_zone
+- [ ] create_query_logging_config
+- [ ] create_reusable_delegation_set
+- [ ] create_traffic_policy
+- [ ] create_traffic_policy_instance
+- [ ] create_traffic_policy_version
+- [ ] create_vpc_association_authorization
+- [X] delete_health_check
+- [X] delete_hosted_zone
+- [ ] delete_query_logging_config
+- [ ] delete_reusable_delegation_set
+- [ ] delete_traffic_policy
+- [ ] delete_traffic_policy_instance
+- [ ] delete_vpc_association_authorization
+- [ ] disassociate_vpc_from_hosted_zone
+- [ ] get_account_limit
+- [ ] get_change
+- [ ] get_checker_ip_ranges
+- [ ] get_geo_location
+- [ ] get_health_check
+- [ ] get_health_check_count
+- [ ] get_health_check_last_failure_reason
+- [ ] get_health_check_status
+- [X] get_hosted_zone
+- [ ] get_hosted_zone_count
+- [ ] get_hosted_zone_limit
+- [ ] get_query_logging_config
+- [ ] get_reusable_delegation_set
+- [ ] get_reusable_delegation_set_limit
+- [ ] get_traffic_policy
+- [ ] get_traffic_policy_instance
+- [ ] get_traffic_policy_instance_count
+- [ ] list_geo_locations
+- [ ] list_health_checks
+- [ ] list_hosted_zones
+- [ ] list_hosted_zones_by_name
+- [ ] list_query_logging_configs
+- [ ] list_resource_record_sets
+- [ ] list_reusable_delegation_sets
+- [X] list_tags_for_resource
+- [ ] list_tags_for_resources
+- [ ] list_traffic_policies
+- [ ] list_traffic_policy_instances
+- [ ] list_traffic_policy_instances_by_hosted_zone
+- [ ] list_traffic_policy_instances_by_policy
+- [ ] list_traffic_policy_versions
+- [ ] list_vpc_association_authorizations
+- [ ] test_dns_answer
+- [ ] update_health_check
+- [ ] update_hosted_zone_comment
+- [ ] update_traffic_policy_comment
+- [ ] update_traffic_policy_instance
+
+## route53domains - 0% implemented
+- [ ] check_domain_availability
+- [ ] check_domain_transferability
+- [ ] delete_tags_for_domain
+- [ ] disable_domain_auto_renew
+- [ ] disable_domain_transfer_lock
+- [ ] enable_domain_auto_renew
+- [ ] enable_domain_transfer_lock
+- [ ] get_contact_reachability_status
+- [ ] get_domain_detail
+- [ ] get_domain_suggestions
+- [ ] get_operation_detail
+- [ ] list_domains
+- [ ] list_operations
+- [ ] list_tags_for_domain
+- [ ] register_domain
+- [ ] renew_domain
+- [ ] resend_contact_reachability_email
+- [ ] retrieve_domain_auth_code
+- [ ] transfer_domain
+- [ ] update_domain_contact
+- [ ] update_domain_contact_privacy
+- [ ] update_domain_nameservers
+- [ ] update_tags_for_domain
+- [ ] view_billing
+
+## s3 - 15% implemented
+- [ ] abort_multipart_upload
+- [ ] complete_multipart_upload
+- [ ] copy_object
+- [X] create_bucket
+- [ ] create_multipart_upload
+- [X] delete_bucket
+- [ ] delete_bucket_analytics_configuration
+- [X] delete_bucket_cors
+- [ ] delete_bucket_encryption
+- [ ] delete_bucket_inventory_configuration
+- [ ] delete_bucket_lifecycle
+- [ ] delete_bucket_metrics_configuration
+- [X] delete_bucket_policy
+- [ ] delete_bucket_replication
+- [X] delete_bucket_tagging
+- [ ] delete_bucket_website
+- [ ] delete_object
+- [ ] delete_object_tagging
+- [ ] delete_objects
+- [ ] get_bucket_accelerate_configuration
+- [X] get_bucket_acl
+- [ ] get_bucket_analytics_configuration
+- [ ] get_bucket_cors
+- [ ] get_bucket_encryption
+- [ ] get_bucket_inventory_configuration
+- [ ] get_bucket_lifecycle
+- [ ] get_bucket_lifecycle_configuration
+- [ ] get_bucket_location
+- [ ] get_bucket_logging
+- [ ] get_bucket_metrics_configuration
+- [ ] get_bucket_notification
+- [ ] get_bucket_notification_configuration
+- [X] get_bucket_policy
+- [ ] get_bucket_replication
+- [ ] get_bucket_request_payment
+- [ ] get_bucket_tagging
+- [X] get_bucket_versioning
+- [ ] get_bucket_website
+- [ ] get_object
+- [ ] get_object_acl
+- [ ] get_object_tagging
+- [ ] get_object_torrent
+- [ ] head_bucket
+- [ ] head_object
+- [ ] list_bucket_analytics_configurations
+- [ ] list_bucket_inventory_configurations
+- [ ] list_bucket_metrics_configurations
+- [ ] list_buckets
+- [ ] list_multipart_uploads
+- [ ] list_object_versions
+- [ ] list_objects
+- [ ] list_objects_v2
+- [ ] list_parts
+- [ ] put_bucket_accelerate_configuration
+- [ ] put_bucket_acl
+- [ ] put_bucket_analytics_configuration
+- [X] put_bucket_cors
+- [ ] put_bucket_encryption
+- [ ] put_bucket_inventory_configuration
+- [ ] put_bucket_lifecycle
+- [ ] put_bucket_lifecycle_configuration
+- [X] put_bucket_logging
+- [ ] put_bucket_metrics_configuration
+- [ ] put_bucket_notification
+- [X] put_bucket_notification_configuration
+- [ ] put_bucket_policy
+- [ ] put_bucket_replication
+- [ ] put_bucket_request_payment
+- [X] put_bucket_tagging
+- [ ] put_bucket_versioning
+- [ ] put_bucket_website
+- [ ] put_object
+- [ ] put_object_acl
+- [ ] put_object_tagging
+- [ ] restore_object
+- [ ] select_object_content
+- [ ] upload_part
+- [ ] upload_part_copy
+
+## sagemaker - 0% implemented
+- [ ] add_tags
+- [ ] create_endpoint
+- [ ] create_endpoint_config
+- [ ] create_hyper_parameter_tuning_job
+- [ ] create_model
+- [ ] create_notebook_instance
+- [ ] create_notebook_instance_lifecycle_config
+- [ ] create_presigned_notebook_instance_url
+- [ ] create_training_job
+- [ ] create_transform_job
+- [ ] delete_endpoint
+- [ ] delete_endpoint_config
+- [ ] delete_model
+- [ ] delete_notebook_instance
+- [ ] delete_notebook_instance_lifecycle_config
+- [ ] delete_tags
+- [ ] describe_endpoint
+- [ ] describe_endpoint_config
+- [ ] describe_hyper_parameter_tuning_job
+- [ ] describe_model
+- [ ] describe_notebook_instance
+- [ ] describe_notebook_instance_lifecycle_config
+- [ ] describe_training_job
+- [ ] describe_transform_job
+- [ ] list_endpoint_configs
+- [ ] list_endpoints
+- [ ] list_hyper_parameter_tuning_jobs
+- [ ] list_models
+- [ ] list_notebook_instance_lifecycle_configs
+- [ ] list_notebook_instances
+- [ ] list_tags
+- [ ] list_training_jobs
+- [ ] list_training_jobs_for_hyper_parameter_tuning_job
+- [ ] list_transform_jobs
+- [ ] start_notebook_instance
+- [ ] stop_hyper_parameter_tuning_job
+- [ ] stop_notebook_instance
+- [ ] stop_training_job
+- [ ] stop_transform_job
+- [ ] update_endpoint
+- [ ] update_endpoint_weights_and_capacities
+- [ ] update_notebook_instance
+- [ ] update_notebook_instance_lifecycle_config
+
+## sagemaker-runtime - 0% implemented
+- [ ] invoke_endpoint
+
+## sdb - 0% implemented
+- [ ] batch_delete_attributes
+- [ ] batch_put_attributes
+- [ ] create_domain
+- [ ] delete_attributes
+- [ ] delete_domain
+- [ ] domain_metadata
+- [ ] get_attributes
+- [ ] list_domains
+- [ ] put_attributes
+- [ ] select
+
+## secretsmanager - 27% implemented
+- [ ] cancel_rotate_secret
+- [X] create_secret
+- [ ] delete_resource_policy
+- [ ] delete_secret
+- [X] describe_secret
+- [X] get_random_password
+- [ ] get_resource_policy
+- [X] get_secret_value
+- [ ] list_secret_version_ids
+- [ ] list_secrets
+- [ ] put_resource_policy
+- [ ] put_secret_value
+- [ ] restore_secret
+- [X] rotate_secret
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_secret
+- [ ] update_secret_version_stage
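+
+Example (a minimal sketch of a secret round trip through the mocked backend; name and value are illustrative):
+
+```python
+import boto3
+from moto import mock_secretsmanager
+
+
+@mock_secretsmanager
+def test_create_and_get_secret():
+    client = boto3.client("secretsmanager", region_name="us-east-1")
+    client.create_secret(Name="my-secret", SecretString="s3cr3t")
+    # The secret can be looked up by its name.
+    value = client.get_secret_value(SecretId="my-secret")
+    assert value["SecretString"] == "s3cr3t"
+```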
disassociate_tag_option_from_resource +- [ ] execute_provisioned_product_plan +- [ ] list_accepted_portfolio_shares +- [ ] list_constraints_for_portfolio +- [ ] list_launch_paths +- [ ] list_portfolio_access +- [ ] list_portfolios +- [ ] list_portfolios_for_product +- [ ] list_principals_for_portfolio +- [ ] list_provisioned_product_plans +- [ ] list_provisioning_artifacts +- [ ] list_record_history +- [ ] list_resources_for_tag_option +- [ ] list_tag_options +- [ ] provision_product +- [ ] reject_portfolio_share +- [ ] scan_provisioned_products +- [ ] search_products +- [ ] search_products_as_admin +- [ ] search_provisioned_products +- [ ] terminate_provisioned_product +- [ ] update_constraint +- [ ] update_portfolio +- [ ] update_product +- [ ] update_provisioned_product +- [ ] update_provisioning_artifact +- [ ] update_tag_option + +## servicediscovery - 0% implemented +- [ ] create_private_dns_namespace +- [ ] create_public_dns_namespace +- [ ] create_service +- [ ] delete_namespace +- [ ] delete_service +- [ ] deregister_instance +- [ ] get_instance +- [ ] get_instances_health_status +- [ ] get_namespace +- [ ] get_operation +- [ ] get_service +- [ ] list_instances +- [ ] list_namespaces +- [ ] list_operations +- [ ] list_services +- [ ] register_instance +- [ ] update_instance_custom_health_status +- [ ] update_service + +## ses - 11% implemented +- [ ] clone_receipt_rule_set +- [ ] create_configuration_set +- [ ] create_configuration_set_event_destination +- [ ] create_configuration_set_tracking_options +- [ ] create_custom_verification_email_template +- [ ] create_receipt_filter +- [ ] create_receipt_rule +- [ ] create_receipt_rule_set +- [ ] create_template +- [ ] delete_configuration_set +- [ ] delete_configuration_set_event_destination +- [ ] delete_configuration_set_tracking_options +- [ ] delete_custom_verification_email_template +- [X] delete_identity +- [ ] delete_identity_policy +- [ ] delete_receipt_filter +- [ ] delete_receipt_rule +- [ ] delete_receipt_rule_set +- [ ] delete_template +- [ ] delete_verified_email_address +- [ ] describe_active_receipt_rule_set +- [ ] describe_configuration_set +- [ ] describe_receipt_rule +- [ ] describe_receipt_rule_set +- [ ] get_account_sending_enabled +- [ ] get_custom_verification_email_template +- [ ] get_identity_dkim_attributes +- [ ] get_identity_mail_from_domain_attributes +- [ ] get_identity_notification_attributes +- [ ] get_identity_policies +- [ ] get_identity_verification_attributes +- [X] get_send_quota +- [ ] get_send_statistics +- [ ] get_template +- [ ] list_configuration_sets +- [ ] list_custom_verification_email_templates +- [X] list_identities +- [ ] list_identity_policies +- [ ] list_receipt_filters +- [ ] list_receipt_rule_sets +- [ ] list_templates +- [X] list_verified_email_addresses +- [ ] put_identity_policy +- [ ] reorder_receipt_rule_set +- [ ] send_bounce +- [ ] send_bulk_templated_email +- [ ] send_custom_verification_email +- [X] send_email +- [X] send_raw_email +- [ ] send_templated_email +- [ ] set_active_receipt_rule_set +- [ ] set_identity_dkim_enabled +- [ ] set_identity_feedback_forwarding_enabled +- [ ] set_identity_headers_in_notifications_enabled +- [ ] set_identity_mail_from_domain +- [ ] set_identity_notification_topic +- [ ] set_receipt_rule_position +- [ ] test_render_template +- [ ] update_account_sending_enabled +- [ ] update_configuration_set_event_destination +- [ ] update_configuration_set_reputation_metrics_enabled +- [ ] update_configuration_set_sending_enabled +- [ ] 
update_configuration_set_tracking_options +- [ ] update_custom_verification_email_template +- [ ] update_receipt_rule +- [ ] update_template +- [ ] verify_domain_dkim +- [ ] verify_domain_identity +- [X] verify_email_address +- [X] verify_email_identity + +## shield - 0% implemented +- [ ] associate_drt_log_bucket +- [ ] associate_drt_role +- [ ] create_protection +- [ ] create_subscription +- [ ] delete_protection +- [ ] delete_subscription +- [ ] describe_attack +- [ ] describe_drt_access +- [ ] describe_emergency_contact_settings +- [ ] describe_protection +- [ ] describe_subscription +- [ ] disassociate_drt_log_bucket +- [ ] disassociate_drt_role +- [ ] get_subscription_state +- [ ] list_attacks +- [ ] list_protections +- [ ] update_emergency_contact_settings +- [ ] update_subscription + +## sms - 0% implemented +- [ ] create_replication_job +- [ ] delete_replication_job +- [ ] delete_server_catalog +- [ ] disassociate_connector +- [ ] get_connectors +- [ ] get_replication_jobs +- [ ] get_replication_runs +- [ ] get_servers +- [ ] import_server_catalog +- [ ] start_on_demand_replication_run +- [ ] update_replication_job + +## snowball - 0% implemented +- [ ] cancel_cluster +- [ ] cancel_job +- [ ] create_address +- [ ] create_cluster +- [ ] create_job +- [ ] describe_address +- [ ] describe_addresses +- [ ] describe_cluster +- [ ] describe_job +- [ ] get_job_manifest +- [ ] get_job_unlock_code +- [ ] get_snowball_usage +- [ ] list_cluster_jobs +- [ ] list_clusters +- [ ] list_compatible_images +- [ ] list_jobs +- [ ] update_cluster +- [ ] update_job + +## sns - 53% implemented +- [ ] add_permission +- [ ] check_if_phone_number_is_opted_out +- [ ] confirm_subscription +- [X] create_platform_application +- [X] create_platform_endpoint +- [X] create_topic +- [X] delete_endpoint +- [X] delete_platform_application +- [X] delete_topic +- [ ] get_endpoint_attributes +- [ ] get_platform_application_attributes +- [ ] get_sms_attributes +- [X] get_subscription_attributes +- [ ] get_topic_attributes +- [X] list_endpoints_by_platform_application +- [ ] list_phone_numbers_opted_out +- [X] list_platform_applications +- [X] list_subscriptions +- [ ] list_subscriptions_by_topic +- [X] list_topics +- [ ] opt_in_phone_number +- [X] publish +- [ ] remove_permission +- [X] set_endpoint_attributes +- [ ] set_platform_application_attributes +- [ ] set_sms_attributes +- [X] set_subscription_attributes +- [ ] set_topic_attributes +- [X] subscribe +- [X] unsubscribe + +## sqs - 65% implemented +- [X] add_permission +- [X] change_message_visibility +- [ ] change_message_visibility_batch +- [X] create_queue +- [X] delete_message +- [ ] delete_message_batch +- [X] delete_queue +- [ ] get_queue_attributes +- [ ] get_queue_url +- [X] list_dead_letter_source_queues +- [ ] list_queue_tags +- [X] list_queues +- [X] purge_queue +- [ ] receive_message +- [X] remove_permission +- [X] send_message +- [ ] send_message_batch +- [X] set_queue_attributes +- [X] tag_queue +- [X] untag_queue + +## ssm - 11% implemented +- [X] add_tags_to_resource +- [ ] cancel_command +- [ ] create_activation +- [ ] create_association +- [ ] create_association_batch +- [ ] create_document +- [ ] create_maintenance_window +- [ ] create_patch_baseline +- [ ] create_resource_data_sync +- [ ] delete_activation +- [ ] delete_association +- [ ] delete_document +- [ ] delete_inventory +- [ ] delete_maintenance_window +- [X] delete_parameter +- [X] delete_parameters +- [ ] delete_patch_baseline +- [ ] delete_resource_data_sync +- [ ] 
deregister_managed_instance +- [ ] deregister_patch_baseline_for_patch_group +- [ ] deregister_target_from_maintenance_window +- [ ] deregister_task_from_maintenance_window +- [ ] describe_activations +- [ ] describe_association +- [ ] describe_association_execution_targets +- [ ] describe_association_executions +- [ ] describe_automation_executions +- [ ] describe_automation_step_executions +- [ ] describe_available_patches +- [ ] describe_document +- [ ] describe_document_permission +- [ ] describe_effective_instance_associations +- [ ] describe_effective_patches_for_patch_baseline +- [ ] describe_instance_associations_status +- [ ] describe_instance_information +- [ ] describe_instance_patch_states +- [ ] describe_instance_patch_states_for_patch_group +- [ ] describe_instance_patches +- [ ] describe_inventory_deletions +- [ ] describe_maintenance_window_execution_task_invocations +- [ ] describe_maintenance_window_execution_tasks +- [ ] describe_maintenance_window_executions +- [ ] describe_maintenance_window_targets +- [ ] describe_maintenance_window_tasks +- [ ] describe_maintenance_windows +- [ ] describe_parameters +- [ ] describe_patch_baselines +- [ ] describe_patch_group_state +- [ ] describe_patch_groups +- [ ] get_automation_execution +- [X] get_command_invocation +- [ ] get_default_patch_baseline +- [ ] get_deployable_patch_snapshot_for_instance +- [ ] get_document +- [ ] get_inventory +- [ ] get_inventory_schema +- [ ] get_maintenance_window +- [ ] get_maintenance_window_execution +- [ ] get_maintenance_window_execution_task +- [ ] get_maintenance_window_execution_task_invocation +- [ ] get_maintenance_window_task +- [X] get_parameter +- [ ] get_parameter_history +- [X] get_parameters +- [X] get_parameters_by_path +- [ ] get_patch_baseline +- [ ] get_patch_baseline_for_patch_group +- [ ] label_parameter_version +- [ ] list_association_versions +- [ ] list_associations +- [ ] list_command_invocations +- [X] list_commands +- [ ] list_compliance_items +- [ ] list_compliance_summaries +- [ ] list_document_versions +- [ ] list_documents +- [ ] list_inventory_entries +- [ ] list_resource_compliance_summaries +- [ ] list_resource_data_sync +- [X] list_tags_for_resource +- [ ] modify_document_permission +- [ ] put_compliance_items +- [ ] put_inventory +- [X] put_parameter +- [ ] register_default_patch_baseline +- [ ] register_patch_baseline_for_patch_group +- [ ] register_target_with_maintenance_window +- [ ] register_task_with_maintenance_window +- [X] remove_tags_from_resource +- [ ] send_automation_signal +- [X] send_command +- [ ] start_associations_once +- [ ] start_automation_execution +- [ ] stop_automation_execution +- [ ] update_association +- [ ] update_association_status +- [ ] update_document +- [ ] update_document_default_version +- [ ] update_maintenance_window +- [ ] update_maintenance_window_target +- [ ] update_maintenance_window_task +- [ ] update_managed_instance_role +- [ ] update_patch_baseline + +## stepfunctions - 0% implemented +- [ ] create_activity +- [ ] create_state_machine +- [ ] delete_activity +- [ ] delete_state_machine +- [ ] describe_activity +- [ ] describe_execution +- [ ] describe_state_machine +- [ ] describe_state_machine_for_execution +- [ ] get_activity_task +- [ ] get_execution_history +- [ ] list_activities +- [ ] list_executions +- [ ] list_state_machines +- [ ] send_task_failure +- [ ] send_task_heartbeat +- [ ] send_task_success +- [ ] start_execution +- [ ] stop_execution +- [ ] update_state_machine + +## storagegateway - 0% implemented 
+- [ ] activate_gateway +- [ ] add_cache +- [ ] add_tags_to_resource +- [ ] add_upload_buffer +- [ ] add_working_storage +- [ ] cancel_archival +- [ ] cancel_retrieval +- [ ] create_cached_iscsi_volume +- [ ] create_nfs_file_share +- [ ] create_smb_file_share +- [ ] create_snapshot +- [ ] create_snapshot_from_volume_recovery_point +- [ ] create_stored_iscsi_volume +- [ ] create_tape_with_barcode +- [ ] create_tapes +- [ ] delete_bandwidth_rate_limit +- [ ] delete_chap_credentials +- [ ] delete_file_share +- [ ] delete_gateway +- [ ] delete_snapshot_schedule +- [ ] delete_tape +- [ ] delete_tape_archive +- [ ] delete_volume +- [ ] describe_bandwidth_rate_limit +- [ ] describe_cache +- [ ] describe_cached_iscsi_volumes +- [ ] describe_chap_credentials +- [ ] describe_gateway_information +- [ ] describe_maintenance_start_time +- [ ] describe_nfs_file_shares +- [ ] describe_smb_file_shares +- [ ] describe_smb_settings +- [ ] describe_snapshot_schedule +- [ ] describe_stored_iscsi_volumes +- [ ] describe_tape_archives +- [ ] describe_tape_recovery_points +- [ ] describe_tapes +- [ ] describe_upload_buffer +- [ ] describe_vtl_devices +- [ ] describe_working_storage +- [ ] disable_gateway +- [ ] join_domain +- [ ] list_file_shares +- [ ] list_gateways +- [ ] list_local_disks +- [ ] list_tags_for_resource +- [ ] list_tapes +- [ ] list_volume_initiators +- [ ] list_volume_recovery_points +- [ ] list_volumes +- [ ] notify_when_uploaded +- [ ] refresh_cache +- [ ] remove_tags_from_resource +- [ ] reset_cache +- [ ] retrieve_tape_archive +- [ ] retrieve_tape_recovery_point +- [ ] set_local_console_password +- [ ] set_smb_guest_password +- [ ] shutdown_gateway +- [ ] start_gateway +- [ ] update_bandwidth_rate_limit +- [ ] update_chap_credentials +- [ ] update_gateway_information +- [ ] update_gateway_software_now +- [ ] update_maintenance_start_time +- [ ] update_nfs_file_share +- [ ] update_smb_file_share +- [ ] update_snapshot_schedule +- [ ] update_vtl_device_type + +## sts - 42% implemented +- [X] assume_role +- [ ] assume_role_with_saml +- [ ] assume_role_with_web_identity +- [ ] decode_authorization_message +- [ ] get_caller_identity +- [X] get_federation_token +- [X] get_session_token + +## support - 0% implemented +- [ ] add_attachments_to_set +- [ ] add_communication_to_case +- [ ] create_case +- [ ] describe_attachment +- [ ] describe_cases +- [ ] describe_communications +- [ ] describe_services +- [ ] describe_severity_levels +- [ ] describe_trusted_advisor_check_refresh_statuses +- [ ] describe_trusted_advisor_check_result +- [ ] describe_trusted_advisor_check_summaries +- [ ] describe_trusted_advisor_checks +- [ ] refresh_trusted_advisor_check +- [ ] resolve_case + +## swf - 58% implemented +- [ ] count_closed_workflow_executions +- [ ] count_open_workflow_executions +- [X] count_pending_activity_tasks +- [X] count_pending_decision_tasks +- [ ] deprecate_activity_type +- [X] deprecate_domain +- [ ] deprecate_workflow_type +- [ ] describe_activity_type +- [X] describe_domain +- [X] describe_workflow_execution +- [ ] describe_workflow_type +- [ ] get_workflow_execution_history +- [ ] list_activity_types +- [X] list_closed_workflow_executions +- [X] list_domains +- [X] list_open_workflow_executions +- [ ] list_workflow_types +- [X] poll_for_activity_task +- [X] poll_for_decision_task +- [X] record_activity_task_heartbeat +- [ ] register_activity_type +- [X] register_domain +- [ ] register_workflow_type +- [ ] request_cancel_workflow_execution +- [ ] respond_activity_task_canceled +- [X] 
respond_activity_task_completed +- [X] respond_activity_task_failed +- [X] respond_decision_task_completed +- [X] signal_workflow_execution +- [X] start_workflow_execution +- [X] terminate_workflow_execution + +## transcribe - 0% implemented +- [ ] create_vocabulary +- [ ] delete_vocabulary +- [ ] get_transcription_job +- [ ] get_vocabulary +- [ ] list_transcription_jobs +- [ ] list_vocabularies +- [ ] start_transcription_job +- [ ] update_vocabulary + +## translate - 0% implemented +- [ ] translate_text + +## waf - 0% implemented +- [ ] create_byte_match_set +- [ ] create_geo_match_set +- [ ] create_ip_set +- [ ] create_rate_based_rule +- [ ] create_regex_match_set +- [ ] create_regex_pattern_set +- [ ] create_rule +- [ ] create_rule_group +- [ ] create_size_constraint_set +- [ ] create_sql_injection_match_set +- [ ] create_web_acl +- [ ] create_xss_match_set +- [ ] delete_byte_match_set +- [ ] delete_geo_match_set +- [ ] delete_ip_set +- [ ] delete_permission_policy +- [ ] delete_rate_based_rule +- [ ] delete_regex_match_set +- [ ] delete_regex_pattern_set +- [ ] delete_rule +- [ ] delete_rule_group +- [ ] delete_size_constraint_set +- [ ] delete_sql_injection_match_set +- [ ] delete_web_acl +- [ ] delete_xss_match_set +- [ ] get_byte_match_set +- [ ] get_change_token +- [ ] get_change_token_status +- [ ] get_geo_match_set +- [ ] get_ip_set +- [ ] get_permission_policy +- [ ] get_rate_based_rule +- [ ] get_rate_based_rule_managed_keys +- [ ] get_regex_match_set +- [ ] get_regex_pattern_set +- [ ] get_rule +- [ ] get_rule_group +- [ ] get_sampled_requests +- [ ] get_size_constraint_set +- [ ] get_sql_injection_match_set +- [ ] get_web_acl +- [ ] get_xss_match_set +- [ ] list_activated_rules_in_rule_group +- [ ] list_byte_match_sets +- [ ] list_geo_match_sets +- [ ] list_ip_sets +- [ ] list_rate_based_rules +- [ ] list_regex_match_sets +- [ ] list_regex_pattern_sets +- [ ] list_rule_groups +- [ ] list_rules +- [ ] list_size_constraint_sets +- [ ] list_sql_injection_match_sets +- [ ] list_subscribed_rule_groups +- [ ] list_web_acls +- [ ] list_xss_match_sets +- [ ] put_permission_policy +- [ ] update_byte_match_set +- [ ] update_geo_match_set +- [ ] update_ip_set +- [ ] update_rate_based_rule +- [ ] update_regex_match_set +- [ ] update_regex_pattern_set +- [ ] update_rule +- [ ] update_rule_group +- [ ] update_size_constraint_set +- [ ] update_sql_injection_match_set +- [ ] update_web_acl +- [ ] update_xss_match_set + +## waf-regional - 0% implemented +- [ ] associate_web_acl +- [ ] create_byte_match_set +- [ ] create_geo_match_set +- [ ] create_ip_set +- [ ] create_rate_based_rule +- [ ] create_regex_match_set +- [ ] create_regex_pattern_set +- [ ] create_rule +- [ ] create_rule_group +- [ ] create_size_constraint_set +- [ ] create_sql_injection_match_set +- [ ] create_web_acl +- [ ] create_xss_match_set +- [ ] delete_byte_match_set +- [ ] delete_geo_match_set +- [ ] delete_ip_set +- [ ] delete_permission_policy +- [ ] delete_rate_based_rule +- [ ] delete_regex_match_set +- [ ] delete_regex_pattern_set +- [ ] delete_rule +- [ ] delete_rule_group +- [ ] delete_size_constraint_set +- [ ] delete_sql_injection_match_set +- [ ] delete_web_acl +- [ ] delete_xss_match_set +- [ ] disassociate_web_acl +- [ ] get_byte_match_set +- [ ] get_change_token +- [ ] get_change_token_status +- [ ] get_geo_match_set +- [ ] get_ip_set +- [ ] get_permission_policy +- [ ] get_rate_based_rule +- [ ] get_rate_based_rule_managed_keys +- [ ] get_regex_match_set +- [ ] get_regex_pattern_set +- [ ] get_rule +- [ ] 
get_rule_group +- [ ] get_sampled_requests +- [ ] get_size_constraint_set +- [ ] get_sql_injection_match_set +- [ ] get_web_acl +- [ ] get_web_acl_for_resource +- [ ] get_xss_match_set +- [ ] list_activated_rules_in_rule_group +- [ ] list_byte_match_sets +- [ ] list_geo_match_sets +- [ ] list_ip_sets +- [ ] list_rate_based_rules +- [ ] list_regex_match_sets +- [ ] list_regex_pattern_sets +- [ ] list_resources_for_web_acl +- [ ] list_rule_groups +- [ ] list_rules +- [ ] list_size_constraint_sets +- [ ] list_sql_injection_match_sets +- [ ] list_subscribed_rule_groups +- [ ] list_web_acls +- [ ] list_xss_match_sets +- [ ] put_permission_policy +- [ ] update_byte_match_set +- [ ] update_geo_match_set +- [ ] update_ip_set +- [ ] update_rate_based_rule +- [ ] update_regex_match_set +- [ ] update_regex_pattern_set +- [ ] update_rule +- [ ] update_rule_group +- [ ] update_size_constraint_set +- [ ] update_sql_injection_match_set +- [ ] update_web_acl +- [ ] update_xss_match_set + +## workdocs - 0% implemented +- [ ] abort_document_version_upload +- [ ] activate_user +- [ ] add_resource_permissions +- [ ] create_comment +- [ ] create_custom_metadata +- [ ] create_folder +- [ ] create_labels +- [ ] create_notification_subscription +- [ ] create_user +- [ ] deactivate_user +- [ ] delete_comment +- [ ] delete_custom_metadata +- [ ] delete_document +- [ ] delete_folder +- [ ] delete_folder_contents +- [ ] delete_labels +- [ ] delete_notification_subscription +- [ ] delete_user +- [ ] describe_activities +- [ ] describe_comments +- [ ] describe_document_versions +- [ ] describe_folder_contents +- [ ] describe_groups +- [ ] describe_notification_subscriptions +- [ ] describe_resource_permissions +- [ ] describe_root_folders +- [ ] describe_users +- [ ] get_current_user +- [ ] get_document +- [ ] get_document_path +- [ ] get_document_version +- [ ] get_folder +- [ ] get_folder_path +- [ ] initiate_document_version_upload +- [ ] remove_all_resource_permissions +- [ ] remove_resource_permission +- [ ] update_document +- [ ] update_document_version +- [ ] update_folder +- [ ] update_user + +## workmail - 0% implemented +- [ ] associate_delegate_to_resource +- [ ] associate_member_to_group +- [ ] create_alias +- [ ] create_group +- [ ] create_resource +- [ ] create_user +- [ ] delete_alias +- [ ] delete_group +- [ ] delete_mailbox_permissions +- [ ] delete_resource +- [ ] delete_user +- [ ] deregister_from_work_mail +- [ ] describe_group +- [ ] describe_organization +- [ ] describe_resource +- [ ] describe_user +- [ ] disassociate_delegate_from_resource +- [ ] disassociate_member_from_group +- [ ] list_aliases +- [ ] list_group_members +- [ ] list_groups +- [ ] list_mailbox_permissions +- [ ] list_organizations +- [ ] list_resource_delegates +- [ ] list_resources +- [ ] list_users +- [ ] put_mailbox_permissions +- [ ] register_to_work_mail +- [ ] reset_password +- [ ] update_primary_email_address +- [ ] update_resource + +## workspaces - 0% implemented +- [ ] associate_ip_groups +- [ ] authorize_ip_rules +- [ ] create_ip_group +- [ ] create_tags +- [ ] create_workspaces +- [ ] delete_ip_group +- [ ] delete_tags +- [ ] describe_ip_groups +- [ ] describe_tags +- [ ] describe_workspace_bundles +- [ ] describe_workspace_directories +- [ ] describe_workspaces +- [ ] describe_workspaces_connection_status +- [ ] disassociate_ip_groups +- [ ] modify_workspace_properties +- [ ] modify_workspace_state +- [ ] reboot_workspaces +- [ ] rebuild_workspaces +- [ ] revoke_ip_rules +- [ ] start_workspaces +- [ ] 
stop_workspaces +- [ ] terminate_workspaces +- [ ] update_rules_of_ip_group + +## xray - 0% implemented +- [ ] batch_get_traces +- [ ] get_encryption_config +- [ ] get_service_graph +- [ ] get_trace_graph +- [ ] get_trace_summaries +- [ ] put_encryption_config +- [ ] put_telemetry_records +- [ ] put_trace_segments From e51d1bfade08dc11bde27990e118ff38aa654476 Mon Sep 17 00:00:00 2001 From: Stephan Date: Fri, 21 Dec 2018 12:28:56 +0100 Subject: [PATCH 008/125] merge --- .travis.yml | 100 +- requirements-dev.txt | 34 +- setup.py | 142 +- .../single_instance_with_ebs_volume.py | 690 +-- tests/test_cloudformation/fixtures/vpc_eip.py | 24 +- tests/test_cloudformation/fixtures/vpc_eni.py | 68 +- .../fixtures/vpc_single_instance_in_subnet.py | 816 +-- .../test_cloudformation_stack_crud.py | 1344 ++--- .../test_cloudformation_stack_crud_boto3.py | 1590 ++--- .../test_cloudformation_stack_integration.py | 4854 ++++++++-------- .../test_cloudformation/test_import_value.py | 174 +- tests/test_cloudformation/test_server.py | 66 +- .../test_cloudformation/test_stack_parsing.py | 942 +-- tests/test_cloudwatch/test_cloudwatch.py | 246 +- .../test_cloudwatch/test_cloudwatch_boto3.py | 448 +- .../test_cognitoidentity.py | 170 +- tests/test_cognitoidentity/test_server.py | 90 +- tests/test_cognitoidp/test_cognitoidp.py | 1202 ++-- tests/test_core/test_decorator_calls.py | 196 +- tests/test_core/test_instance_metadata.py | 92 +- tests/test_core/test_moto_api.py | 66 +- tests/test_core/test_nested.py | 58 +- tests/test_core/test_responses.py | 162 +- tests/test_core/test_server.py | 106 +- tests/test_core/test_url_mapping.py | 44 +- tests/test_core/test_utils.py | 60 +- tests/test_datapipeline/test_datapipeline.py | 408 +- tests/test_datapipeline/test_server.py | 56 +- tests/test_dynamodb/test_dynamodb.py | 108 +- .../test_dynamodb_table_with_range_key.py | 1052 ++-- .../test_dynamodb_table_without_range_key.py | 860 +-- tests/test_dynamodb/test_server.py | 40 +- tests/test_dynamodb2/test_dynamodb.py | 2676 ++++----- .../test_dynamodb_table_with_range_key.py | 3926 ++++++------- .../test_dynamodb_table_without_range_key.py | 1580 ++--- tests/test_dynamodb2/test_server.py | 38 +- tests/test_ec2/test_account_attributes.py | 88 +- tests/test_ec2/test_amazon_dev_pay.py | 20 +- tests/test_ec2/test_amis.py | 1552 ++--- .../test_availability_zones_and_regions.py | 108 +- tests/test_ec2/test_customer_gateways.py | 104 +- tests/test_ec2/test_dhcp_options.py | 666 +-- tests/test_ec2/test_ec2_core.py | 2 +- tests/test_ec2/test_elastic_block_store.py | 1330 ++--- tests/test_ec2/test_elastic_ip_addresses.py | 1028 ++-- .../test_elastic_network_interfaces.py | 724 +-- tests/test_ec2/test_general.py | 84 +- tests/test_ec2/test_instances.py | 2512 ++++---- tests/test_ec2/test_internet_gateways.py | 538 +- tests/test_ec2/test_ip_addresses.py | 20 +- tests/test_ec2/test_key_pairs.py | 302 +- tests/test_ec2/test_monitoring.py | 20 +- tests/test_ec2/test_nat_gateway.py | 218 +- tests/test_ec2/test_network_acls.py | 350 +- tests/test_ec2/test_placement_groups.py | 20 +- tests/test_ec2/test_regions.py | 296 +- tests/test_ec2/test_reserved_instances.py | 20 +- tests/test_ec2/test_route_tables.py | 1060 ++-- tests/test_ec2/test_security_groups.py | 1474 ++--- tests/test_ec2/test_server.py | 52 +- tests/test_ec2/test_spot_fleet.py | 690 +-- tests/test_ec2/test_spot_instances.py | 536 +- tests/test_ec2/test_subnets.py | 582 +- tests/test_ec2/test_tags.py | 906 +-- tests/test_ec2/test_utils.py | 16 +- 
.../test_ec2/test_virtual_private_gateways.py | 210 +- tests/test_ec2/test_vm_export.py | 20 +- tests/test_ec2/test_vm_import.py | 20 +- tests/test_ec2/test_vpc_peering.py | 264 +- tests/test_ec2/test_vpcs.py | 1082 ++-- tests/test_ec2/test_vpn_connections.py | 102 +- tests/test_ec2/test_windows.py | 20 +- tests/test_ecr/test_ecr_boto3.py | 1394 ++--- tests/test_ecs/test_ecs_boto3.py | 4428 +++++++------- tests/test_elb/test_elb.py | 1964 +++---- tests/test_elb/test_server.py | 34 +- tests/test_elbv2/test_elbv2.py | 3176 +++++----- tests/test_elbv2/test_server.py | 34 +- tests/test_emr/test_emr.py | 1316 ++--- tests/test_emr/test_emr_boto3.py | 1440 ++--- tests/test_emr/test_server.py | 36 +- tests/test_events/test_events.py | 422 +- tests/test_glacier/test_glacier_archives.py | 42 +- tests/test_glacier/test_glacier_jobs.py | 180 +- tests/test_glacier/test_glacier_server.py | 44 +- tests/test_glacier/test_glacier_vaults.py | 62 +- tests/test_glue/__init__.py | 2 +- tests/test_glue/fixtures/__init__.py | 2 +- tests/test_glue/fixtures/datacatalog.py | 112 +- tests/test_glue/helpers.py | 238 +- tests/test_glue/test_datacatalog.py | 852 +-- tests/test_iam/test_iam.py | 1520 ++--- tests/test_iam/test_iam_account_aliases.py | 40 +- tests/test_iam/test_iam_groups.py | 310 +- tests/test_iam/test_server.py | 52 +- tests/test_iot/test_iot.py | 1752 +++--- tests/test_iot/test_server.py | 38 +- tests/test_iotdata/test_iotdata.py | 186 +- tests/test_iotdata/test_server.py | 40 +- tests/test_kinesis/test_firehose.py | 376 +- tests/test_kinesis/test_kinesis.py | 1248 ++-- tests/test_kinesis/test_server.py | 50 +- tests/test_kms/test_kms.py | 1438 ++--- tests/test_kms/test_server.py | 50 +- tests/test_logs/test_logs.py | 256 +- tests/test_opsworks/test_apps.py | 204 +- tests/test_opsworks/test_instances.py | 448 +- tests/test_opsworks/test_layers.py | 234 +- tests/test_opsworks/test_stack.py | 92 +- .../organizations_test_utils.py | 272 +- .../test_organizations_boto3.py | 644 +- tests/test_polly/test_polly.py | 550 +- tests/test_polly/test_server.py | 38 +- tests/test_rds/test_rds.py | 648 +-- tests/test_rds/test_server.py | 40 +- tests/test_rds2/test_rds2.py | 2944 +++++----- tests/test_rds2/test_server.py | 40 +- tests/test_redshift/test_redshift.py | 2484 ++++---- tests/test_redshift/test_server.py | 44 +- .../test_resourcegroupstaggingapi.py | 570 +- .../test_server.py | 48 +- tests/test_route53/test_route53.py | 1422 ++--- tests/test_s3/test_s3.py | 5166 ++++++++--------- tests/test_s3/test_s3_lifecycle.py | 774 +-- tests/test_s3/test_s3_storageclass.py | 212 +- tests/test_s3/test_s3_utils.py | 160 +- tests/test_s3/test_server.py | 210 +- .../test_bucket_path_server.py | 226 +- .../test_s3bucket_path/test_s3bucket_path.py | 642 +- .../test_s3bucket_path_combo.py | 50 +- .../test_s3bucket_path_utils.py | 32 +- .../test_secretsmanager.py | 572 +- tests/test_secretsmanager/test_server.py | 842 +-- tests/test_ses/test_server.py | 32 +- tests/test_ses/test_ses.py | 232 +- tests/test_ses/test_ses_boto3.py | 388 +- tests/test_sns/test_application.py | 616 +- tests/test_sns/test_application_boto3.py | 700 +-- tests/test_sns/test_publishing.py | 138 +- tests/test_sns/test_publishing_boto3.py | 978 ++-- tests/test_sns/test_server.py | 48 +- tests/test_sns/test_subscriptions.py | 270 +- tests/test_sns/test_subscriptions_boto3.py | 792 +-- tests/test_sns/test_topics.py | 266 +- tests/test_sns/test_topics_boto3.py | 380 +- tests/test_sqs/test_server.py | 170 +- tests/test_sqs/test_sqs.py | 2474 ++++---- 
tests/test_ssm/test_ssm_boto3.py | 1572 ++--- tests/test_sts/test_server.py | 78 +- tests/test_sts/test_sts.py | 168 +- tests/test_swf/models/test_activity_task.py | 308 +- tests/test_swf/models/test_decision_task.py | 160 +- tests/test_swf/models/test_domain.py | 238 +- tests/test_swf/models/test_generic_type.py | 116 +- tests/test_swf/models/test_history_event.py | 62 +- tests/test_swf/models/test_timeout.py | 38 +- .../models/test_workflow_execution.py | 1002 ++-- .../test_swf/responses/test_activity_tasks.py | 456 +- .../test_swf/responses/test_activity_types.py | 268 +- .../test_swf/responses/test_decision_tasks.py | 684 +-- tests/test_swf/responses/test_domains.py | 238 +- tests/test_swf/responses/test_timeouts.py | 220 +- .../responses/test_workflow_executions.py | 524 +- .../test_swf/responses/test_workflow_types.py | 274 +- tests/test_swf/test_exceptions.py | 316 +- tests/test_swf/test_utils.py | 26 +- tests/test_swf/utils.py | 200 +- tests/test_xray/test_xray_boto3.py | 278 +- tests/test_xray/test_xray_client.py | 144 +- tox.ini | 28 +- travis_moto_server.sh | 8 +- wait_for.py | 62 +- 172 files changed, 49629 insertions(+), 49629 deletions(-) diff --git a/.travis.yml b/.travis.yml index de22818b8..9f3106ad2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,50 +1,50 @@ -language: python -sudo: false -services: - - docker -python: - - 2.7 - - 3.6 -env: - - TEST_SERVER_MODE=false - - TEST_SERVER_MODE=true -# Due to incomplete Python 3.7 support on Travis CI ( -# https://github.com/travis-ci/travis-ci/issues/9815), -# using a matrix is necessary -matrix: - include: - - python: 3.7 - env: TEST_SERVER_MODE=false - dist: xenial - sudo: true - - python: 3.7 - env: TEST_SERVER_MODE=true - dist: xenial - sudo: true -before_install: - - export BOTO_CONFIG=/dev/null -install: - # We build moto first so the docker container doesn't try to compile it as well, also note we don't use - # -d for docker run so the logs show up in travis - # Python images come from here: https://hub.docker.com/_/python/ - - | - python setup.py sdist - - if [ "$TEST_SERVER_MODE" = "true" ]; then - docker run --rm -t --name motoserver -e TEST_SERVER_MODE=true -e AWS_SECRET_ACCESS_KEY=server_secret -e AWS_ACCESS_KEY_ID=server_key -v `pwd`:/moto -p 5000:5000 -v /var/run/docker.sock:/var/run/docker.sock python:${TRAVIS_PYTHON_VERSION}-stretch /moto/travis_moto_server.sh & - export AWS_SECRET_ACCESS_KEY=foobar_secret - export AWS_ACCESS_KEY_ID=foobar_key - fi - travis_retry pip install boto==2.45.0 - travis_retry pip install boto3 - travis_retry pip install dist/moto*.gz - travis_retry pip install coveralls==1.1 - travis_retry pip install -r requirements-dev.txt - - if [ "$TEST_SERVER_MODE" = "true" ]; then - python wait_for.py - fi -script: - - make test -after_success: - - coveralls +language: python +sudo: false +services: + - docker +python: + - 2.7 + - 3.6 +env: + - TEST_SERVER_MODE=false + - TEST_SERVER_MODE=true +# Due to incomplete Python 3.7 support on Travis CI ( +# https://github.com/travis-ci/travis-ci/issues/9815), +# using a matrix is necessary +matrix: + include: + - python: 3.7 + env: TEST_SERVER_MODE=false + dist: xenial + sudo: true + - python: 3.7 + env: TEST_SERVER_MODE=true + dist: xenial + sudo: true +before_install: + - export BOTO_CONFIG=/dev/null +install: + # We build moto first so the docker container doesn't try to compile it as well, also note we don't use + # -d for docker run so the logs show up in travis + # Python images come from here: https://hub.docker.com/_/python/ + - | + python
setup.py sdist + + if [ "$TEST_SERVER_MODE" = "true" ]; then + docker run --rm -t --name motoserver -e TEST_SERVER_MODE=true -e AWS_SECRET_ACCESS_KEY=server_secret -e AWS_ACCESS_KEY_ID=server_key -v `pwd`:/moto -p 5000:5000 -v /var/run/docker.sock:/var/run/docker.sock python:${TRAVIS_PYTHON_VERSION}-stretch /moto/travis_moto_server.sh & + export AWS_SECRET_ACCESS_KEY=foobar_secret + export AWS_ACCESS_KEY_ID=foobar_key + fi + travis_retry pip install boto==2.45.0 + travis_retry pip install boto3 + travis_retry pip install dist/moto*.gz + travis_retry pip install coveralls==1.1 + travis_retry pip install -r requirements-dev.txt + + if [ "$TEST_SERVER_MODE" = "true" ]; then + python wait_for.py + fi +script: + - make test +after_success: + - coveralls diff --git a/requirements-dev.txt b/requirements-dev.txt index 111cd5f3f..5470815ee 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,17 +1,17 @@ --r requirements.txt -mock -nose -sure==1.4.11 -coverage -flake8==3.5.0 -freezegun -flask -boto>=2.45.0 -boto3>=1.4.4 -botocore>=1.8.36 -six>=1.9 -prompt-toolkit==1.0.14 -click==6.7 -inflection==0.3.1 -lxml==4.2.3 -beautifulsoup4==4.6.0 +-r requirements.txt +mock +nose +sure==1.4.11 +coverage +flake8==3.5.0 +freezegun +flask +boto>=2.45.0 +boto3>=1.4.4 +botocore>=1.12.13 +six>=1.9 +prompt-toolkit==1.0.14 +click==6.7 +inflection==0.3.1 +lxml==4.2.3 +beautifulsoup4==4.6.0 \ No newline at end of file diff --git a/setup.py b/setup.py index 98780dd5a..f547f7b43 100755 --- a/setup.py +++ b/setup.py @@ -1,71 +1,71 @@ -#!/usr/bin/env python -from __future__ import unicode_literals -import setuptools -from setuptools import setup, find_packages -import sys - - -install_requires = [ - "Jinja2>=2.7.3", - "boto>=2.36.0", - "boto3>=1.6.16,<1.8", - "botocore>=1.9.16,<1.11", - "cryptography>=2.3.0", - "requests>=2.5", - "xmltodict", - "six>1.9", - "werkzeug", - "pyaml", - "pytz", - "python-dateutil<3.0.0,>=2.1", - "python-jose<3.0.0", - "mock", - "docker>=2.5.1", - "jsondiff==1.1.1", - "aws-xray-sdk<0.96,>=0.93", - "responses>=0.9.0", -] - -extras_require = { - 'server': ['flask'], -} - -# https://hynek.me/articles/conditional-python-dependencies/ -if int(setuptools.__version__.split(".", 1)[0]) < 18: - if sys.version_info[0:2] < (3, 3): - install_requires.append("backports.tempfile") -else: - extras_require[":python_version<'3.3'"] = ["backports.tempfile"] - - -setup( - name='moto', - version='1.3.6', - description='A library that allows your python tests to easily' - ' mock out the boto library', - author='Steve Pulec', - author_email='spulec@gmail.com', - url='https://github.com/spulec/moto', - entry_points={ - 'console_scripts': [ - 'moto_server = moto.server:main', - ], - }, - packages=find_packages(exclude=("tests", "tests.*")), - install_requires=install_requires, - extras_require=extras_require, - include_package_data=True, - license="Apache", - test_suite="tests", - classifiers=[ - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.3", - "Programming Language :: Python :: 3.4", - "Programming Language :: Python :: 3.5", - "Programming Language :: Python :: 3.6", - "License :: OSI Approved :: Apache Software License", - "Topic :: Software Development :: Testing", - ], -) +#!/usr/bin/env python +from __future__ import unicode_literals +import setuptools +from setuptools import setup, find_packages +import sys + + +install_requires = [ + "Jinja2>=2.7.3", + "boto>=2.36.0", + 
"boto3>=1.6.16", + "botocore>=1.12.13", + "cryptography>=2.3.0", + "requests>=2.5", + "xmltodict", + "six>1.9", + "werkzeug", + "pyaml", + "pytz", + "python-dateutil<3.0.0,>=2.1", + "python-jose<3.0.0", + "mock", + "docker>=2.5.1", + "jsondiff==1.1.1", + "aws-xray-sdk!=0.96,>=0.93", + "responses>=0.9.0", +] + +extras_require = { + 'server': ['flask'], +} + +# https://hynek.me/articles/conditional-python-dependencies/ +if int(setuptools.__version__.split(".", 1)[0]) < 18: + if sys.version_info[0:2] < (3, 3): + install_requires.append("backports.tempfile") +else: + extras_require[":python_version<'3.3'"] = ["backports.tempfile"] + + +setup( + name='moto', + version='1.3.7', + description='A library that allows your python tests to easily' + ' mock out the boto library', + author='Steve Pulec', + author_email='spulec@gmail.com', + url='https://github.com/spulec/moto', + entry_points={ + 'console_scripts': [ + 'moto_server = moto.server:main', + ], + }, + packages=find_packages(exclude=("tests", "tests.*")), + install_requires=install_requires, + extras_require=extras_require, + include_package_data=True, + license="Apache", + test_suite="tests", + classifiers=[ + "Programming Language :: Python :: 2", + "Programming Language :: Python :: 2.7", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.3", + "Programming Language :: Python :: 3.4", + "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6", + "License :: OSI Approved :: Apache Software License", + "Topic :: Software Development :: Testing", + ], +) \ No newline at end of file diff --git a/tests/test_cloudformation/fixtures/single_instance_with_ebs_volume.py b/tests/test_cloudformation/fixtures/single_instance_with_ebs_volume.py index 37c7ca4f3..189cc36cd 100644 --- a/tests/test_cloudformation/fixtures/single_instance_with_ebs_volume.py +++ b/tests/test_cloudformation/fixtures/single_instance_with_ebs_volume.py @@ -1,345 +1,345 @@ -from __future__ import unicode_literals - -template = { - "Description": "AWS CloudFormation Sample Template Gollum_Single_Instance_With_EBS_Volume: Gollum is a simple wiki system built on top of Git that powers GitHub Wikis. This template installs a Gollum Wiki stack on a single EC2 instance with an EBS volume for storage and demonstrates using the AWS CloudFormation bootstrap scripts to install the packages and files necessary at instance launch time. **WARNING** This template creates an Amazon EC2 instance and an EBS volume. You will be billed for the AWS resources used if you create a stack from this template.", - "Parameters": { - "SSHLocation": { - "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x.", - "Description": "The IP address range that can be used to SSH to the EC2 instances", - "Default": "0.0.0.0/0", - "MinLength": "9", - "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", - "MaxLength": "18", - "Type": "String" - }, - "KeyName": { - "Type": "String", - "Description": "Name of an existing EC2 KeyPair to enable SSH access to the instances", - "MinLength": "1", - "AllowedPattern": "[\\x20-\\x7E]*", - "MaxLength": "255", - "ConstraintDescription": "can contain only ASCII characters." 
- }, - "InstanceType": { - "Default": "m1.small", - "ConstraintDescription": "must be a valid EC2 instance type.", - "Type": "String", - "Description": "WebServer EC2 instance type", - "AllowedValues": [ - "t1.micro", - "m1.small", - "m1.medium", - "m1.large", - "m1.xlarge", - "m2.xlarge", - "m2.2xlarge", - "m2.4xlarge", - "m3.xlarge", - "m3.2xlarge", - "c1.medium", - "c1.xlarge", - "cc1.4xlarge", - "cc2.8xlarge", - "cg1.4xlarge" - ] - }, - "VolumeSize": { - "Description": "WebServer EC2 instance type", - "Default": "5", - "Type": "Number", - "MaxValue": "1024", - "MinValue": "5", - "ConstraintDescription": "must be between 5 and 1024 Gb." - } - }, - "AWSTemplateFormatVersion": "2010-09-09", - "Outputs": { - "WebsiteURL": { - "Description": "URL for Gollum wiki", - "Value": { - "Fn::Join": [ - "", - [ - "http://", - { - "Fn::GetAtt": [ - "WebServer", - "PublicDnsName" - ] - } - ] - ] - } - } - }, - "Resources": { - "WebServerSecurityGroup": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "SecurityGroupIngress": [ - { - "ToPort": "80", - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0", - "FromPort": "80" - }, - { - "ToPort": "22", - "IpProtocol": "tcp", - "CidrIp": { - "Ref": "SSHLocation" - }, - "FromPort": "22" - } - ], - "GroupDescription": "Enable SSH access and HTTP access on the inbound port" - } - }, - "WebServer": { - "Type": "AWS::EC2::Instance", - "Properties": { - "UserData": { - "Fn::Base64": { - "Fn::Join": [ - "", - [ - "#!/bin/bash -v\n", - "yum update -y aws-cfn-bootstrap\n", - "# Helper function\n", - "function error_exit\n", - "{\n", - " /opt/aws/bin/cfn-signal -e 1 -r \"$1\" '", - { - "Ref": "WaitHandle" - }, - "'\n", - " exit 1\n", - "}\n", - "# Install Rails packages\n", - "/opt/aws/bin/cfn-init -s ", - { - "Ref": "AWS::StackId" - }, - " -r WebServer ", - " --region ", - { - "Ref": "AWS::Region" - }, - " || error_exit 'Failed to run cfn-init'\n", - "# Wait for the EBS volume to show up\n", - "while [ ! -e /dev/sdh ]; do echo Waiting for EBS volume to attach; sleep 5; done\n", - "# Format the EBS volume and mount it\n", - "mkdir /var/wikidata\n", - "/sbin/mkfs -t ext3 /dev/sdh1\n", - "mount /dev/sdh1 /var/wikidata\n", - "# Initialize the wiki and fire up the server\n", - "cd /var/wikidata\n", - "git init\n", - "gollum --port 80 --host 0.0.0.0 &\n", - "# If all is well so signal success\n", - "/opt/aws/bin/cfn-signal -e $? 
-r \"Rails application setup complete\" '", - { - "Ref": "WaitHandle" - }, - "'\n" - ] - ] - } - }, - "KeyName": { - "Ref": "KeyName" - }, - "SecurityGroups": [ - { - "Ref": "WebServerSecurityGroup" - } - ], - "InstanceType": { - "Ref": "InstanceType" - }, - "ImageId": { - "Fn::FindInMap": [ - "AWSRegionArch2AMI", - { - "Ref": "AWS::Region" - }, - { - "Fn::FindInMap": [ - "AWSInstanceType2Arch", - { - "Ref": "InstanceType" - }, - "Arch" - ] - } - ] - } - }, - "Metadata": { - "AWS::CloudFormation::Init": { - "config": { - "packages": { - "rubygems": { - "nokogiri": [ - "1.5.10" - ], - "rdiscount": [], - "gollum": [ - "1.1.1" - ] - }, - "yum": { - "libxslt-devel": [], - "gcc": [], - "git": [], - "rubygems": [], - "ruby-devel": [], - "ruby-rdoc": [], - "make": [], - "libxml2-devel": [] - } - } - } - } - } - }, - "DataVolume": { - "Type": "AWS::EC2::Volume", - "Properties": { - "Tags": [ - { - "Value": "Gollum Data Volume", - "Key": "Usage" - } - ], - "AvailabilityZone": { - "Fn::GetAtt": [ - "WebServer", - "AvailabilityZone" - ] - }, - "Size": "100", - } - }, - "MountPoint": { - "Type": "AWS::EC2::VolumeAttachment", - "Properties": { - "InstanceId": { - "Ref": "WebServer" - }, - "Device": "/dev/sdh", - "VolumeId": { - "Ref": "DataVolume" - } - } - }, - "WaitCondition": { - "DependsOn": "MountPoint", - "Type": "AWS::CloudFormation::WaitCondition", - "Properties": { - "Handle": { - "Ref": "WaitHandle" - }, - "Timeout": "300" - }, - "Metadata": { - "Comment1": "Note that the WaitCondition is dependent on the volume mount point allowing the volume to be created and attached to the EC2 instance", - "Comment2": "The instance bootstrap script waits for the volume to be attached to the instance prior to installing Gollum and signalling completion" - } - }, - "WaitHandle": { - "Type": "AWS::CloudFormation::WaitConditionHandle" - } - }, - "Mappings": { - "AWSInstanceType2Arch": { - "m3.2xlarge": { - "Arch": "64" - }, - "m2.2xlarge": { - "Arch": "64" - }, - "m1.small": { - "Arch": "64" - }, - "c1.medium": { - "Arch": "64" - }, - "cg1.4xlarge": { - "Arch": "64HVM" - }, - "m2.xlarge": { - "Arch": "64" - }, - "t1.micro": { - "Arch": "64" - }, - "cc1.4xlarge": { - "Arch": "64HVM" - }, - "m1.medium": { - "Arch": "64" - }, - "cc2.8xlarge": { - "Arch": "64HVM" - }, - "m1.large": { - "Arch": "64" - }, - "m1.xlarge": { - "Arch": "64" - }, - "m2.4xlarge": { - "Arch": "64" - }, - "c1.xlarge": { - "Arch": "64" - }, - "m3.xlarge": { - "Arch": "64" - } - }, - "AWSRegionArch2AMI": { - "ap-southeast-1": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-b4b0cae6", - "64": "ami-beb0caec" - }, - "ap-southeast-2": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-b3990e89", - "64": "ami-bd990e87" - }, - "us-west-2": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-38fe7308", - "64": "ami-30fe7300" - }, - "us-east-1": { - "64HVM": "ami-0da96764", - "32": "ami-31814f58", - "64": "ami-1b814f72" - }, - "ap-northeast-1": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-0644f007", - "64": "ami-0a44f00b" - }, - "us-west-1": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-11d68a54", - "64": "ami-1bd68a5e" - }, - "eu-west-1": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-973b06e3", - "64": "ami-953b06e1" - }, - "sa-east-1": { - "64HVM": "NOT_YET_SUPPORTED", - "32": "ami-3e3be423", - "64": "ami-3c3be421" - } - } - } -} +from __future__ import unicode_literals + +template = { + "Description": "AWS CloudFormation Sample Template Gollum_Single_Instance_With_EBS_Volume: Gollum is a simple wiki system built on top of Git that powers GitHub Wikis. 
This template installs a Gollum Wiki stack on a single EC2 instance with an EBS volume for storage and demonstrates using the AWS CloudFormation bootstrap scripts to install the packages and files necessary at instance launch time. **WARNING** This template creates an Amazon EC2 instance and an EBS volume. You will be billed for the AWS resources used if you create a stack from this template.", + "Parameters": { + "SSHLocation": { + "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x.", + "Description": "The IP address range that can be used to SSH to the EC2 instances", + "Default": "0.0.0.0/0", + "MinLength": "9", + "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", + "MaxLength": "18", + "Type": "String" + }, + "KeyName": { + "Type": "String", + "Description": "Name of an existing EC2 KeyPair to enable SSH access to the instances", + "MinLength": "1", + "AllowedPattern": "[\\x20-\\x7E]*", + "MaxLength": "255", + "ConstraintDescription": "can contain only ASCII characters." + }, + "InstanceType": { + "Default": "m1.small", + "ConstraintDescription": "must be a valid EC2 instance type.", + "Type": "String", + "Description": "WebServer EC2 instance type", + "AllowedValues": [ + "t1.micro", + "m1.small", + "m1.medium", + "m1.large", + "m1.xlarge", + "m2.xlarge", + "m2.2xlarge", + "m2.4xlarge", + "m3.xlarge", + "m3.2xlarge", + "c1.medium", + "c1.xlarge", + "cc1.4xlarge", + "cc2.8xlarge", + "cg1.4xlarge" + ] + }, + "VolumeSize": { + "Description": "WebServer EC2 instance type", + "Default": "5", + "Type": "Number", + "MaxValue": "1024", + "MinValue": "5", + "ConstraintDescription": "must be between 5 and 1024 Gb." + } + }, + "AWSTemplateFormatVersion": "2010-09-09", + "Outputs": { + "WebsiteURL": { + "Description": "URL for Gollum wiki", + "Value": { + "Fn::Join": [ + "", + [ + "http://", + { + "Fn::GetAtt": [ + "WebServer", + "PublicDnsName" + ] + } + ] + ] + } + } + }, + "Resources": { + "WebServerSecurityGroup": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "SecurityGroupIngress": [ + { + "ToPort": "80", + "IpProtocol": "tcp", + "CidrIp": "0.0.0.0/0", + "FromPort": "80" + }, + { + "ToPort": "22", + "IpProtocol": "tcp", + "CidrIp": { + "Ref": "SSHLocation" + }, + "FromPort": "22" + } + ], + "GroupDescription": "Enable SSH access and HTTP access on the inbound port" + } + }, + "WebServer": { + "Type": "AWS::EC2::Instance", + "Properties": { + "UserData": { + "Fn::Base64": { + "Fn::Join": [ + "", + [ + "#!/bin/bash -v\n", + "yum update -y aws-cfn-bootstrap\n", + "# Helper function\n", + "function error_exit\n", + "{\n", + " /opt/aws/bin/cfn-signal -e 1 -r \"$1\" '", + { + "Ref": "WaitHandle" + }, + "'\n", + " exit 1\n", + "}\n", + "# Install Rails packages\n", + "/opt/aws/bin/cfn-init -s ", + { + "Ref": "AWS::StackId" + }, + " -r WebServer ", + " --region ", + { + "Ref": "AWS::Region" + }, + " || error_exit 'Failed to run cfn-init'\n", + "# Wait for the EBS volume to show up\n", + "while [ ! -e /dev/sdh ]; do echo Waiting for EBS volume to attach; sleep 5; done\n", + "# Format the EBS volume and mount it\n", + "mkdir /var/wikidata\n", + "/sbin/mkfs -t ext3 /dev/sdh1\n", + "mount /dev/sdh1 /var/wikidata\n", + "# Initialize the wiki and fire up the server\n", + "cd /var/wikidata\n", + "git init\n", + "gollum --port 80 --host 0.0.0.0 &\n", + "# If all is well so signal success\n", + "/opt/aws/bin/cfn-signal -e $? 
-r \"Rails application setup complete\" '", + { + "Ref": "WaitHandle" + }, + "'\n" + ] + ] + } + }, + "KeyName": { + "Ref": "KeyName" + }, + "SecurityGroups": [ + { + "Ref": "WebServerSecurityGroup" + } + ], + "InstanceType": { + "Ref": "InstanceType" + }, + "ImageId": { + "Fn::FindInMap": [ + "AWSRegionArch2AMI", + { + "Ref": "AWS::Region" + }, + { + "Fn::FindInMap": [ + "AWSInstanceType2Arch", + { + "Ref": "InstanceType" + }, + "Arch" + ] + } + ] + } + }, + "Metadata": { + "AWS::CloudFormation::Init": { + "config": { + "packages": { + "rubygems": { + "nokogiri": [ + "1.5.10" + ], + "rdiscount": [], + "gollum": [ + "1.1.1" + ] + }, + "yum": { + "libxslt-devel": [], + "gcc": [], + "git": [], + "rubygems": [], + "ruby-devel": [], + "ruby-rdoc": [], + "make": [], + "libxml2-devel": [] + } + } + } + } + } + }, + "DataVolume": { + "Type": "AWS::EC2::Volume", + "Properties": { + "Tags": [ + { + "Value": "Gollum Data Volume", + "Key": "Usage" + } + ], + "AvailabilityZone": { + "Fn::GetAtt": [ + "WebServer", + "AvailabilityZone" + ] + }, + "Size": "100", + } + }, + "MountPoint": { + "Type": "AWS::EC2::VolumeAttachment", + "Properties": { + "InstanceId": { + "Ref": "WebServer" + }, + "Device": "/dev/sdh", + "VolumeId": { + "Ref": "DataVolume" + } + } + }, + "WaitCondition": { + "DependsOn": "MountPoint", + "Type": "AWS::CloudFormation::WaitCondition", + "Properties": { + "Handle": { + "Ref": "WaitHandle" + }, + "Timeout": "300" + }, + "Metadata": { + "Comment1": "Note that the WaitCondition is dependent on the volume mount point allowing the volume to be created and attached to the EC2 instance", + "Comment2": "The instance bootstrap script waits for the volume to be attached to the instance prior to installing Gollum and signalling completion" + } + }, + "WaitHandle": { + "Type": "AWS::CloudFormation::WaitConditionHandle" + } + }, + "Mappings": { + "AWSInstanceType2Arch": { + "m3.2xlarge": { + "Arch": "64" + }, + "m2.2xlarge": { + "Arch": "64" + }, + "m1.small": { + "Arch": "64" + }, + "c1.medium": { + "Arch": "64" + }, + "cg1.4xlarge": { + "Arch": "64HVM" + }, + "m2.xlarge": { + "Arch": "64" + }, + "t1.micro": { + "Arch": "64" + }, + "cc1.4xlarge": { + "Arch": "64HVM" + }, + "m1.medium": { + "Arch": "64" + }, + "cc2.8xlarge": { + "Arch": "64HVM" + }, + "m1.large": { + "Arch": "64" + }, + "m1.xlarge": { + "Arch": "64" + }, + "m2.4xlarge": { + "Arch": "64" + }, + "c1.xlarge": { + "Arch": "64" + }, + "m3.xlarge": { + "Arch": "64" + } + }, + "AWSRegionArch2AMI": { + "ap-southeast-1": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-b4b0cae6", + "64": "ami-beb0caec" + }, + "ap-southeast-2": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-b3990e89", + "64": "ami-bd990e87" + }, + "us-west-2": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-38fe7308", + "64": "ami-30fe7300" + }, + "us-east-1": { + "64HVM": "ami-0da96764", + "32": "ami-31814f58", + "64": "ami-1b814f72" + }, + "ap-northeast-1": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-0644f007", + "64": "ami-0a44f00b" + }, + "us-west-1": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-11d68a54", + "64": "ami-1bd68a5e" + }, + "eu-west-1": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-973b06e3", + "64": "ami-953b06e1" + }, + "sa-east-1": { + "64HVM": "NOT_YET_SUPPORTED", + "32": "ami-3e3be423", + "64": "ami-3c3be421" + } + } + } +} diff --git a/tests/test_cloudformation/fixtures/vpc_eip.py b/tests/test_cloudformation/fixtures/vpc_eip.py index c7a46c830..2d6872f64 100644 --- a/tests/test_cloudformation/fixtures/vpc_eip.py +++ 
b/tests/test_cloudformation/fixtures/vpc_eip.py @@ -1,12 +1,12 @@ -from __future__ import unicode_literals - -template = { - "Resources": { - "VPCEIP": { - "Type": "AWS::EC2::EIP", - "Properties": { - "Domain": "vpc" - } - } - } -} +from __future__ import unicode_literals + +template = { + "Resources": { + "VPCEIP": { + "Type": "AWS::EC2::EIP", + "Properties": { + "Domain": "vpc" + } + } + } +} diff --git a/tests/test_cloudformation/fixtures/vpc_eni.py b/tests/test_cloudformation/fixtures/vpc_eni.py index ef9eb1d08..bc13e691f 100644 --- a/tests/test_cloudformation/fixtures/vpc_eni.py +++ b/tests/test_cloudformation/fixtures/vpc_eni.py @@ -1,34 +1,34 @@ -from __future__ import unicode_literals - -template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "VPC ENI Test CloudFormation", - "Resources": { - "ENI": { - "Type": "AWS::EC2::NetworkInterface", - "Properties": { - "SubnetId": {"Ref": "Subnet"} - } - }, - "Subnet": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "AvailabilityZone": "us-east-1a", - "VpcId": {"Ref": "VPC"}, - "CidrBlock": "10.0.0.0/24" - } - }, - "VPC": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "10.0.0.0/16" - } - } - }, - "Outputs": { - "NinjaENI": { - "Description": "Elastic IP mapping to Auto-Scaling Group", - "Value": {"Ref": "ENI"} - } - } -} +from __future__ import unicode_literals + +template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "VPC ENI Test CloudFormation", + "Resources": { + "ENI": { + "Type": "AWS::EC2::NetworkInterface", + "Properties": { + "SubnetId": {"Ref": "Subnet"} + } + }, + "Subnet": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "AvailabilityZone": "us-east-1a", + "VpcId": {"Ref": "VPC"}, + "CidrBlock": "10.0.0.0/24" + } + }, + "VPC": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16" + } + } + }, + "Outputs": { + "NinjaENI": { + "Description": "Elastic IP mapping to Auto-Scaling Group", + "Value": {"Ref": "ENI"} + } + } +} diff --git a/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py b/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py index 177da884e..39f02462e 100644 --- a/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py +++ b/tests/test_cloudformation/fixtures/vpc_single_instance_in_subnet.py @@ -1,408 +1,408 @@ -from __future__ import unicode_literals - -template = { - "Description": "AWS CloudFormation Sample Template vpc_single_instance_in_subnet.template: Sample template showing how to create a VPC and add an EC2 instance with an Elastic IP address and a security group. **WARNING** This template creates an Amazon EC2 instance. You will be billed for the AWS resources used if you create a stack from this template.", - "Parameters": { - "SSHLocation": { - "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x.", - "Description": " The IP address range that can be used to SSH to the EC2 instances", - "Default": "0.0.0.0/0", - "MinLength": "9", - "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", - "MaxLength": "18", - "Type": "String" - }, - "KeyName": { - "Type": "String", - "Description": "Name of an existing EC2 KeyPair to enable SSH access to the instance", - "MinLength": "1", - "AllowedPattern": "[\\x20-\\x7E]*", - "MaxLength": "255", - "ConstraintDescription": "can contain only ASCII characters." 
- }, - "InstanceType": { - "Default": "m1.small", - "ConstraintDescription": "must be a valid EC2 instance type.", - "Type": "String", - "Description": "WebServer EC2 instance type", - "AllowedValues": [ - "t1.micro", - "m1.small", - "m1.medium", - "m1.large", - "m1.xlarge", - "m2.xlarge", - "m2.2xlarge", - "m2.4xlarge", - "m3.xlarge", - "m3.2xlarge", - "c1.medium", - "c1.xlarge", - "cc1.4xlarge", - "cc2.8xlarge", - "cg1.4xlarge" - ] - } - }, - "AWSTemplateFormatVersion": "2010-09-09", - "Outputs": { - "URL": { - "Description": "Newly created application URL", - "Value": { - "Fn::Join": [ - "", - [ - "http://", - { - "Fn::GetAtt": [ - "WebServerInstance", - "PublicIp" - ] - } - ] - ] - } - } - }, - "Resources": { - "Subnet": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": { - "Ref": "VPC" - }, - "CidrBlock": "10.0.0.0/24", - "Tags": [ - { - "Value": { - "Ref": "AWS::StackId" - }, - "Key": "Application" - } - ] - } - }, - "WebServerWaitHandle": { - "Type": "AWS::CloudFormation::WaitConditionHandle" - }, - "Route": { - "Type": "AWS::EC2::Route", - "Properties": { - "GatewayId": { - "Ref": "InternetGateway" - }, - "DestinationCidrBlock": "0.0.0.0/0", - "RouteTableId": { - "Ref": "RouteTable" - } - }, - "DependsOn": "AttachGateway" - }, - "SubnetRouteTableAssociation": { - "Type": "AWS::EC2::SubnetRouteTableAssociation", - "Properties": { - "SubnetId": { - "Ref": "Subnet" - }, - "RouteTableId": { - "Ref": "RouteTable" - } - } - }, - "InternetGateway": { - "Type": "AWS::EC2::InternetGateway", - "Properties": { - "Tags": [ - { - "Value": { - "Ref": "AWS::StackId" - }, - "Key": "Application" - } - ] - } - }, - "RouteTable": { - "Type": "AWS::EC2::RouteTable", - "Properties": { - "VpcId": { - "Ref": "VPC" - }, - "Tags": [ - { - "Value": { - "Ref": "AWS::StackId" - }, - "Key": "Application" - } - ] - } - }, - "WebServerWaitCondition": { - "Type": "AWS::CloudFormation::WaitCondition", - "Properties": { - "Handle": { - "Ref": "WebServerWaitHandle" - }, - "Timeout": "300" - }, - "DependsOn": "WebServerInstance" - }, - "VPC": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "10.0.0.0/16", - "Tags": [ - { - "Value": { - "Ref": "AWS::StackId" - }, - "Key": "Application" - } - ] - } - }, - "InstanceSecurityGroup": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "SecurityGroupIngress": [ - { - "ToPort": "22", - "IpProtocol": "tcp", - "CidrIp": { - "Ref": "SSHLocation" - }, - "FromPort": "22" - }, - { - "ToPort": "80", - "IpProtocol": "tcp", - "CidrIp": "0.0.0.0/0", - "FromPort": "80" - } - ], - "VpcId": { - "Ref": "VPC" - }, - "GroupDescription": "Enable SSH access via port 22" - } - }, - "WebServerInstance": { - "Type": "AWS::EC2::Instance", - "Properties": { - "UserData": { - "Fn::Base64": { - "Fn::Join": [ - "", - [ - "#!/bin/bash\n", - "yum update -y aws-cfn-bootstrap\n", - "# Helper function\n", - "function error_exit\n", - "{\n", - " /opt/aws/bin/cfn-signal -e 1 -r \"$1\" '", - { - "Ref": "WebServerWaitHandle" - }, - "'\n", - " exit 1\n", - "}\n", - "# Install the simple web page\n", - "/opt/aws/bin/cfn-init -s ", - { - "Ref": "AWS::StackId" - }, - " -r WebServerInstance ", - " --region ", - { - "Ref": "AWS::Region" - }, - " || error_exit 'Failed to run cfn-init'\n", - "# Start up the cfn-hup daemon to listen for changes to the Web Server metadata\n", - "/opt/aws/bin/cfn-hup || error_exit 'Failed to start cfn-hup'\n", - "# All done so signal success\n", - "/opt/aws/bin/cfn-signal -e 0 -r \"WebServer setup complete\" '", - { - "Ref": "WebServerWaitHandle" - }, - 
"'\n" - ] - ] - } - }, - "Tags": [ - { - "Value": { - "Ref": "AWS::StackId" - }, - "Key": "Application" - }, - { - "Value": "Bar", - "Key": "Foo" - } - ], - "SecurityGroupIds": [ - { - "Ref": "InstanceSecurityGroup" - } - ], - "KeyName": { - "Ref": "KeyName" - }, - "SubnetId": { - "Ref": "Subnet" - }, - "ImageId": { - "Fn::FindInMap": [ - "RegionMap", - { - "Ref": "AWS::Region" - }, - "AMI" - ] - }, - "InstanceType": { - "Ref": "InstanceType" - } - }, - "Metadata": { - "Comment": "Install a simple PHP application", - "AWS::CloudFormation::Init": { - "config": { - "files": { - "/etc/cfn/cfn-hup.conf": { - "content": { - "Fn::Join": [ - "", - [ - "[main]\n", - "stack=", - { - "Ref": "AWS::StackId" - }, - "\n", - "region=", - { - "Ref": "AWS::Region" - }, - "\n" - ] - ] - }, - "owner": "root", - "group": "root", - "mode": "000400" - }, - "/etc/cfn/hooks.d/cfn-auto-reloader.conf": { - "content": { - "Fn::Join": [ - "", - [ - "[cfn-auto-reloader-hook]\n", - "triggers=post.update\n", - "path=Resources.WebServerInstance.Metadata.AWS::CloudFormation::Init\n", - "action=/opt/aws/bin/cfn-init -s ", - { - "Ref": "AWS::StackId" - }, - " -r WebServerInstance ", - " --region ", - { - "Ref": "AWS::Region" - }, - "\n", - "runas=root\n" - ] - ] - } - }, - "/var/www/html/index.php": { - "content": { - "Fn::Join": [ - "", - [ - "AWS CloudFormation sample PHP application';\n", - "?>\n" - ] - ] - }, - "owner": "apache", - "group": "apache", - "mode": "000644" - } - }, - "services": { - "sysvinit": { - "httpd": { - "ensureRunning": "true", - "enabled": "true" - }, - "sendmail": { - "ensureRunning": "false", - "enabled": "false" - } - } - }, - "packages": { - "yum": { - "httpd": [], - "php": [] - } - } - } - } - } - }, - "IPAddress": { - "Type": "AWS::EC2::EIP", - "Properties": { - "InstanceId": { - "Ref": "WebServerInstance" - }, - "Domain": "vpc" - }, - "DependsOn": "AttachGateway" - }, - "AttachGateway": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "VpcId": { - "Ref": "VPC" - }, - "InternetGatewayId": { - "Ref": "InternetGateway" - } - } - } - }, - "Mappings": { - "RegionMap": { - "ap-southeast-1": { - "AMI": "ami-74dda626" - }, - "ap-southeast-2": { - "AMI": "ami-b3990e89" - }, - "us-west-2": { - "AMI": "ami-16fd7026" - }, - "us-east-1": { - "AMI": "ami-7f418316" - }, - "ap-northeast-1": { - "AMI": "ami-dcfa4edd" - }, - "us-west-1": { - "AMI": "ami-951945d0" - }, - "eu-west-1": { - "AMI": "ami-24506250" - }, - "sa-east-1": { - "AMI": "ami-3e3be423" - } - } - } -} +from __future__ import unicode_literals + +template = { + "Description": "AWS CloudFormation Sample Template vpc_single_instance_in_subnet.template: Sample template showing how to create a VPC and add an EC2 instance with an Elastic IP address and a security group. **WARNING** This template creates an Amazon EC2 instance. 
You will be billed for the AWS resources used if you create a stack from this template.", + "Parameters": { + "SSHLocation": { + "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x.", + "Description": " The IP address range that can be used to SSH to the EC2 instances", + "Default": "0.0.0.0/0", + "MinLength": "9", + "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", + "MaxLength": "18", + "Type": "String" + }, + "KeyName": { + "Type": "String", + "Description": "Name of an existing EC2 KeyPair to enable SSH access to the instance", + "MinLength": "1", + "AllowedPattern": "[\\x20-\\x7E]*", + "MaxLength": "255", + "ConstraintDescription": "can contain only ASCII characters." + }, + "InstanceType": { + "Default": "m1.small", + "ConstraintDescription": "must be a valid EC2 instance type.", + "Type": "String", + "Description": "WebServer EC2 instance type", + "AllowedValues": [ + "t1.micro", + "m1.small", + "m1.medium", + "m1.large", + "m1.xlarge", + "m2.xlarge", + "m2.2xlarge", + "m2.4xlarge", + "m3.xlarge", + "m3.2xlarge", + "c1.medium", + "c1.xlarge", + "cc1.4xlarge", + "cc2.8xlarge", + "cg1.4xlarge" + ] + } + }, + "AWSTemplateFormatVersion": "2010-09-09", + "Outputs": { + "URL": { + "Description": "Newly created application URL", + "Value": { + "Fn::Join": [ + "", + [ + "http://", + { + "Fn::GetAtt": [ + "WebServerInstance", + "PublicIp" + ] + } + ] + ] + } + } + }, + "Resources": { + "Subnet": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "VpcId": { + "Ref": "VPC" + }, + "CidrBlock": "10.0.0.0/24", + "Tags": [ + { + "Value": { + "Ref": "AWS::StackId" + }, + "Key": "Application" + } + ] + } + }, + "WebServerWaitHandle": { + "Type": "AWS::CloudFormation::WaitConditionHandle" + }, + "Route": { + "Type": "AWS::EC2::Route", + "Properties": { + "GatewayId": { + "Ref": "InternetGateway" + }, + "DestinationCidrBlock": "0.0.0.0/0", + "RouteTableId": { + "Ref": "RouteTable" + } + }, + "DependsOn": "AttachGateway" + }, + "SubnetRouteTableAssociation": { + "Type": "AWS::EC2::SubnetRouteTableAssociation", + "Properties": { + "SubnetId": { + "Ref": "Subnet" + }, + "RouteTableId": { + "Ref": "RouteTable" + } + } + }, + "InternetGateway": { + "Type": "AWS::EC2::InternetGateway", + "Properties": { + "Tags": [ + { + "Value": { + "Ref": "AWS::StackId" + }, + "Key": "Application" + } + ] + } + }, + "RouteTable": { + "Type": "AWS::EC2::RouteTable", + "Properties": { + "VpcId": { + "Ref": "VPC" + }, + "Tags": [ + { + "Value": { + "Ref": "AWS::StackId" + }, + "Key": "Application" + } + ] + } + }, + "WebServerWaitCondition": { + "Type": "AWS::CloudFormation::WaitCondition", + "Properties": { + "Handle": { + "Ref": "WebServerWaitHandle" + }, + "Timeout": "300" + }, + "DependsOn": "WebServerInstance" + }, + "VPC": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + "Tags": [ + { + "Value": { + "Ref": "AWS::StackId" + }, + "Key": "Application" + } + ] + } + }, + "InstanceSecurityGroup": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "SecurityGroupIngress": [ + { + "ToPort": "22", + "IpProtocol": "tcp", + "CidrIp": { + "Ref": "SSHLocation" + }, + "FromPort": "22" + }, + { + "ToPort": "80", + "IpProtocol": "tcp", + "CidrIp": "0.0.0.0/0", + "FromPort": "80" + } + ], + "VpcId": { + "Ref": "VPC" + }, + "GroupDescription": "Enable SSH access via port 22" + } + }, + "WebServerInstance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "UserData": { + "Fn::Base64": { + "Fn::Join": [ + "", + [ + "#!/bin/bash\n", + "yum 
update -y aws-cfn-bootstrap\n", + "# Helper function\n", + "function error_exit\n", + "{\n", + " /opt/aws/bin/cfn-signal -e 1 -r \"$1\" '", + { + "Ref": "WebServerWaitHandle" + }, + "'\n", + " exit 1\n", + "}\n", + "# Install the simple web page\n", + "/opt/aws/bin/cfn-init -s ", + { + "Ref": "AWS::StackId" + }, + " -r WebServerInstance ", + " --region ", + { + "Ref": "AWS::Region" + }, + " || error_exit 'Failed to run cfn-init'\n", + "# Start up the cfn-hup daemon to listen for changes to the Web Server metadata\n", + "/opt/aws/bin/cfn-hup || error_exit 'Failed to start cfn-hup'\n", + "# All done so signal success\n", + "/opt/aws/bin/cfn-signal -e 0 -r \"WebServer setup complete\" '", + { + "Ref": "WebServerWaitHandle" + }, + "'\n" + ] + ] + } + }, + "Tags": [ + { + "Value": { + "Ref": "AWS::StackId" + }, + "Key": "Application" + }, + { + "Value": "Bar", + "Key": "Foo" + } + ], + "SecurityGroupIds": [ + { + "Ref": "InstanceSecurityGroup" + } + ], + "KeyName": { + "Ref": "KeyName" + }, + "SubnetId": { + "Ref": "Subnet" + }, + "ImageId": { + "Fn::FindInMap": [ + "RegionMap", + { + "Ref": "AWS::Region" + }, + "AMI" + ] + }, + "InstanceType": { + "Ref": "InstanceType" + } + }, + "Metadata": { + "Comment": "Install a simple PHP application", + "AWS::CloudFormation::Init": { + "config": { + "files": { + "/etc/cfn/cfn-hup.conf": { + "content": { + "Fn::Join": [ + "", + [ + "[main]\n", + "stack=", + { + "Ref": "AWS::StackId" + }, + "\n", + "region=", + { + "Ref": "AWS::Region" + }, + "\n" + ] + ] + }, + "owner": "root", + "group": "root", + "mode": "000400" + }, + "/etc/cfn/hooks.d/cfn-auto-reloader.conf": { + "content": { + "Fn::Join": [ + "", + [ + "[cfn-auto-reloader-hook]\n", + "triggers=post.update\n", + "path=Resources.WebServerInstance.Metadata.AWS::CloudFormation::Init\n", + "action=/opt/aws/bin/cfn-init -s ", + { + "Ref": "AWS::StackId" + }, + " -r WebServerInstance ", + " --region ", + { + "Ref": "AWS::Region" + }, + "\n", + "runas=root\n" + ] + ] + } + }, + "/var/www/html/index.php": { + "content": { + "Fn::Join": [ + "", + [ + "<?php\n", + "echo '<h1>AWS CloudFormation sample PHP application</h1>';\n", + "?>\n" + ] + ] + }, + "owner": "apache", + "group": "apache", + "mode": "000644" + } + }, + "services": { + "sysvinit": { + "httpd": { + "ensureRunning": "true", + "enabled": "true" + }, + "sendmail": { + "ensureRunning": "false", + "enabled": "false" + } + } + }, + "packages": { + "yum": { + "httpd": [], + "php": [] + } + } + } + } + } + }, + "IPAddress": { + "Type": "AWS::EC2::EIP", + "Properties": { + "InstanceId": { + "Ref": "WebServerInstance" + }, + "Domain": "vpc" + }, + "DependsOn": "AttachGateway" + }, + "AttachGateway": { + "Type": "AWS::EC2::VPCGatewayAttachment", + "Properties": { + "VpcId": { + "Ref": "VPC" + }, + "InternetGatewayId": { + "Ref": "InternetGateway" + } + } + } + }, + "Mappings": { + "RegionMap": { + "ap-southeast-1": { + "AMI": "ami-74dda626" + }, + "ap-southeast-2": { + "AMI": "ami-b3990e89" + }, + "us-west-2": { + "AMI": "ami-16fd7026" + }, + "us-east-1": { + "AMI": "ami-7f418316" + }, + "ap-northeast-1": { + "AMI": "ami-dcfa4edd" + }, + "us-west-1": { + "AMI": "ami-951945d0" + }, + "eu-west-1": { + "AMI": "ami-24506250" + }, + "sa-east-1": { + "AMI": "ami-3e3be423" + } + } + } +} diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index 801faf8a1..a61aa157a 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ 
-1,672 +1,672 @@ -from __future__ import unicode_literals - -import os -import json - -import boto -import boto.s3 -import boto.s3.key -import boto.cloudformation -from boto.exception import BotoServerError -import sure # noqa -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises - -from moto import mock_cloudformation_deprecated, mock_s3_deprecated, mock_route53_deprecated -from moto.cloudformation import cloudformation_backends - -dummy_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack 1", - "Resources": {}, -} - -dummy_template2 = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack 2", - "Resources": {}, -} - -# template with resource which has no delete attribute defined -dummy_template3 = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack 3", - "Resources": { - "VPC": { - "Properties": { - "CidrBlock": "192.168.0.0/16", - }, - "Type": "AWS::EC2::VPC" - } - }, -} - -dummy_template_json = json.dumps(dummy_template) -dummy_template_json2 = json.dumps(dummy_template2) -dummy_template_json3 = json.dumps(dummy_template3) - - -@mock_cloudformation_deprecated -def test_create_stack(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - ) - - stack = conn.describe_stacks()[0] - stack.stack_name.should.equal('test_stack') - stack.get_template().should.equal({ - 'GetTemplateResponse': { - 'GetTemplateResult': { - 'TemplateBody': dummy_template_json, - 'ResponseMetadata': { - 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' - } - } - } - - }) - - -@mock_cloudformation_deprecated -@mock_route53_deprecated -def test_create_stack_hosted_zone_by_id(): - conn = boto.connect_cloudformation() - dummy_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack 1", - "Parameters": { - }, - "Resources": { - "Bar": { - "Type" : "AWS::Route53::HostedZone", - "Properties" : { - "Name" : "foo.bar.baz", - } - }, - }, - } - dummy_template2 = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack 2", - "Parameters": { - "ZoneId": { "Type": "String" } - }, - "Resources": { - "Foo": { - "Properties": { - "HostedZoneId": {"Ref": "ZoneId"}, - "RecordSets": [] - }, - "Type": "AWS::Route53::RecordSetGroup" - } - }, - } - conn.create_stack( - "test_stack", - template_body=json.dumps(dummy_template), - parameters={}.items() - ) - r53_conn = boto.connect_route53() - zone_id = r53_conn.get_zones()[0].id - conn.create_stack( - "test_stack", - template_body=json.dumps(dummy_template2), - parameters={"ZoneId": zone_id}.items() - ) - - stack = conn.describe_stacks()[0] - assert stack.list_resources() - - -@mock_cloudformation_deprecated -def test_creating_stacks_across_regions(): - west1_conn = boto.cloudformation.connect_to_region("us-west-1") - west1_conn.create_stack("test_stack", template_body=dummy_template_json) - - west2_conn = boto.cloudformation.connect_to_region("us-west-2") - west2_conn.create_stack("test_stack", template_body=dummy_template_json) - - list(west1_conn.describe_stacks()).should.have.length_of(1) - list(west2_conn.describe_stacks()).should.have.length_of(1) - - -@mock_cloudformation_deprecated -def test_create_stack_with_notification_arn(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack_with_notifications", - template_body=dummy_template_json, - notification_arns='arn:aws:sns:us-east-1:123456789012:fake-queue' 
- ) - - stack = conn.describe_stacks()[0] - [n.value for n in stack.notification_arns].should.contain( - 'arn:aws:sns:us-east-1:123456789012:fake-queue') - - -@mock_cloudformation_deprecated -@mock_s3_deprecated -def test_create_stack_from_s3_url(): - s3_conn = boto.s3.connect_to_region('us-west-1') - bucket = s3_conn.create_bucket("foobar") - key = boto.s3.key.Key(bucket) - key.key = "template-key" - key.set_contents_from_string(dummy_template_json) - key_url = key.generate_url(expires_in=0, query_auth=False) - - conn = boto.cloudformation.connect_to_region('us-west-1') - conn.create_stack('new-stack', template_url=key_url) - - stack = conn.describe_stacks()[0] - stack.stack_name.should.equal('new-stack') - stack.get_template().should.equal( - { - 'GetTemplateResponse': { - 'GetTemplateResult': { - 'TemplateBody': dummy_template_json, - 'ResponseMetadata': { - 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' - } - } - } - - }) - - -@mock_cloudformation_deprecated -def test_describe_stack_by_name(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - ) - - stack = conn.describe_stacks("test_stack")[0] - stack.stack_name.should.equal('test_stack') - - -@mock_cloudformation_deprecated -def test_describe_stack_by_stack_id(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - ) - - stack = conn.describe_stacks("test_stack")[0] - stack_by_id = conn.describe_stacks(stack.stack_id)[0] - stack_by_id.stack_id.should.equal(stack.stack_id) - stack_by_id.stack_name.should.equal("test_stack") - - -@mock_cloudformation_deprecated -def test_describe_deleted_stack(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - ) - - stack = conn.describe_stacks("test_stack")[0] - stack_id = stack.stack_id - conn.delete_stack(stack.stack_id) - stack_by_id = conn.describe_stacks(stack_id)[0] - stack_by_id.stack_id.should.equal(stack.stack_id) - stack_by_id.stack_name.should.equal("test_stack") - stack_by_id.stack_status.should.equal("DELETE_COMPLETE") - - -@mock_cloudformation_deprecated -def test_get_template_by_name(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - ) - - template = conn.get_template("test_stack") - template.should.equal({ - 'GetTemplateResponse': { - 'GetTemplateResult': { - 'TemplateBody': dummy_template_json, - 'ResponseMetadata': { - 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' - } - } - } - - }) - - -@mock_cloudformation_deprecated -def test_list_stacks(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - ) - conn.create_stack( - "test_stack2", - template_body=dummy_template_json, - ) - - stacks = conn.list_stacks() - stacks.should.have.length_of(2) - stacks[0].template_description.should.equal("Stack 1") - - -@mock_cloudformation_deprecated -def test_delete_stack_by_name(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - ) - - conn.list_stacks().should.have.length_of(1) - conn.delete_stack("test_stack") - conn.list_stacks().should.have.length_of(0) - - -@mock_cloudformation_deprecated -def test_delete_stack_by_id(): - conn = boto.connect_cloudformation() - stack_id = conn.create_stack( - "test_stack", - template_body=dummy_template_json, - ) - - 
conn.list_stacks().should.have.length_of(1) - conn.delete_stack(stack_id) - conn.list_stacks().should.have.length_of(0) - with assert_raises(BotoServerError): - conn.describe_stacks("test_stack") - - conn.describe_stacks(stack_id).should.have.length_of(1) - - -@mock_cloudformation_deprecated -def test_delete_stack_with_resource_missing_delete_attr(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json3, - ) - - conn.list_stacks().should.have.length_of(1) - conn.delete_stack("test_stack") - conn.list_stacks().should.have.length_of(0) - - -@mock_cloudformation_deprecated -def test_bad_describe_stack(): - conn = boto.connect_cloudformation() - with assert_raises(BotoServerError): - conn.describe_stacks("bad_stack") - - -@mock_cloudformation_deprecated() -def test_cloudformation_params(): - dummy_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack 1", - "Resources": {}, - "Parameters": { - "APPNAME": { - "Default": "app-name", - "Description": "The name of the app", - "Type": "String" - } - } - } - dummy_template_json = json.dumps(dummy_template) - cfn = boto.connect_cloudformation() - cfn.create_stack('test_stack1', template_body=dummy_template_json, parameters=[ - ('APPNAME', 'testing123')]) - stack = cfn.describe_stacks('test_stack1')[0] - stack.parameters.should.have.length_of(1) - param = stack.parameters[0] - param.key.should.equal('APPNAME') - param.value.should.equal('testing123') - - -@mock_cloudformation_deprecated -def test_cloudformation_params_conditions_and_resources_are_distinct(): - dummy_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack 1", - "Conditions": { - "FooEnabled": { - "Fn::Equals": [ - { - "Ref": "FooEnabled" - }, - "true" - ] - }, - "FooDisabled": { - "Fn::Not": [ - { - "Fn::Equals": [ - { - "Ref": "FooEnabled" - }, - "true" - ] - } - ] - } - }, - "Parameters": { - "FooEnabled": { - "Type": "String", - "AllowedValues": [ - "true", - "false" - ] - } - }, - "Resources": { - "Bar": { - "Properties": { - "CidrBlock": "192.168.0.0/16", - }, - "Condition": "FooDisabled", - "Type": "AWS::EC2::VPC" - } - } - } - dummy_template_json = json.dumps(dummy_template) - cfn = boto.connect_cloudformation() - cfn.create_stack('test_stack1', template_body=dummy_template_json, parameters=[('FooEnabled', 'true')]) - stack = cfn.describe_stacks('test_stack1')[0] - resources = stack.list_resources() - assert not [resource for resource in resources if resource.logical_resource_id == 'Bar'] - - -@mock_cloudformation_deprecated -def test_stack_tags(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - tags={"foo": "bar", "baz": "bleh"}, - ) - - stack = conn.describe_stacks()[0] - dict(stack.tags).should.equal({"foo": "bar", "baz": "bleh"}) - - -@mock_cloudformation_deprecated -def test_update_stack(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - ) - - conn.update_stack("test_stack", dummy_template_json2) - - stack = conn.describe_stacks()[0] - stack.stack_status.should.equal("UPDATE_COMPLETE") - stack.get_template().should.equal({ - 'GetTemplateResponse': { - 'GetTemplateResult': { - 'TemplateBody': dummy_template_json2, - 'ResponseMetadata': { - 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' - } - } - } - }) - - -@mock_cloudformation_deprecated -def test_update_stack_with_previous_template(): - conn = 
boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - ) - conn.update_stack("test_stack", use_previous_template=True) - - stack = conn.describe_stacks()[0] - stack.stack_status.should.equal("UPDATE_COMPLETE") - stack.get_template().should.equal({ - 'GetTemplateResponse': { - 'GetTemplateResult': { - 'TemplateBody': dummy_template_json, - 'ResponseMetadata': { - 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' - } - } - } - }) - - -@mock_cloudformation_deprecated -def test_update_stack_with_parameters(): - dummy_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack", - "Resources": { - "VPC": { - "Properties": { - "CidrBlock": {"Ref": "Bar"} - }, - "Type": "AWS::EC2::VPC" - } - }, - "Parameters": { - "Bar": { - "Type": "String" - } - } - } - dummy_template_json = json.dumps(dummy_template) - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - parameters=[("Bar", "192.168.0.0/16")] - ) - conn.update_stack( - "test_stack", - template_body=dummy_template_json, - parameters=[("Bar", "192.168.0.1/16")] - ) - - stack = conn.describe_stacks()[0] - assert stack.parameters[0].value == "192.168.0.1/16" - - -@mock_cloudformation_deprecated -def test_update_stack_replace_tags(): - conn = boto.connect_cloudformation() - conn.create_stack( - "test_stack", - template_body=dummy_template_json, - tags={"foo": "bar"}, - ) - conn.update_stack( - "test_stack", - template_body=dummy_template_json, - tags={"foo": "baz"}, - ) - - stack = conn.describe_stacks()[0] - stack.stack_status.should.equal("UPDATE_COMPLETE") - # since there is one tag it doesn't come out as a list - dict(stack.tags).should.equal({"foo": "baz"}) - - -@mock_cloudformation_deprecated -def test_update_stack_when_rolled_back(): - conn = boto.connect_cloudformation() - stack_id = conn.create_stack( - "test_stack", template_body=dummy_template_json) - - cloudformation_backends[conn.region.name].stacks[ - stack_id].status = 'ROLLBACK_COMPLETE' - - with assert_raises(BotoServerError) as err: - conn.update_stack("test_stack", dummy_template_json) - - ex = err.exception - ex.body.should.match( - r'is in ROLLBACK_COMPLETE state and can not be updated') - ex.error_code.should.equal('ValidationError') - ex.reason.should.equal('Bad Request') - ex.status.should.equal(400) - - -@mock_cloudformation_deprecated -def test_describe_stack_events_shows_create_update_and_delete(): - conn = boto.connect_cloudformation() - stack_id = conn.create_stack( - "test_stack", template_body=dummy_template_json) - conn.update_stack(stack_id, template_body=dummy_template_json2) - conn.delete_stack(stack_id) - - # assert begins and ends with stack events - events = conn.describe_stack_events(stack_id) - events[0].resource_type.should.equal("AWS::CloudFormation::Stack") - events[-1].resource_type.should.equal("AWS::CloudFormation::Stack") - - # testing ordering of stack events without assuming resource events will not exist - # the AWS API returns events in reverse chronological order - stack_events_to_look_for = iter([ - ("DELETE_COMPLETE", None), - ("DELETE_IN_PROGRESS", "User Initiated"), - ("UPDATE_COMPLETE", None), - ("UPDATE_IN_PROGRESS", "User Initiated"), - ("CREATE_COMPLETE", None), - ("CREATE_IN_PROGRESS", "User Initiated"), - ]) - try: - for event in events: - event.stack_id.should.equal(stack_id) - event.stack_name.should.equal("test_stack") - event.event_id.should.match(r"[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}") - 
- if event.resource_type == "AWS::CloudFormation::Stack": - event.logical_resource_id.should.equal("test_stack") - event.physical_resource_id.should.equal(stack_id) - - status_to_look_for, reason_to_look_for = next( - stack_events_to_look_for) - event.resource_status.should.equal(status_to_look_for) - if reason_to_look_for is not None: - event.resource_status_reason.should.equal( - reason_to_look_for) - except StopIteration: - assert False, "Too many stack events" - - list(stack_events_to_look_for).should.be.empty - - -@mock_cloudformation_deprecated -def test_create_stack_lambda_and_dynamodb(): - conn = boto.connect_cloudformation() - dummy_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack Lambda Test 1", - "Parameters": { - }, - "Resources": { - "func1": { - "Type" : "AWS::Lambda::Function", - "Properties" : { - "Code": { - "S3Bucket": "bucket_123", - "S3Key": "key_123" - }, - "FunctionName": "func1", - "Handler": "handler.handler", - "Role": "role1", - "Runtime": "python2.7", - "Description": "descr", - "MemorySize": 12345, - } - }, - "func1version": { - "Type": "AWS::Lambda::LambdaVersion", - "Properties" : { - "Version": "v1.2.3" - } - }, - "tab1": { - "Type" : "AWS::DynamoDB::Table", - "Properties" : { - "TableName": "tab1", - "KeySchema": [{ - "AttributeName": "attr1", - "KeyType": "HASH" - }], - "AttributeDefinitions": [{ - "AttributeName": "attr1", - "AttributeType": "string" - }], - "ProvisionedThroughput": { - "ReadCapacityUnits": 10, - "WriteCapacityUnits": 10 - } - } - }, - "func1mapping": { - "Type": "AWS::Lambda::EventSourceMapping", - "Properties" : { - "FunctionName": "v1.2.3", - "EventSourceArn": "arn:aws:dynamodb:region:XXXXXX:table/tab1/stream/2000T00:00:00.000", - "StartingPosition": "0", - "BatchSize": 100, - "Enabled": True - } - } - }, - } - validate_s3_before = os.environ.get('VALIDATE_LAMBDA_S3', '') - try: - os.environ['VALIDATE_LAMBDA_S3'] = 'false' - conn.create_stack( - "test_stack_lambda_1", - template_body=json.dumps(dummy_template), - parameters={}.items() - ) - finally: - os.environ['VALIDATE_LAMBDA_S3'] = validate_s3_before - - stack = conn.describe_stacks()[0] - resources = stack.list_resources() - assert len(resources) == 4 - - -@mock_cloudformation_deprecated -def test_create_stack_kinesis(): - conn = boto.connect_cloudformation() - dummy_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack Kinesis Test 1", - "Parameters": {}, - "Resources": { - "stream1": { - "Type" : "AWS::Kinesis::Stream", - "Properties" : { - "Name": "stream1", - "ShardCount": 2 - } - } - } - } - conn.create_stack( - "test_stack_kinesis_1", - template_body=json.dumps(dummy_template), - parameters={}.items() - ) - - stack = conn.describe_stacks()[0] - resources = stack.list_resources() - assert len(resources) == 1 +from __future__ import unicode_literals + +import os +import json + +import boto +import boto.s3 +import boto.s3.key +import boto.cloudformation +from boto.exception import BotoServerError +import sure # noqa +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises # noqa +from nose.tools import assert_raises + +from moto import mock_cloudformation_deprecated, mock_s3_deprecated, mock_route53_deprecated +from moto.cloudformation import cloudformation_backends + +dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 1", + "Resources": {}, +} + +dummy_template2 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 2", + 
"Resources": {}, +} + +# template with resource which has no delete attribute defined +dummy_template3 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 3", + "Resources": { + "VPC": { + "Properties": { + "CidrBlock": "192.168.0.0/16", + }, + "Type": "AWS::EC2::VPC" + } + }, +} + +dummy_template_json = json.dumps(dummy_template) +dummy_template_json2 = json.dumps(dummy_template2) +dummy_template_json3 = json.dumps(dummy_template3) + + +@mock_cloudformation_deprecated +def test_create_stack(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + ) + + stack = conn.describe_stacks()[0] + stack.stack_name.should.equal('test_stack') + stack.get_template().should.equal({ + 'GetTemplateResponse': { + 'GetTemplateResult': { + 'TemplateBody': dummy_template_json, + 'ResponseMetadata': { + 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' + } + } + } + + }) + + +@mock_cloudformation_deprecated +@mock_route53_deprecated +def test_create_stack_hosted_zone_by_id(): + conn = boto.connect_cloudformation() + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 1", + "Parameters": { + }, + "Resources": { + "Bar": { + "Type" : "AWS::Route53::HostedZone", + "Properties" : { + "Name" : "foo.bar.baz", + } + }, + }, + } + dummy_template2 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 2", + "Parameters": { + "ZoneId": { "Type": "String" } + }, + "Resources": { + "Foo": { + "Properties": { + "HostedZoneId": {"Ref": "ZoneId"}, + "RecordSets": [] + }, + "Type": "AWS::Route53::RecordSetGroup" + } + }, + } + conn.create_stack( + "test_stack", + template_body=json.dumps(dummy_template), + parameters={}.items() + ) + r53_conn = boto.connect_route53() + zone_id = r53_conn.get_zones()[0].id + conn.create_stack( + "test_stack", + template_body=json.dumps(dummy_template2), + parameters={"ZoneId": zone_id}.items() + ) + + stack = conn.describe_stacks()[0] + assert stack.list_resources() + + +@mock_cloudformation_deprecated +def test_creating_stacks_across_regions(): + west1_conn = boto.cloudformation.connect_to_region("us-west-1") + west1_conn.create_stack("test_stack", template_body=dummy_template_json) + + west2_conn = boto.cloudformation.connect_to_region("us-west-2") + west2_conn.create_stack("test_stack", template_body=dummy_template_json) + + list(west1_conn.describe_stacks()).should.have.length_of(1) + list(west2_conn.describe_stacks()).should.have.length_of(1) + + +@mock_cloudformation_deprecated +def test_create_stack_with_notification_arn(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack_with_notifications", + template_body=dummy_template_json, + notification_arns='arn:aws:sns:us-east-1:123456789012:fake-queue' + ) + + stack = conn.describe_stacks()[0] + [n.value for n in stack.notification_arns].should.contain( + 'arn:aws:sns:us-east-1:123456789012:fake-queue') + + +@mock_cloudformation_deprecated +@mock_s3_deprecated +def test_create_stack_from_s3_url(): + s3_conn = boto.s3.connect_to_region('us-west-1') + bucket = s3_conn.create_bucket("foobar") + key = boto.s3.key.Key(bucket) + key.key = "template-key" + key.set_contents_from_string(dummy_template_json) + key_url = key.generate_url(expires_in=0, query_auth=False) + + conn = boto.cloudformation.connect_to_region('us-west-1') + conn.create_stack('new-stack', template_url=key_url) + + stack = conn.describe_stacks()[0] + stack.stack_name.should.equal('new-stack') + 
stack.get_template().should.equal( + { + 'GetTemplateResponse': { + 'GetTemplateResult': { + 'TemplateBody': dummy_template_json, + 'ResponseMetadata': { + 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' + } + } + } + + }) + + +@mock_cloudformation_deprecated +def test_describe_stack_by_name(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + ) + + stack = conn.describe_stacks("test_stack")[0] + stack.stack_name.should.equal('test_stack') + + +@mock_cloudformation_deprecated +def test_describe_stack_by_stack_id(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + ) + + stack = conn.describe_stacks("test_stack")[0] + stack_by_id = conn.describe_stacks(stack.stack_id)[0] + stack_by_id.stack_id.should.equal(stack.stack_id) + stack_by_id.stack_name.should.equal("test_stack") + + +@mock_cloudformation_deprecated +def test_describe_deleted_stack(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + ) + + stack = conn.describe_stacks("test_stack")[0] + stack_id = stack.stack_id + conn.delete_stack(stack.stack_id) + stack_by_id = conn.describe_stacks(stack_id)[0] + stack_by_id.stack_id.should.equal(stack.stack_id) + stack_by_id.stack_name.should.equal("test_stack") + stack_by_id.stack_status.should.equal("DELETE_COMPLETE") + + +@mock_cloudformation_deprecated +def test_get_template_by_name(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + ) + + template = conn.get_template("test_stack") + template.should.equal({ + 'GetTemplateResponse': { + 'GetTemplateResult': { + 'TemplateBody': dummy_template_json, + 'ResponseMetadata': { + 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' + } + } + } + + }) + + +@mock_cloudformation_deprecated +def test_list_stacks(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + ) + conn.create_stack( + "test_stack2", + template_body=dummy_template_json, + ) + + stacks = conn.list_stacks() + stacks.should.have.length_of(2) + stacks[0].template_description.should.equal("Stack 1") + + +@mock_cloudformation_deprecated +def test_delete_stack_by_name(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + ) + + conn.list_stacks().should.have.length_of(1) + conn.delete_stack("test_stack") + conn.list_stacks().should.have.length_of(0) + + +@mock_cloudformation_deprecated +def test_delete_stack_by_id(): + conn = boto.connect_cloudformation() + stack_id = conn.create_stack( + "test_stack", + template_body=dummy_template_json, + ) + + conn.list_stacks().should.have.length_of(1) + conn.delete_stack(stack_id) + conn.list_stacks().should.have.length_of(0) + with assert_raises(BotoServerError): + conn.describe_stacks("test_stack") + + conn.describe_stacks(stack_id).should.have.length_of(1) + + +@mock_cloudformation_deprecated +def test_delete_stack_with_resource_missing_delete_attr(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json3, + ) + + conn.list_stacks().should.have.length_of(1) + conn.delete_stack("test_stack") + conn.list_stacks().should.have.length_of(0) + + +@mock_cloudformation_deprecated +def test_bad_describe_stack(): + conn = boto.connect_cloudformation() + with assert_raises(BotoServerError): + 
conn.describe_stacks("bad_stack") + + +@mock_cloudformation_deprecated() +def test_cloudformation_params(): + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 1", + "Resources": {}, + "Parameters": { + "APPNAME": { + "Default": "app-name", + "Description": "The name of the app", + "Type": "String" + } + } + } + dummy_template_json = json.dumps(dummy_template) + cfn = boto.connect_cloudformation() + cfn.create_stack('test_stack1', template_body=dummy_template_json, parameters=[ + ('APPNAME', 'testing123')]) + stack = cfn.describe_stacks('test_stack1')[0] + stack.parameters.should.have.length_of(1) + param = stack.parameters[0] + param.key.should.equal('APPNAME') + param.value.should.equal('testing123') + + +@mock_cloudformation_deprecated +def test_cloudformation_params_conditions_and_resources_are_distinct(): + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 1", + "Conditions": { + "FooEnabled": { + "Fn::Equals": [ + { + "Ref": "FooEnabled" + }, + "true" + ] + }, + "FooDisabled": { + "Fn::Not": [ + { + "Fn::Equals": [ + { + "Ref": "FooEnabled" + }, + "true" + ] + } + ] + } + }, + "Parameters": { + "FooEnabled": { + "Type": "String", + "AllowedValues": [ + "true", + "false" + ] + } + }, + "Resources": { + "Bar": { + "Properties": { + "CidrBlock": "192.168.0.0/16", + }, + "Condition": "FooDisabled", + "Type": "AWS::EC2::VPC" + } + } + } + dummy_template_json = json.dumps(dummy_template) + cfn = boto.connect_cloudformation() + cfn.create_stack('test_stack1', template_body=dummy_template_json, parameters=[('FooEnabled', 'true')]) + stack = cfn.describe_stacks('test_stack1')[0] + resources = stack.list_resources() + assert not [resource for resource in resources if resource.logical_resource_id == 'Bar'] + + +@mock_cloudformation_deprecated +def test_stack_tags(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + tags={"foo": "bar", "baz": "bleh"}, + ) + + stack = conn.describe_stacks()[0] + dict(stack.tags).should.equal({"foo": "bar", "baz": "bleh"}) + + +@mock_cloudformation_deprecated +def test_update_stack(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + ) + + conn.update_stack("test_stack", dummy_template_json2) + + stack = conn.describe_stacks()[0] + stack.stack_status.should.equal("UPDATE_COMPLETE") + stack.get_template().should.equal({ + 'GetTemplateResponse': { + 'GetTemplateResult': { + 'TemplateBody': dummy_template_json2, + 'ResponseMetadata': { + 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' + } + } + } + }) + + +@mock_cloudformation_deprecated +def test_update_stack_with_previous_template(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + ) + conn.update_stack("test_stack", use_previous_template=True) + + stack = conn.describe_stacks()[0] + stack.stack_status.should.equal("UPDATE_COMPLETE") + stack.get_template().should.equal({ + 'GetTemplateResponse': { + 'GetTemplateResult': { + 'TemplateBody': dummy_template_json, + 'ResponseMetadata': { + 'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE' + } + } + } + }) + + +@mock_cloudformation_deprecated +def test_update_stack_with_parameters(): + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack", + "Resources": { + "VPC": { + "Properties": { + "CidrBlock": {"Ref": "Bar"} + }, + "Type": "AWS::EC2::VPC" + } + 
}, + "Parameters": { + "Bar": { + "Type": "String" + } + } + } + dummy_template_json = json.dumps(dummy_template) + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + parameters=[("Bar", "192.168.0.0/16")] + ) + conn.update_stack( + "test_stack", + template_body=dummy_template_json, + parameters=[("Bar", "192.168.0.1/16")] + ) + + stack = conn.describe_stacks()[0] + assert stack.parameters[0].value == "192.168.0.1/16" + + +@mock_cloudformation_deprecated +def test_update_stack_replace_tags(): + conn = boto.connect_cloudformation() + conn.create_stack( + "test_stack", + template_body=dummy_template_json, + tags={"foo": "bar"}, + ) + conn.update_stack( + "test_stack", + template_body=dummy_template_json, + tags={"foo": "baz"}, + ) + + stack = conn.describe_stacks()[0] + stack.stack_status.should.equal("UPDATE_COMPLETE") + # since there is one tag it doesn't come out as a list + dict(stack.tags).should.equal({"foo": "baz"}) + + +@mock_cloudformation_deprecated +def test_update_stack_when_rolled_back(): + conn = boto.connect_cloudformation() + stack_id = conn.create_stack( + "test_stack", template_body=dummy_template_json) + + cloudformation_backends[conn.region.name].stacks[ + stack_id].status = 'ROLLBACK_COMPLETE' + + with assert_raises(BotoServerError) as err: + conn.update_stack("test_stack", dummy_template_json) + + ex = err.exception + ex.body.should.match( + r'is in ROLLBACK_COMPLETE state and can not be updated') + ex.error_code.should.equal('ValidationError') + ex.reason.should.equal('Bad Request') + ex.status.should.equal(400) + + +@mock_cloudformation_deprecated +def test_describe_stack_events_shows_create_update_and_delete(): + conn = boto.connect_cloudformation() + stack_id = conn.create_stack( + "test_stack", template_body=dummy_template_json) + conn.update_stack(stack_id, template_body=dummy_template_json2) + conn.delete_stack(stack_id) + + # assert begins and ends with stack events + events = conn.describe_stack_events(stack_id) + events[0].resource_type.should.equal("AWS::CloudFormation::Stack") + events[-1].resource_type.should.equal("AWS::CloudFormation::Stack") + + # testing ordering of stack events without assuming resource events will not exist + # the AWS API returns events in reverse chronological order + stack_events_to_look_for = iter([ + ("DELETE_COMPLETE", None), + ("DELETE_IN_PROGRESS", "User Initiated"), + ("UPDATE_COMPLETE", None), + ("UPDATE_IN_PROGRESS", "User Initiated"), + ("CREATE_COMPLETE", None), + ("CREATE_IN_PROGRESS", "User Initiated"), + ]) + try: + for event in events: + event.stack_id.should.equal(stack_id) + event.stack_name.should.equal("test_stack") + event.event_id.should.match(r"[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}") + + if event.resource_type == "AWS::CloudFormation::Stack": + event.logical_resource_id.should.equal("test_stack") + event.physical_resource_id.should.equal(stack_id) + + status_to_look_for, reason_to_look_for = next( + stack_events_to_look_for) + event.resource_status.should.equal(status_to_look_for) + if reason_to_look_for is not None: + event.resource_status_reason.should.equal( + reason_to_look_for) + except StopIteration: + assert False, "Too many stack events" + + list(stack_events_to_look_for).should.be.empty + + +@mock_cloudformation_deprecated +def test_create_stack_lambda_and_dynamodb(): + conn = boto.connect_cloudformation() + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack Lambda Test 1", + "Parameters": { + }, + 
"Resources": { + "func1": { + "Type" : "AWS::Lambda::Function", + "Properties" : { + "Code": { + "S3Bucket": "bucket_123", + "S3Key": "key_123" + }, + "FunctionName": "func1", + "Handler": "handler.handler", + "Role": "role1", + "Runtime": "python2.7", + "Description": "descr", + "MemorySize": 12345, + } + }, + "func1version": { + "Type": "AWS::Lambda::LambdaVersion", + "Properties" : { + "Version": "v1.2.3" + } + }, + "tab1": { + "Type" : "AWS::DynamoDB::Table", + "Properties" : { + "TableName": "tab1", + "KeySchema": [{ + "AttributeName": "attr1", + "KeyType": "HASH" + }], + "AttributeDefinitions": [{ + "AttributeName": "attr1", + "AttributeType": "string" + }], + "ProvisionedThroughput": { + "ReadCapacityUnits": 10, + "WriteCapacityUnits": 10 + } + } + }, + "func1mapping": { + "Type": "AWS::Lambda::EventSourceMapping", + "Properties" : { + "FunctionName": "v1.2.3", + "EventSourceArn": "arn:aws:dynamodb:region:XXXXXX:table/tab1/stream/2000T00:00:00.000", + "StartingPosition": "0", + "BatchSize": 100, + "Enabled": True + } + } + }, + } + validate_s3_before = os.environ.get('VALIDATE_LAMBDA_S3', '') + try: + os.environ['VALIDATE_LAMBDA_S3'] = 'false' + conn.create_stack( + "test_stack_lambda_1", + template_body=json.dumps(dummy_template), + parameters={}.items() + ) + finally: + os.environ['VALIDATE_LAMBDA_S3'] = validate_s3_before + + stack = conn.describe_stacks()[0] + resources = stack.list_resources() + assert len(resources) == 4 + + +@mock_cloudformation_deprecated +def test_create_stack_kinesis(): + conn = boto.connect_cloudformation() + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack Kinesis Test 1", + "Parameters": {}, + "Resources": { + "stream1": { + "Type" : "AWS::Kinesis::Stream", + "Properties" : { + "Name": "stream1", + "ShardCount": 2 + } + } + } + } + conn.create_stack( + "test_stack_kinesis_1", + template_body=json.dumps(dummy_template), + parameters={}.items() + ) + + stack = conn.describe_stacks()[0] + resources = stack.list_resources() + assert len(resources) == 1 diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 9bfae6174..152b359e3 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -1,795 +1,795 @@ -from __future__ import unicode_literals - -import json -from collections import OrderedDict - -import boto3 -from botocore.exceptions import ClientError -import sure # noqa -# Ensure 'assert_raises' context manager support for Python 2.6 -from nose.tools import assert_raises - -from moto import mock_cloudformation, mock_s3, mock_sqs, mock_ec2 - -dummy_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack 1", - "Resources": { - "EC2Instance1": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "ami-d3adb33f", - "KeyName": "dummy", - "InstanceType": "t2.micro", - "Tags": [ - { - "Key": "Description", - "Value": "Test tag" - }, - { - "Key": "Name", - "Value": "Name tag for tests" - } - ] - } - } - } -} - -dummy_template_yaml = """--- -AWSTemplateFormatVersion: 2010-09-09 -Description: Stack1 with yaml template -Resources: - EC2Instance1: - Type: AWS::EC2::Instance - Properties: - ImageId: ami-d3adb33f - KeyName: dummy - InstanceType: t2.micro - Tags: - - Key: Description - Value: Test tag - - Key: Name - Value: Name tag for tests -""" - -dummy_template_yaml_with_short_form_func = """--- 
-AWSTemplateFormatVersion: 2010-09-09 -Description: Stack1 with yaml template -Resources: - EC2Instance1: - Type: AWS::EC2::Instance - Properties: - ImageId: ami-d3adb33f - KeyName: !Join [ ":", [ du, m, my ] ] - InstanceType: t2.micro - Tags: - - Key: Description - Value: Test tag - - Key: Name - Value: Name tag for tests -""" - -dummy_template_yaml_with_ref = """--- -AWSTemplateFormatVersion: 2010-09-09 -Description: Stack1 with yaml template -Parameters: - TagDescription: - Type: String - TagName: - Type: String - -Resources: - EC2Instance1: - Type: AWS::EC2::Instance - Properties: - ImageId: ami-d3adb33f - KeyName: dummy - InstanceType: t2.micro - Tags: - - Key: Description - Value: - Ref: TagDescription - - Key: Name - Value: !Ref TagName -""" - -dummy_update_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Parameters": { - "KeyName": { - "Description": "Name of an existing EC2 KeyPair", - "Type": "AWS::EC2::KeyPair::KeyName", - "ConstraintDescription": "must be the name of an existing EC2 KeyPair." - } - }, - "Resources": { - "Instance": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "ami-08111162" - } - } - } -} - -dummy_output_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack 1", - "Resources": { - "Instance": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "ami-08111162" - } - } - }, - "Outputs": { - "StackVPC": { - "Description": "The ID of the VPC", - "Value": "VPCID", - "Export": { - "Name": "My VPC ID" - } - } - } -} - -dummy_import_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "Queue": { - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": {"Fn::ImportValue": 'My VPC ID'}, - "VisibilityTimeout": 60, - } - } - } -} - -dummy_redrive_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "MainQueue": { - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": "mainqueue.fifo", - "FifoQueue": True, - "ContentBasedDeduplication": False, - "RedrivePolicy": { - "deadLetterTargetArn": { - "Fn::GetAtt": [ - "DeadLetterQueue", - "Arn" - ] - }, - "maxReceiveCount": 5 - } - } - }, - "DeadLetterQueue": { - "Type": "AWS::SQS::Queue", - "Properties": { - "FifoQueue": True - } - }, - } -} - -dummy_template_json = json.dumps(dummy_template) -dummy_update_template_json = json.dumps(dummy_update_template) -dummy_output_template_json = json.dumps(dummy_output_template) -dummy_import_template_json = json.dumps(dummy_import_template) -dummy_redrive_template_json = json.dumps(dummy_redrive_template) - - - -@mock_cloudformation -def test_boto3_create_stack(): - cf_conn = boto3.client('cloudformation', region_name='us-east-1') - cf_conn.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_json, - ) - - cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal( - json.loads(dummy_template_json, object_pairs_hook=OrderedDict)) - - -@mock_cloudformation -def test_boto3_create_stack_with_yaml(): - cf_conn = boto3.client('cloudformation', region_name='us-east-1') - cf_conn.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_yaml, - ) - - cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal( - dummy_template_yaml) - - -@mock_cloudformation -def test_boto3_create_stack_with_short_form_func_yaml(): - cf_conn = boto3.client('cloudformation', region_name='us-east-1') - cf_conn.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_yaml_with_short_form_func, - ) - - 
cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal( - dummy_template_yaml_with_short_form_func) - - -@mock_cloudformation -def test_boto3_create_stack_with_ref_yaml(): - cf_conn = boto3.client('cloudformation', region_name='us-east-1') - params = [ - {'ParameterKey': 'TagDescription', 'ParameterValue': 'desc_ref'}, - {'ParameterKey': 'TagName', 'ParameterValue': 'name_ref'}, - ] - cf_conn.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_yaml_with_ref, - Parameters=params - ) - - cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal( - dummy_template_yaml_with_ref) - - -@mock_cloudformation -def test_creating_stacks_across_regions(): - west1_cf = boto3.resource('cloudformation', region_name='us-west-1') - west2_cf = boto3.resource('cloudformation', region_name='us-west-2') - west1_cf.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_json, - ) - west2_cf.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_json, - ) - - list(west1_cf.stacks.all()).should.have.length_of(1) - list(west2_cf.stacks.all()).should.have.length_of(1) - - -@mock_cloudformation -def test_create_stack_with_notification_arn(): - cf = boto3.resource('cloudformation', region_name='us-east-1') - cf.create_stack( - StackName="test_stack_with_notifications", - TemplateBody=dummy_template_json, - NotificationARNs=['arn:aws:sns:us-east-1:123456789012:fake-queue'], - ) - - stack = list(cf.stacks.all())[0] - stack.notification_arns.should.contain( - 'arn:aws:sns:us-east-1:123456789012:fake-queue') - - -@mock_cloudformation -def test_create_stack_with_role_arn(): - cf = boto3.resource('cloudformation', region_name='us-east-1') - cf.create_stack( - StackName="test_stack_with_notifications", - TemplateBody=dummy_template_json, - RoleARN='arn:aws:iam::123456789012:role/moto', - ) - stack = list(cf.stacks.all())[0] - stack.role_arn.should.equal('arn:aws:iam::123456789012:role/moto') - - -@mock_cloudformation -@mock_s3 -def test_create_stack_from_s3_url(): - s3 = boto3.client('s3') - s3_conn = boto3.resource('s3') - bucket = s3_conn.create_bucket(Bucket="foobar") - - key = s3_conn.Object( - 'foobar', 'template-key').put(Body=dummy_template_json) - key_url = s3.generate_presigned_url( - ClientMethod='get_object', - Params={ - 'Bucket': 'foobar', - 'Key': 'template-key' - } - ) - - cf_conn = boto3.client('cloudformation', region_name='us-west-1') - cf_conn.create_stack( - StackName='stack_from_url', - TemplateURL=key_url, - ) - cf_conn.get_template(StackName="stack_from_url")['TemplateBody'].should.equal( - json.loads(dummy_template_json, object_pairs_hook=OrderedDict)) - - -@mock_cloudformation -def test_update_stack_with_previous_value(): - name = 'update_stack_with_previous_value' - cf_conn = boto3.client('cloudformation', region_name='us-east-1') - cf_conn.create_stack( - StackName=name, TemplateBody=dummy_template_yaml_with_ref, - Parameters=[ - {'ParameterKey': 'TagName', 'ParameterValue': 'foo'}, - {'ParameterKey': 'TagDescription', 'ParameterValue': 'bar'}, - ] - ) - cf_conn.update_stack( - StackName=name, UsePreviousTemplate=True, - Parameters=[ - {'ParameterKey': 'TagName', 'UsePreviousValue': True}, - {'ParameterKey': 'TagDescription', 'ParameterValue': 'not bar'}, - ] - ) - stack = cf_conn.describe_stacks(StackName=name)['Stacks'][0] - tag_name = [x['ParameterValue'] for x in stack['Parameters'] - if x['ParameterKey'] == 'TagName'][0] - tag_desc = [x['ParameterValue'] for x in stack['Parameters'] - if x['ParameterKey'] == 
'TagDescription'][0] - assert tag_name == 'foo' - assert tag_desc == 'not bar' - - -@mock_cloudformation -@mock_s3 -@mock_ec2 -def test_update_stack_from_s3_url(): - s3 = boto3.client('s3') - s3_conn = boto3.resource('s3') - - cf_conn = boto3.client('cloudformation', region_name='us-east-1') - cf_conn.create_stack( - StackName="update_stack_from_url", - TemplateBody=dummy_template_json, - Tags=[{'Key': 'foo', 'Value': 'bar'}], - ) - - s3_conn.create_bucket(Bucket="foobar") - - s3_conn.Object( - 'foobar', 'template-key').put(Body=dummy_update_template_json) - key_url = s3.generate_presigned_url( - ClientMethod='get_object', - Params={ - 'Bucket': 'foobar', - 'Key': 'template-key' - } - ) - - cf_conn.update_stack( - StackName="update_stack_from_url", - TemplateURL=key_url, - ) - - cf_conn.get_template(StackName="update_stack_from_url")[ 'TemplateBody'].should.equal( - json.loads(dummy_update_template_json, object_pairs_hook=OrderedDict)) - - -@mock_cloudformation -@mock_s3 -def test_create_change_set_from_s3_url(): - s3 = boto3.client('s3') - s3_conn = boto3.resource('s3') - bucket = s3_conn.create_bucket(Bucket="foobar") - - key = s3_conn.Object( - 'foobar', 'template-key').put(Body=dummy_template_json) - key_url = s3.generate_presigned_url( - ClientMethod='get_object', - Params={ - 'Bucket': 'foobar', - 'Key': 'template-key' - } - ) - cf_conn = boto3.client('cloudformation', region_name='us-west-1') - response = cf_conn.create_change_set( - StackName='NewStack', - TemplateURL=key_url, - ChangeSetName='NewChangeSet', - ChangeSetType='CREATE', - ) - assert 'arn:aws:cloudformation:us-west-1:123456789:changeSet/NewChangeSet/' in response['Id'] - assert 'arn:aws:cloudformation:us-east-1:123456789:stack/NewStack' in response['StackId'] - - -@mock_cloudformation -def test_execute_change_set_w_arn(): - cf_conn = boto3.client('cloudformation', region_name='us-east-1') - change_set = cf_conn.create_change_set( - StackName='NewStack', - TemplateBody=dummy_template_json, - ChangeSetName='NewChangeSet', - ChangeSetType='CREATE', - ) - cf_conn.execute_change_set(ChangeSetName=change_set['Id']) - - -@mock_cloudformation -def test_execute_change_set_w_name(): - cf_conn = boto3.client('cloudformation', region_name='us-east-1') - change_set = cf_conn.create_change_set( - StackName='NewStack', - TemplateBody=dummy_template_json, - ChangeSetName='NewChangeSet', - ChangeSetType='CREATE', - ) - cf_conn.execute_change_set(ChangeSetName='NewStack', StackName='NewStack') - - -@mock_cloudformation -def test_describe_stack_pagination(): - conn = boto3.client('cloudformation', region_name='us-east-1') - for i in range(100): - conn.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_json, - ) - - resp = conn.describe_stacks() - stacks = resp['Stacks'] - stacks.should.have.length_of(50) - next_token = resp['NextToken'] - next_token.should_not.be.none - resp2 = conn.describe_stacks(NextToken=next_token) - stacks.extend(resp2['Stacks']) - stacks.should.have.length_of(100) - assert 'NextToken' not in resp2.keys() - - -@mock_cloudformation -def test_describe_stack_resources(): - cf_conn = boto3.client('cloudformation', region_name='us-east-1') - cf_conn.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_json, - ) - - stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0] - - response = cf_conn.describe_stack_resources(StackName=stack['StackName']) - resource = response['StackResources'][0] - resource['LogicalResourceId'].should.equal('EC2Instance1') - 
resource['ResourceStatus'].should.equal('CREATE_COMPLETE') - resource['ResourceType'].should.equal('AWS::EC2::Instance') - resource['StackId'].should.equal(stack['StackId']) - - -@mock_cloudformation -def test_describe_stack_by_name(): - cf_conn = boto3.client('cloudformation', region_name='us-east-1') - cf_conn.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_json, - ) - - stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0] - stack['StackName'].should.equal('test_stack') - - -@mock_cloudformation -def test_describe_stack_by_stack_id(): - cf_conn = boto3.client('cloudformation', region_name='us-east-1') - cf_conn.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_json, - ) - - stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0] - stack_by_id = cf_conn.describe_stacks(StackName=stack['StackId'])['Stacks'][ - 0] - - stack_by_id['StackId'].should.equal(stack['StackId']) - stack_by_id['StackName'].should.equal("test_stack") - - -@mock_cloudformation -def test_list_stacks(): - cf = boto3.resource('cloudformation', region_name='us-east-1') - cf.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_json, - ) - cf.create_stack( - StackName="test_stack2", - TemplateBody=dummy_template_json, - ) - - stacks = list(cf.stacks.all()) - stacks.should.have.length_of(2) - stack_names = [stack.stack_name for stack in stacks] - stack_names.should.contain("test_stack") - stack_names.should.contain("test_stack2") - - -@mock_cloudformation -def test_delete_stack_from_resource(): - cf = boto3.resource('cloudformation', region_name='us-east-1') - stack = cf.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_json, - ) - - list(cf.stacks.all()).should.have.length_of(1) - stack.delete() - list(cf.stacks.all()).should.have.length_of(0) - - -@mock_cloudformation -@mock_ec2 -def test_delete_stack_by_name(): - cf_conn = boto3.client('cloudformation', region_name='us-east-1') - cf_conn.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_json, - ) - - cf_conn.describe_stacks()['Stacks'].should.have.length_of(1) - cf_conn.delete_stack(StackName="test_stack") - cf_conn.describe_stacks()['Stacks'].should.have.length_of(0) - - -@mock_cloudformation -def test_describe_deleted_stack(): - cf_conn = boto3.client('cloudformation', region_name='us-east-1') - cf_conn.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_json, - ) - - stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0] - stack_id = stack['StackId'] - cf_conn.delete_stack(StackName=stack['StackId']) - stack_by_id = cf_conn.describe_stacks(StackName=stack_id)['Stacks'][0] - stack_by_id['StackId'].should.equal(stack['StackId']) - stack_by_id['StackName'].should.equal("test_stack") - stack_by_id['StackStatus'].should.equal("DELETE_COMPLETE") - - -@mock_cloudformation -@mock_ec2 -def test_describe_updated_stack(): - cf_conn = boto3.client('cloudformation', region_name='us-east-1') - cf_conn.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_json, - Tags=[{'Key': 'foo', 'Value': 'bar'}], - ) - - cf_conn.update_stack( - StackName="test_stack", - RoleARN='arn:aws:iam::123456789012:role/moto', - TemplateBody=dummy_update_template_json, - Tags=[{'Key': 'foo', 'Value': 'baz'}], - ) - - stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0] - stack_id = stack['StackId'] - stack_by_id = cf_conn.describe_stacks(StackName=stack_id)['Stacks'][0] - 
stack_by_id['StackId'].should.equal(stack['StackId']) - stack_by_id['StackName'].should.equal("test_stack") - stack_by_id['StackStatus'].should.equal("UPDATE_COMPLETE") - stack_by_id['RoleARN'].should.equal('arn:aws:iam::123456789012:role/moto') - stack_by_id['Tags'].should.equal([{'Key': 'foo', 'Value': 'baz'}]) - - -@mock_cloudformation -def test_bad_describe_stack(): - cf_conn = boto3.client('cloudformation', region_name='us-east-1') - with assert_raises(ClientError): - cf_conn.describe_stacks(StackName="non_existent_stack") - - -@mock_cloudformation() -def test_cloudformation_params(): - dummy_template_with_params = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Stack 1", - "Resources": {}, - "Parameters": { - "APPNAME": { - "Default": "app-name", - "Description": "The name of the app", - "Type": "String" - } - } - } - dummy_template_with_params_json = json.dumps(dummy_template_with_params) - - cf = boto3.resource('cloudformation', region_name='us-east-1') - stack = cf.create_stack( - StackName='test_stack', - TemplateBody=dummy_template_with_params_json, - Parameters=[{ - "ParameterKey": "APPNAME", - "ParameterValue": "testing123", - }], - ) - - stack.parameters.should.have.length_of(1) - param = stack.parameters[0] - param['ParameterKey'].should.equal('APPNAME') - param['ParameterValue'].should.equal('testing123') - - -@mock_cloudformation -def test_stack_tags(): - tags = [ - { - "Key": "foo", - "Value": "bar" - }, - { - "Key": "baz", - "Value": "bleh" - } - ] - cf = boto3.resource('cloudformation', region_name='us-east-1') - stack = cf.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_json, - Tags=tags, - ) - observed_tag_items = set( - item for items in [tag.items() for tag in stack.tags] for item in items) - expected_tag_items = set( - item for items in [tag.items() for tag in tags] for item in items) - observed_tag_items.should.equal(expected_tag_items) - - -@mock_cloudformation -@mock_ec2 -def test_stack_events(): - cf = boto3.resource('cloudformation', region_name='us-east-1') - stack = cf.create_stack( - StackName="test_stack", - TemplateBody=dummy_template_json, - ) - stack.update(TemplateBody=dummy_update_template_json) - stack = cf.Stack(stack.stack_id) - stack.delete() - - # assert begins and ends with stack events - events = list(stack.events.all()) - events[0].resource_type.should.equal("AWS::CloudFormation::Stack") - events[-1].resource_type.should.equal("AWS::CloudFormation::Stack") - - # testing ordering of stack events without assuming resource events will not exist - # the AWS API returns events in reverse chronological order - stack_events_to_look_for = iter([ - ("DELETE_COMPLETE", None), - ("DELETE_IN_PROGRESS", "User Initiated"), - ("UPDATE_COMPLETE", None), - ("UPDATE_IN_PROGRESS", "User Initiated"), - ("CREATE_COMPLETE", None), - ("CREATE_IN_PROGRESS", "User Initiated"), - ]) - try: - for event in events: - event.stack_id.should.equal(stack.stack_id) - event.stack_name.should.equal("test_stack") - event.event_id.should.match(r"[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}") - - if event.resource_type == "AWS::CloudFormation::Stack": - event.logical_resource_id.should.equal("test_stack") - event.physical_resource_id.should.equal(stack.stack_id) - - status_to_look_for, reason_to_look_for = next( - stack_events_to_look_for) - event.resource_status.should.equal(status_to_look_for) - if reason_to_look_for is not None: - event.resource_status_reason.should.equal( - reason_to_look_for) - except StopIteration: - assert False, "Too many 
stack events" - - list(stack_events_to_look_for).should.be.empty - - -@mock_cloudformation -def test_list_exports(): - cf_client = boto3.client('cloudformation', region_name='us-east-1') - cf_resource = boto3.resource('cloudformation', region_name='us-east-1') - stack = cf_resource.create_stack( - StackName="test_stack", - TemplateBody=dummy_output_template_json, - ) - output_value = 'VPCID' - exports = cf_client.list_exports()['Exports'] - - stack.outputs.should.have.length_of(1) - stack.outputs[0]['OutputValue'].should.equal(output_value) - - exports.should.have.length_of(1) - exports[0]['ExportingStackId'].should.equal(stack.stack_id) - exports[0]['Name'].should.equal('My VPC ID') - exports[0]['Value'].should.equal(output_value) - - -@mock_cloudformation -def test_list_exports_with_token(): - cf = boto3.client('cloudformation', region_name='us-east-1') - for i in range(101): - # Add index to ensure name is unique - dummy_output_template['Outputs']['StackVPC']['Export']['Name'] += str(i) - cf.create_stack( - StackName="test_stack", - TemplateBody=json.dumps(dummy_output_template), - ) - exports = cf.list_exports() - exports['Exports'].should.have.length_of(100) - exports.get('NextToken').should_not.be.none - - more_exports = cf.list_exports(NextToken=exports['NextToken']) - more_exports['Exports'].should.have.length_of(1) - more_exports.get('NextToken').should.be.none - - -@mock_cloudformation -def test_delete_stack_with_export(): - cf = boto3.client('cloudformation', region_name='us-east-1') - stack = cf.create_stack( - StackName="test_stack", - TemplateBody=dummy_output_template_json, - ) - - stack_id = stack['StackId'] - exports = cf.list_exports()['Exports'] - exports.should.have.length_of(1) - - cf.delete_stack(StackName=stack_id) - cf.list_exports()['Exports'].should.have.length_of(0) - - -@mock_cloudformation -def test_export_names_must_be_unique(): - cf = boto3.resource('cloudformation', region_name='us-east-1') - first_stack = cf.create_stack( - StackName="test_stack", - TemplateBody=dummy_output_template_json, - ) - with assert_raises(ClientError): - cf.create_stack( - StackName="test_stack", - TemplateBody=dummy_output_template_json, - ) - - -@mock_sqs -@mock_cloudformation -def test_stack_with_imports(): - cf = boto3.resource('cloudformation', region_name='us-east-1') - ec2_resource = boto3.resource('sqs', region_name='us-east-1') - - output_stack = cf.create_stack( - StackName="test_stack1", - TemplateBody=dummy_output_template_json, - ) - import_stack = cf.create_stack( - StackName="test_stack2", - TemplateBody=dummy_import_template_json - ) - - output_stack.outputs.should.have.length_of(1) - output = output_stack.outputs[0]['OutputValue'] - queue = ec2_resource.get_queue_by_name(QueueName=output) - queue.should_not.be.none - - -@mock_sqs -@mock_cloudformation -def test_non_json_redrive_policy(): - cf = boto3.resource('cloudformation', region_name='us-east-1') - - stack = cf.create_stack( - StackName="test_stack1", - TemplateBody=dummy_redrive_template_json - ) - - stack.Resource('MainQueue').resource_status\ - .should.equal("CREATE_COMPLETE") - stack.Resource('DeadLetterQueue').resource_status\ - .should.equal("CREATE_COMPLETE") +from __future__ import unicode_literals + +import json +from collections import OrderedDict + +import boto3 +from botocore.exceptions import ClientError +import sure # noqa +# Ensure 'assert_raises' context manager support for Python 2.6 +from nose.tools import assert_raises + +from moto import mock_cloudformation, mock_s3, mock_sqs, mock_ec2 + 
+dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 1", + "Resources": { + "EC2Instance1": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-d3adb33f", + "KeyName": "dummy", + "InstanceType": "t2.micro", + "Tags": [ + { + "Key": "Description", + "Value": "Test tag" + }, + { + "Key": "Name", + "Value": "Name tag for tests" + } + ] + } + } + } +} + +dummy_template_yaml = """--- +AWSTemplateFormatVersion: 2010-09-09 +Description: Stack1 with yaml template +Resources: + EC2Instance1: + Type: AWS::EC2::Instance + Properties: + ImageId: ami-d3adb33f + KeyName: dummy + InstanceType: t2.micro + Tags: + - Key: Description + Value: Test tag + - Key: Name + Value: Name tag for tests +""" + +dummy_template_yaml_with_short_form_func = """--- +AWSTemplateFormatVersion: 2010-09-09 +Description: Stack1 with yaml template +Resources: + EC2Instance1: + Type: AWS::EC2::Instance + Properties: + ImageId: ami-d3adb33f + KeyName: !Join [ ":", [ du, m, my ] ] + InstanceType: t2.micro + Tags: + - Key: Description + Value: Test tag + - Key: Name + Value: Name tag for tests +""" + +dummy_template_yaml_with_ref = """--- +AWSTemplateFormatVersion: 2010-09-09 +Description: Stack1 with yaml template +Parameters: + TagDescription: + Type: String + TagName: + Type: String + +Resources: + EC2Instance1: + Type: AWS::EC2::Instance + Properties: + ImageId: ami-d3adb33f + KeyName: dummy + InstanceType: t2.micro + Tags: + - Key: Description + Value: + Ref: TagDescription + - Key: Name + Value: !Ref TagName +""" + +dummy_update_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Parameters": { + "KeyName": { + "Description": "Name of an existing EC2 KeyPair", + "Type": "AWS::EC2::KeyPair::KeyName", + "ConstraintDescription": "must be the name of an existing EC2 KeyPair." 
+ } + }, + "Resources": { + "Instance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-08111162" + } + } + } +} + +dummy_output_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 1", + "Resources": { + "Instance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-08111162" + } + } + }, + "Outputs": { + "StackVPC": { + "Description": "The ID of the VPC", + "Value": "VPCID", + "Export": { + "Name": "My VPC ID" + } + } + } +} + +dummy_import_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Queue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": {"Fn::ImportValue": 'My VPC ID'}, + "VisibilityTimeout": 60, + } + } + } +} + +dummy_redrive_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "MainQueue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": "mainqueue.fifo", + "FifoQueue": True, + "ContentBasedDeduplication": False, + "RedrivePolicy": { + "deadLetterTargetArn": { + "Fn::GetAtt": [ + "DeadLetterQueue", + "Arn" + ] + }, + "maxReceiveCount": 5 + } + } + }, + "DeadLetterQueue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "FifoQueue": True + } + }, + } +} + +dummy_template_json = json.dumps(dummy_template) +dummy_update_template_json = json.dumps(dummy_update_template) +dummy_output_template_json = json.dumps(dummy_output_template) +dummy_import_template_json = json.dumps(dummy_import_template) +dummy_redrive_template_json = json.dumps(dummy_redrive_template) + + + +@mock_cloudformation +def test_boto3_create_stack(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack( + StackName="test_stack", + TemplateBody=dummy_template_json, + ) + + cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal( + json.loads(dummy_template_json, object_pairs_hook=OrderedDict)) + + +@mock_cloudformation +def test_boto3_create_stack_with_yaml(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack( + StackName="test_stack", + TemplateBody=dummy_template_yaml, + ) + + cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal( + dummy_template_yaml) + + +@mock_cloudformation +def test_boto3_create_stack_with_short_form_func_yaml(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack( + StackName="test_stack", + TemplateBody=dummy_template_yaml_with_short_form_func, + ) + + cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal( + dummy_template_yaml_with_short_form_func) + + +@mock_cloudformation +def test_boto3_create_stack_with_ref_yaml(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + params = [ + {'ParameterKey': 'TagDescription', 'ParameterValue': 'desc_ref'}, + {'ParameterKey': 'TagName', 'ParameterValue': 'name_ref'}, + ] + cf_conn.create_stack( + StackName="test_stack", + TemplateBody=dummy_template_yaml_with_ref, + Parameters=params + ) + + cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal( + dummy_template_yaml_with_ref) + + +@mock_cloudformation +def test_creating_stacks_across_regions(): + west1_cf = boto3.resource('cloudformation', region_name='us-west-1') + west2_cf = boto3.resource('cloudformation', region_name='us-west-2') + west1_cf.create_stack( + StackName="test_stack", + TemplateBody=dummy_template_json, + ) + west2_cf.create_stack( + StackName="test_stack", + TemplateBody=dummy_template_json, + ) + + 
list(west1_cf.stacks.all()).should.have.length_of(1)
+    list(west2_cf.stacks.all()).should.have.length_of(1)
+
+
+@mock_cloudformation
+def test_create_stack_with_notification_arn():
+    cf = boto3.resource('cloudformation', region_name='us-east-1')
+    cf.create_stack(
+        StackName="test_stack_with_notifications",
+        TemplateBody=dummy_template_json,
+        NotificationARNs=['arn:aws:sns:us-east-1:123456789012:fake-queue'],
+    )
+
+    stack = list(cf.stacks.all())[0]
+    stack.notification_arns.should.contain(
+        'arn:aws:sns:us-east-1:123456789012:fake-queue')
+
+
+@mock_cloudformation
+def test_create_stack_with_role_arn():
+    cf = boto3.resource('cloudformation', region_name='us-east-1')
+    cf.create_stack(
+        StackName="test_stack_with_notifications",
+        TemplateBody=dummy_template_json,
+        RoleARN='arn:aws:iam::123456789012:role/moto',
+    )
+    stack = list(cf.stacks.all())[0]
+    stack.role_arn.should.equal('arn:aws:iam::123456789012:role/moto')
+
+
+@mock_cloudformation
+@mock_s3
+def test_create_stack_from_s3_url():
+    s3 = boto3.client('s3')
+    s3_conn = boto3.resource('s3')
+    s3_conn.create_bucket(Bucket="foobar")
+
+    s3_conn.Object(
+        'foobar', 'template-key').put(Body=dummy_template_json)
+    key_url = s3.generate_presigned_url(
+        ClientMethod='get_object',
+        Params={
+            'Bucket': 'foobar',
+            'Key': 'template-key'
+        }
+    )
+
+    cf_conn = boto3.client('cloudformation', region_name='us-west-1')
+    cf_conn.create_stack(
+        StackName='stack_from_url',
+        TemplateURL=key_url,
+    )
+    cf_conn.get_template(StackName="stack_from_url")['TemplateBody'].should.equal(
+        json.loads(dummy_template_json, object_pairs_hook=OrderedDict))
+
+
+@mock_cloudformation
+def test_update_stack_with_previous_value():
+    name = 'update_stack_with_previous_value'
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    cf_conn.create_stack(
+        StackName=name, TemplateBody=dummy_template_yaml_with_ref,
+        Parameters=[
+            {'ParameterKey': 'TagName', 'ParameterValue': 'foo'},
+            {'ParameterKey': 'TagDescription', 'ParameterValue': 'bar'},
+        ]
+    )
+    cf_conn.update_stack(
+        StackName=name, UsePreviousTemplate=True,
+        Parameters=[
+            {'ParameterKey': 'TagName', 'UsePreviousValue': True},
+            {'ParameterKey': 'TagDescription', 'ParameterValue': 'not bar'},
+        ]
+    )
+    stack = cf_conn.describe_stacks(StackName=name)['Stacks'][0]
+    tag_name = [x['ParameterValue'] for x in stack['Parameters']
+                if x['ParameterKey'] == 'TagName'][0]
+    tag_desc = [x['ParameterValue'] for x in stack['Parameters']
+                if x['ParameterKey'] == 'TagDescription'][0]
+    assert tag_name == 'foo'
+    assert tag_desc == 'not bar'
+
+
+@mock_cloudformation
+@mock_s3
+@mock_ec2
+def test_update_stack_from_s3_url():
+    s3 = boto3.client('s3')
+    s3_conn = boto3.resource('s3')
+
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    cf_conn.create_stack(
+        StackName="update_stack_from_url",
+        TemplateBody=dummy_template_json,
+        Tags=[{'Key': 'foo', 'Value': 'bar'}],
+    )
+
+    s3_conn.create_bucket(Bucket="foobar")
+
+    s3_conn.Object(
+        'foobar', 'template-key').put(Body=dummy_update_template_json)
+    key_url = s3.generate_presigned_url(
+        ClientMethod='get_object',
+        Params={
+            'Bucket': 'foobar',
+            'Key': 'template-key'
+        }
+    )
+
+    cf_conn.update_stack(
+        StackName="update_stack_from_url",
+        TemplateURL=key_url,
+    )
+
+    cf_conn.get_template(StackName="update_stack_from_url")['TemplateBody'].should.equal(
+        json.loads(dummy_update_template_json, object_pairs_hook=OrderedDict))
+
+
+@mock_cloudformation
+@mock_s3
+def test_create_change_set_from_s3_url():
+    s3 = boto3.client('s3')
+    s3_conn = boto3.resource('s3')
+    s3_conn.create_bucket(Bucket="foobar")
+
+    s3_conn.Object(
+        'foobar', 'template-key').put(Body=dummy_template_json)
+    key_url = s3.generate_presigned_url(
+        ClientMethod='get_object',
+        Params={
+            'Bucket': 'foobar',
+            'Key': 'template-key'
+        }
+    )
+    cf_conn = boto3.client('cloudformation', region_name='us-west-1')
+    response = cf_conn.create_change_set(
+        StackName='NewStack',
+        TemplateURL=key_url,
+        ChangeSetName='NewChangeSet',
+        ChangeSetType='CREATE',
+    )
+    assert 'arn:aws:cloudformation:us-west-1:123456789:changeSet/NewChangeSet/' in response['Id']
+    assert 'arn:aws:cloudformation:us-east-1:123456789:stack/NewStack' in response['StackId']
+
+
+@mock_cloudformation
+def test_execute_change_set_w_arn():
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    change_set = cf_conn.create_change_set(
+        StackName='NewStack',
+        TemplateBody=dummy_template_json,
+        ChangeSetName='NewChangeSet',
+        ChangeSetType='CREATE',
+    )
+    cf_conn.execute_change_set(ChangeSetName=change_set['Id'])
+
+
+@mock_cloudformation
+def test_execute_change_set_w_name():
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    change_set = cf_conn.create_change_set(
+        StackName='NewStack',
+        TemplateBody=dummy_template_json,
+        ChangeSetName='NewChangeSet',
+        ChangeSetType='CREATE',
+    )
+    # Execute by change set name (not ARN), so pass the actual change set name
+    cf_conn.execute_change_set(ChangeSetName='NewChangeSet', StackName='NewStack')
+
+
+@mock_cloudformation
+def test_describe_stack_pagination():
+    conn = boto3.client('cloudformation', region_name='us-east-1')
+    for i in range(100):
+        conn.create_stack(
+            StackName="test_stack",
+            TemplateBody=dummy_template_json,
+        )
+
+    resp = conn.describe_stacks()
+    stacks = resp['Stacks']
+    stacks.should.have.length_of(50)
+    next_token = resp['NextToken']
+    next_token.should_not.be.none
+    resp2 = conn.describe_stacks(NextToken=next_token)
+    stacks.extend(resp2['Stacks'])
+    stacks.should.have.length_of(100)
+    assert 'NextToken' not in resp2.keys()
+
+
+@mock_cloudformation
+def test_describe_stack_resources():
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    cf_conn.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_template_json,
+    )
+
+    stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0]
+
+    response = cf_conn.describe_stack_resources(StackName=stack['StackName'])
+    resource = response['StackResources'][0]
+    resource['LogicalResourceId'].should.equal('EC2Instance1')
+    resource['ResourceStatus'].should.equal('CREATE_COMPLETE')
+    resource['ResourceType'].should.equal('AWS::EC2::Instance')
+    resource['StackId'].should.equal(stack['StackId'])
+
+
+@mock_cloudformation
+def test_describe_stack_by_name():
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    cf_conn.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_template_json,
+    )
+
+    stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0]
+    stack['StackName'].should.equal('test_stack')
+
+
+@mock_cloudformation
+def test_describe_stack_by_stack_id():
+    cf_conn = boto3.client('cloudformation', region_name='us-east-1')
+    cf_conn.create_stack(
+        StackName="test_stack",
+        TemplateBody=dummy_template_json,
+    )
+
+    stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0]
+    stack_by_id = cf_conn.describe_stacks(StackName=stack['StackId'])['Stacks'][0]
+
+    stack_by_id['StackId'].should.equal(stack['StackId'])
+    stack_by_id['StackName'].should.equal("test_stack")
+
+
+@mock_cloudformation
+def 
test_list_stacks(): + cf = boto3.resource('cloudformation', region_name='us-east-1') + cf.create_stack( + StackName="test_stack", + TemplateBody=dummy_template_json, + ) + cf.create_stack( + StackName="test_stack2", + TemplateBody=dummy_template_json, + ) + + stacks = list(cf.stacks.all()) + stacks.should.have.length_of(2) + stack_names = [stack.stack_name for stack in stacks] + stack_names.should.contain("test_stack") + stack_names.should.contain("test_stack2") + + +@mock_cloudformation +def test_delete_stack_from_resource(): + cf = boto3.resource('cloudformation', region_name='us-east-1') + stack = cf.create_stack( + StackName="test_stack", + TemplateBody=dummy_template_json, + ) + + list(cf.stacks.all()).should.have.length_of(1) + stack.delete() + list(cf.stacks.all()).should.have.length_of(0) + + +@mock_cloudformation +@mock_ec2 +def test_delete_stack_by_name(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack( + StackName="test_stack", + TemplateBody=dummy_template_json, + ) + + cf_conn.describe_stacks()['Stacks'].should.have.length_of(1) + cf_conn.delete_stack(StackName="test_stack") + cf_conn.describe_stacks()['Stacks'].should.have.length_of(0) + + +@mock_cloudformation +def test_describe_deleted_stack(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack( + StackName="test_stack", + TemplateBody=dummy_template_json, + ) + + stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0] + stack_id = stack['StackId'] + cf_conn.delete_stack(StackName=stack['StackId']) + stack_by_id = cf_conn.describe_stacks(StackName=stack_id)['Stacks'][0] + stack_by_id['StackId'].should.equal(stack['StackId']) + stack_by_id['StackName'].should.equal("test_stack") + stack_by_id['StackStatus'].should.equal("DELETE_COMPLETE") + + +@mock_cloudformation +@mock_ec2 +def test_describe_updated_stack(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack( + StackName="test_stack", + TemplateBody=dummy_template_json, + Tags=[{'Key': 'foo', 'Value': 'bar'}], + ) + + cf_conn.update_stack( + StackName="test_stack", + RoleARN='arn:aws:iam::123456789012:role/moto', + TemplateBody=dummy_update_template_json, + Tags=[{'Key': 'foo', 'Value': 'baz'}], + ) + + stack = cf_conn.describe_stacks(StackName="test_stack")['Stacks'][0] + stack_id = stack['StackId'] + stack_by_id = cf_conn.describe_stacks(StackName=stack_id)['Stacks'][0] + stack_by_id['StackId'].should.equal(stack['StackId']) + stack_by_id['StackName'].should.equal("test_stack") + stack_by_id['StackStatus'].should.equal("UPDATE_COMPLETE") + stack_by_id['RoleARN'].should.equal('arn:aws:iam::123456789012:role/moto') + stack_by_id['Tags'].should.equal([{'Key': 'foo', 'Value': 'baz'}]) + + +@mock_cloudformation +def test_bad_describe_stack(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + with assert_raises(ClientError): + cf_conn.describe_stacks(StackName="non_existent_stack") + + +@mock_cloudformation() +def test_cloudformation_params(): + dummy_template_with_params = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Stack 1", + "Resources": {}, + "Parameters": { + "APPNAME": { + "Default": "app-name", + "Description": "The name of the app", + "Type": "String" + } + } + } + dummy_template_with_params_json = json.dumps(dummy_template_with_params) + + cf = boto3.resource('cloudformation', region_name='us-east-1') + stack = cf.create_stack( + StackName='test_stack', + 
TemplateBody=dummy_template_with_params_json, + Parameters=[{ + "ParameterKey": "APPNAME", + "ParameterValue": "testing123", + }], + ) + + stack.parameters.should.have.length_of(1) + param = stack.parameters[0] + param['ParameterKey'].should.equal('APPNAME') + param['ParameterValue'].should.equal('testing123') + + +@mock_cloudformation +def test_stack_tags(): + tags = [ + { + "Key": "foo", + "Value": "bar" + }, + { + "Key": "baz", + "Value": "bleh" + } + ] + cf = boto3.resource('cloudformation', region_name='us-east-1') + stack = cf.create_stack( + StackName="test_stack", + TemplateBody=dummy_template_json, + Tags=tags, + ) + observed_tag_items = set( + item for items in [tag.items() for tag in stack.tags] for item in items) + expected_tag_items = set( + item for items in [tag.items() for tag in tags] for item in items) + observed_tag_items.should.equal(expected_tag_items) + + +@mock_cloudformation +@mock_ec2 +def test_stack_events(): + cf = boto3.resource('cloudformation', region_name='us-east-1') + stack = cf.create_stack( + StackName="test_stack", + TemplateBody=dummy_template_json, + ) + stack.update(TemplateBody=dummy_update_template_json) + stack = cf.Stack(stack.stack_id) + stack.delete() + + # assert begins and ends with stack events + events = list(stack.events.all()) + events[0].resource_type.should.equal("AWS::CloudFormation::Stack") + events[-1].resource_type.should.equal("AWS::CloudFormation::Stack") + + # testing ordering of stack events without assuming resource events will not exist + # the AWS API returns events in reverse chronological order + stack_events_to_look_for = iter([ + ("DELETE_COMPLETE", None), + ("DELETE_IN_PROGRESS", "User Initiated"), + ("UPDATE_COMPLETE", None), + ("UPDATE_IN_PROGRESS", "User Initiated"), + ("CREATE_COMPLETE", None), + ("CREATE_IN_PROGRESS", "User Initiated"), + ]) + try: + for event in events: + event.stack_id.should.equal(stack.stack_id) + event.stack_name.should.equal("test_stack") + event.event_id.should.match(r"[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}") + + if event.resource_type == "AWS::CloudFormation::Stack": + event.logical_resource_id.should.equal("test_stack") + event.physical_resource_id.should.equal(stack.stack_id) + + status_to_look_for, reason_to_look_for = next( + stack_events_to_look_for) + event.resource_status.should.equal(status_to_look_for) + if reason_to_look_for is not None: + event.resource_status_reason.should.equal( + reason_to_look_for) + except StopIteration: + assert False, "Too many stack events" + + list(stack_events_to_look_for).should.be.empty + + +@mock_cloudformation +def test_list_exports(): + cf_client = boto3.client('cloudformation', region_name='us-east-1') + cf_resource = boto3.resource('cloudformation', region_name='us-east-1') + stack = cf_resource.create_stack( + StackName="test_stack", + TemplateBody=dummy_output_template_json, + ) + output_value = 'VPCID' + exports = cf_client.list_exports()['Exports'] + + stack.outputs.should.have.length_of(1) + stack.outputs[0]['OutputValue'].should.equal(output_value) + + exports.should.have.length_of(1) + exports[0]['ExportingStackId'].should.equal(stack.stack_id) + exports[0]['Name'].should.equal('My VPC ID') + exports[0]['Value'].should.equal(output_value) + + +@mock_cloudformation +def test_list_exports_with_token(): + cf = boto3.client('cloudformation', region_name='us-east-1') + for i in range(101): + # Add index to ensure name is unique + dummy_output_template['Outputs']['StackVPC']['Export']['Name'] += str(i) + cf.create_stack( + 
StackName="test_stack", + TemplateBody=json.dumps(dummy_output_template), + ) + exports = cf.list_exports() + exports['Exports'].should.have.length_of(100) + exports.get('NextToken').should_not.be.none + + more_exports = cf.list_exports(NextToken=exports['NextToken']) + more_exports['Exports'].should.have.length_of(1) + more_exports.get('NextToken').should.be.none + + +@mock_cloudformation +def test_delete_stack_with_export(): + cf = boto3.client('cloudformation', region_name='us-east-1') + stack = cf.create_stack( + StackName="test_stack", + TemplateBody=dummy_output_template_json, + ) + + stack_id = stack['StackId'] + exports = cf.list_exports()['Exports'] + exports.should.have.length_of(1) + + cf.delete_stack(StackName=stack_id) + cf.list_exports()['Exports'].should.have.length_of(0) + + +@mock_cloudformation +def test_export_names_must_be_unique(): + cf = boto3.resource('cloudformation', region_name='us-east-1') + first_stack = cf.create_stack( + StackName="test_stack", + TemplateBody=dummy_output_template_json, + ) + with assert_raises(ClientError): + cf.create_stack( + StackName="test_stack", + TemplateBody=dummy_output_template_json, + ) + + +@mock_sqs +@mock_cloudformation +def test_stack_with_imports(): + cf = boto3.resource('cloudformation', region_name='us-east-1') + ec2_resource = boto3.resource('sqs', region_name='us-east-1') + + output_stack = cf.create_stack( + StackName="test_stack1", + TemplateBody=dummy_output_template_json, + ) + import_stack = cf.create_stack( + StackName="test_stack2", + TemplateBody=dummy_import_template_json + ) + + output_stack.outputs.should.have.length_of(1) + output = output_stack.outputs[0]['OutputValue'] + queue = ec2_resource.get_queue_by_name(QueueName=output) + queue.should_not.be.none + + +@mock_sqs +@mock_cloudformation +def test_non_json_redrive_policy(): + cf = boto3.resource('cloudformation', region_name='us-east-1') + + stack = cf.create_stack( + StackName="test_stack1", + TemplateBody=dummy_redrive_template_json + ) + + stack.Resource('MainQueue').resource_status\ + .should.equal("CREATE_COMPLETE") + stack.Resource('DeadLetterQueue').resource_status\ + .should.equal("CREATE_COMPLETE") diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index 2c808726f..936f7c2a1 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -1,2427 +1,2427 @@ -from __future__ import unicode_literals -import json - -import base64 -import boto -import boto.cloudformation -import boto.datapipeline -import boto.ec2 -import boto.ec2.autoscale -import boto.ec2.elb -from boto.exception import BotoServerError -import boto.iam -import boto.redshift -import boto.sns -import boto.sqs -import boto.vpc -import boto3 -import sure # noqa - -from moto import ( - mock_autoscaling_deprecated, - mock_cloudformation, - mock_cloudformation_deprecated, - mock_datapipeline_deprecated, - mock_ec2, - mock_ec2_deprecated, - mock_elb, - mock_elb_deprecated, - mock_iam_deprecated, - mock_kms, - mock_lambda, - mock_rds_deprecated, - mock_rds2, - mock_rds2_deprecated, - mock_redshift, - mock_redshift_deprecated, - mock_route53_deprecated, - mock_sns_deprecated, - mock_sqs, - mock_sqs_deprecated, - mock_elbv2) - -from .fixtures import ( - ec2_classic_eip, - fn_join, - rds_mysql_with_db_parameter_group, - rds_mysql_with_read_replica, - redshift, - route53_ec2_instance_with_public_ip, - 
route53_health_check, - route53_roundrobin, - single_instance_with_ebs_volume, - vpc_eip, - vpc_single_instance_in_subnet, -) - - -@mock_cloudformation_deprecated() -def test_stack_sqs_integration(): - sqs_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "QueueGroup": { - - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": "my-queue", - "VisibilityTimeout": 60, - } - }, - }, - } - sqs_template_json = json.dumps(sqs_template) - - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack", - template_body=sqs_template_json, - ) - - stack = conn.describe_stacks()[0] - queue = stack.describe_resources()[0] - queue.resource_type.should.equal('AWS::SQS::Queue') - queue.logical_resource_id.should.equal("QueueGroup") - queue.physical_resource_id.should.equal("my-queue") - - -@mock_cloudformation_deprecated() -def test_stack_list_resources(): - sqs_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "QueueGroup": { - - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": "my-queue", - "VisibilityTimeout": 60, - } - }, - }, - } - sqs_template_json = json.dumps(sqs_template) - - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack", - template_body=sqs_template_json, - ) - - resources = conn.list_stack_resources("test_stack") - assert len(resources) == 1 - queue = resources[0] - queue.resource_type.should.equal('AWS::SQS::Queue') - queue.logical_resource_id.should.equal("QueueGroup") - queue.physical_resource_id.should.equal("my-queue") - - -@mock_cloudformation_deprecated() -@mock_sqs_deprecated() -def test_update_stack(): - sqs_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "QueueGroup": { - - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": "my-queue", - "VisibilityTimeout": 60, - } - }, - }, - } - sqs_template_json = json.dumps(sqs_template) - - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack", - template_body=sqs_template_json, - ) - - sqs_conn = boto.sqs.connect_to_region("us-west-1") - queues = sqs_conn.get_all_queues() - queues.should.have.length_of(1) - queues[0].get_attributes('VisibilityTimeout')[ - 'VisibilityTimeout'].should.equal('60') - - sqs_template['Resources']['QueueGroup'][ - 'Properties']['VisibilityTimeout'] = 100 - sqs_template_json = json.dumps(sqs_template) - conn.update_stack("test_stack", sqs_template_json) - - queues = sqs_conn.get_all_queues() - queues.should.have.length_of(1) - queues[0].get_attributes('VisibilityTimeout')[ - 'VisibilityTimeout'].should.equal('100') - - -@mock_cloudformation_deprecated() -@mock_sqs_deprecated() -def test_update_stack_and_remove_resource(): - sqs_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "QueueGroup": { - - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": "my-queue", - "VisibilityTimeout": 60, - } - }, - }, - } - sqs_template_json = json.dumps(sqs_template) - - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack", - template_body=sqs_template_json, - ) - - sqs_conn = boto.sqs.connect_to_region("us-west-1") - queues = sqs_conn.get_all_queues() - queues.should.have.length_of(1) - - sqs_template['Resources'].pop('QueueGroup') - sqs_template_json = json.dumps(sqs_template) - conn.update_stack("test_stack", sqs_template_json) - - queues = sqs_conn.get_all_queues() - queues.should.have.length_of(0) - - -@mock_cloudformation_deprecated() 
-@mock_sqs_deprecated() -def test_update_stack_and_add_resource(): - sqs_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": {}, - } - sqs_template_json = json.dumps(sqs_template) - - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack", - template_body=sqs_template_json, - ) - - sqs_conn = boto.sqs.connect_to_region("us-west-1") - queues = sqs_conn.get_all_queues() - queues.should.have.length_of(0) - - sqs_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "QueueGroup": { - - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": "my-queue", - "VisibilityTimeout": 60, - } - }, - }, - } - sqs_template_json = json.dumps(sqs_template) - conn.update_stack("test_stack", sqs_template_json) - - queues = sqs_conn.get_all_queues() - queues.should.have.length_of(1) - - -@mock_ec2_deprecated() -@mock_cloudformation_deprecated() -def test_stack_ec2_integration(): - ec2_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "WebServerGroup": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "ami-1234abcd", - "UserData": "some user data", - } - }, - }, - } - ec2_template_json = json.dumps(ec2_template) - - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "ec2_stack", - template_body=ec2_template_json, - ) - - ec2_conn = boto.ec2.connect_to_region("us-west-1") - reservation = ec2_conn.get_all_instances()[0] - ec2_instance = reservation.instances[0] - - stack = conn.describe_stacks()[0] - instance = stack.describe_resources()[0] - instance.resource_type.should.equal('AWS::EC2::Instance') - instance.logical_resource_id.should.contain("WebServerGroup") - instance.physical_resource_id.should.equal(ec2_instance.id) - - -@mock_ec2_deprecated() -@mock_elb_deprecated() -@mock_cloudformation_deprecated() -def test_stack_elb_integration_with_attached_ec2_instances(): - elb_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "MyELB": { - "Type": "AWS::ElasticLoadBalancing::LoadBalancer", - "Properties": { - "Instances": [{"Ref": "Ec2Instance1"}], - "LoadBalancerName": "test-elb", - "AvailabilityZones": ['us-east-1'], - "Listeners": [ - { - "InstancePort": "80", - "LoadBalancerPort": "80", - "Protocol": "HTTP", - } - ], - } - }, - "Ec2Instance1": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "ami-1234abcd", - "UserData": "some user data", - } - }, - }, - } - elb_template_json = json.dumps(elb_template) - - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "elb_stack", - template_body=elb_template_json, - ) - - elb_conn = boto.ec2.elb.connect_to_region("us-west-1") - load_balancer = elb_conn.get_all_load_balancers()[0] - - ec2_conn = boto.ec2.connect_to_region("us-west-1") - reservation = ec2_conn.get_all_instances()[0] - ec2_instance = reservation.instances[0] - - load_balancer.instances[0].id.should.equal(ec2_instance.id) - list(load_balancer.availability_zones).should.equal(['us-east-1']) - - -@mock_elb_deprecated() -@mock_cloudformation_deprecated() -def test_stack_elb_integration_with_health_check(): - elb_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "MyELB": { - "Type": "AWS::ElasticLoadBalancing::LoadBalancer", - "Properties": { - "LoadBalancerName": "test-elb", - "AvailabilityZones": ['us-west-1'], - "HealthCheck": { - "HealthyThreshold": "3", - "Interval": "5", - "Target": "HTTP:80/healthcheck", - "Timeout": "4", - "UnhealthyThreshold": "2", - }, - 
"Listeners": [ - { - "InstancePort": "80", - "LoadBalancerPort": "80", - "Protocol": "HTTP", - } - ], - } - }, - }, - } - elb_template_json = json.dumps(elb_template) - - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "elb_stack", - template_body=elb_template_json, - ) - - elb_conn = boto.ec2.elb.connect_to_region("us-west-1") - load_balancer = elb_conn.get_all_load_balancers()[0] - health_check = load_balancer.health_check - - health_check.healthy_threshold.should.equal(3) - health_check.interval.should.equal(5) - health_check.target.should.equal("HTTP:80/healthcheck") - health_check.timeout.should.equal(4) - health_check.unhealthy_threshold.should.equal(2) - - -@mock_elb_deprecated() -@mock_cloudformation_deprecated() -def test_stack_elb_integration_with_update(): - elb_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "MyELB": { - "Type": "AWS::ElasticLoadBalancing::LoadBalancer", - "Properties": { - "LoadBalancerName": "test-elb", - "AvailabilityZones": ['us-west-1a'], - "Listeners": [ - { - "InstancePort": "80", - "LoadBalancerPort": "80", - "Protocol": "HTTP", - } - ], - "Policies": {"Ref": "AWS::NoValue"}, - } - }, - }, - } - elb_template_json = json.dumps(elb_template) - - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "elb_stack", - template_body=elb_template_json, - ) - - elb_conn = boto.ec2.elb.connect_to_region("us-west-1") - load_balancer = elb_conn.get_all_load_balancers()[0] - load_balancer.availability_zones[0].should.equal('us-west-1a') - - elb_template['Resources']['MyELB']['Properties'][ - 'AvailabilityZones'] = ['us-west-1b'] - elb_template_json = json.dumps(elb_template) - conn.update_stack( - "elb_stack", - template_body=elb_template_json, - ) - load_balancer = elb_conn.get_all_load_balancers()[0] - load_balancer.availability_zones[0].should.equal('us-west-1b') - - -@mock_ec2_deprecated() -@mock_redshift_deprecated() -@mock_cloudformation_deprecated() -def test_redshift_stack(): - redshift_template_json = json.dumps(redshift.template) - - vpc_conn = boto.vpc.connect_to_region("us-west-2") - conn = boto.cloudformation.connect_to_region("us-west-2") - conn.create_stack( - "redshift_stack", - template_body=redshift_template_json, - parameters=[ - ("DatabaseName", "mydb"), - ("ClusterType", "multi-node"), - ("NumberOfNodes", 2), - ("NodeType", "dw1.xlarge"), - ("MasterUsername", "myuser"), - ("MasterUserPassword", "mypass"), - ("InboundTraffic", "10.0.0.1/16"), - ("PortNumber", 5439), - ] - ) - - redshift_conn = boto.redshift.connect_to_region("us-west-2") - - cluster_res = redshift_conn.describe_clusters() - clusters = cluster_res['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'] - clusters.should.have.length_of(1) - cluster = clusters[0] - cluster['DBName'].should.equal("mydb") - cluster['NumberOfNodes'].should.equal(2) - cluster['NodeType'].should.equal("dw1.xlarge") - cluster['MasterUsername'].should.equal("myuser") - cluster['Port'].should.equal(5439) - cluster['VpcSecurityGroups'].should.have.length_of(1) - security_group_id = cluster['VpcSecurityGroups'][0]['VpcSecurityGroupId'] - - groups = vpc_conn.get_all_security_groups(group_ids=[security_group_id]) - groups.should.have.length_of(1) - group = groups[0] - group.rules.should.have.length_of(1) - group.rules[0].grants[0].cidr_ip.should.equal("10.0.0.1/16") - - -@mock_ec2_deprecated() -@mock_cloudformation_deprecated() -def test_stack_security_groups(): - security_group_template = { - 
"AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "my-security-group": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupDescription": "My other group", - }, - }, - "Ec2Instance2": { - "Type": "AWS::EC2::Instance", - "Properties": { - "SecurityGroups": [{"Ref": "InstanceSecurityGroup"}], - "ImageId": "ami-1234abcd", - } - }, - "InstanceSecurityGroup": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupDescription": "My security group", - "Tags": [ - { - "Key": "bar", - "Value": "baz" - } - ], - "SecurityGroupIngress": [{ - "IpProtocol": "tcp", - "FromPort": "22", - "ToPort": "22", - "CidrIp": "123.123.123.123/32", - }, { - "IpProtocol": "tcp", - "FromPort": "80", - "ToPort": "8000", - "SourceSecurityGroupId": {"Ref": "my-security-group"}, - }] - } - } - }, - } - security_group_template_json = json.dumps(security_group_template) - - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "security_group_stack", - template_body=security_group_template_json, - tags={"foo": "bar"} - ) - - ec2_conn = boto.ec2.connect_to_region("us-west-1") - instance_group = ec2_conn.get_all_security_groups( - filters={'description': ['My security group']})[0] - other_group = ec2_conn.get_all_security_groups( - filters={'description': ['My other group']})[0] - - reservation = ec2_conn.get_all_instances()[0] - ec2_instance = reservation.instances[0] - - ec2_instance.groups[0].id.should.equal(instance_group.id) - instance_group.description.should.equal("My security group") - instance_group.tags.should.have.key('foo').which.should.equal('bar') - instance_group.tags.should.have.key('bar').which.should.equal('baz') - rule1, rule2 = instance_group.rules - int(rule1.to_port).should.equal(22) - int(rule1.from_port).should.equal(22) - rule1.grants[0].cidr_ip.should.equal("123.123.123.123/32") - rule1.ip_protocol.should.equal('tcp') - - int(rule2.to_port).should.equal(8000) - int(rule2.from_port).should.equal(80) - rule2.ip_protocol.should.equal('tcp') - rule2.grants[0].group_id.should.equal(other_group.id) - - -@mock_autoscaling_deprecated() -@mock_elb_deprecated() -@mock_cloudformation_deprecated() -@mock_ec2_deprecated() -def test_autoscaling_group_with_elb(): - web_setup_template = { - "AWSTemplateFormatVersion": "2010-09-09", - - "Resources": { - "my-as-group": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AvailabilityZones": ['us-east1'], - "LaunchConfigurationName": {"Ref": "my-launch-config"}, - "MinSize": "2", - "MaxSize": "2", - "DesiredCapacity": "2", - "LoadBalancerNames": [{"Ref": "my-elb"}], - "Tags": [ - { - "Key": "propagated-test-tag", "Value": "propagated-test-tag-value", - "PropagateAtLaunch": True}, - { - "Key": "not-propagated-test-tag", - "Value": "not-propagated-test-tag-value", - "PropagateAtLaunch": False - } - ] - }, - }, - - "my-launch-config": { - "Type": "AWS::AutoScaling::LaunchConfiguration", - "Properties": { - "ImageId": "ami-1234abcd", - "UserData": "some user data", - } - }, - - "my-elb": { - "Type": "AWS::ElasticLoadBalancing::LoadBalancer", - "Properties": { - "AvailabilityZones": ['us-east1'], - "Listeners": [{ - "LoadBalancerPort": "80", - "InstancePort": "80", - "Protocol": "HTTP", - }], - "LoadBalancerName": "my-elb", - "HealthCheck": { - "Target": "HTTP:80", - "HealthyThreshold": "3", - "UnhealthyThreshold": "5", - "Interval": "30", - "Timeout": "5", - }, - }, - }, - } - } - - web_setup_template_json = json.dumps(web_setup_template) - - conn = 
boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "web_stack", - template_body=web_setup_template_json, - ) - - autoscale_conn = boto.ec2.autoscale.connect_to_region("us-west-1") - autoscale_group = autoscale_conn.get_all_groups()[0] - autoscale_group.launch_config_name.should.contain("my-launch-config") - autoscale_group.load_balancers[0].should.equal('my-elb') - - # Confirm the Launch config was actually created - autoscale_conn.get_all_launch_configurations().should.have.length_of(1) - - # Confirm the ELB was actually created - elb_conn = boto.ec2.elb.connect_to_region("us-west-1") - elb_conn.get_all_load_balancers().should.have.length_of(1) - - stack = conn.describe_stacks()[0] - resources = stack.describe_resources() - as_group_resource = [resource for resource in resources if resource.resource_type == - 'AWS::AutoScaling::AutoScalingGroup'][0] - as_group_resource.physical_resource_id.should.contain("my-as-group") - - launch_config_resource = [ - resource for resource in resources if - resource.resource_type == 'AWS::AutoScaling::LaunchConfiguration'][0] - launch_config_resource.physical_resource_id.should.contain( - "my-launch-config") - - elb_resource = [resource for resource in resources if resource.resource_type == - 'AWS::ElasticLoadBalancing::LoadBalancer'][0] - elb_resource.physical_resource_id.should.contain("my-elb") - - # confirm the instances were created with the right tags - ec2_conn = boto.ec2.connect_to_region('us-west-1') - reservations = ec2_conn.get_all_reservations() - len(reservations).should.equal(1) - reservation = reservations[0] - len(reservation.instances).should.equal(2) - for instance in reservation.instances: - instance.tags['propagated-test-tag'].should.equal('propagated-test-tag-value') - instance.tags.keys().should_not.contain('not-propagated-test-tag') - - -@mock_autoscaling_deprecated() -@mock_cloudformation_deprecated() -@mock_ec2_deprecated() -def test_autoscaling_group_update(): - asg_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "my-as-group": { - "Type": "AWS::AutoScaling::AutoScalingGroup", - "Properties": { - "AvailabilityZones": ['us-west-1'], - "LaunchConfigurationName": {"Ref": "my-launch-config"}, - "MinSize": "2", - "MaxSize": "2", - "DesiredCapacity": "2" - }, - }, - - "my-launch-config": { - "Type": "AWS::AutoScaling::LaunchConfiguration", - "Properties": { - "ImageId": "ami-1234abcd", - "UserData": "some user data", - } - }, - }, - } - asg_template_json = json.dumps(asg_template) - - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "asg_stack", - template_body=asg_template_json, - ) - - autoscale_conn = boto.ec2.autoscale.connect_to_region("us-west-1") - asg = autoscale_conn.get_all_groups()[0] - asg.min_size.should.equal(2) - asg.max_size.should.equal(2) - asg.desired_capacity.should.equal(2) - - asg_template['Resources']['my-as-group']['Properties']['MaxSize'] = 3 - asg_template['Resources']['my-as-group']['Properties']['Tags'] = [ - { - "Key": "propagated-test-tag", "Value": "propagated-test-tag-value", - "PropagateAtLaunch": True}, - { - "Key": "not-propagated-test-tag", - "Value": "not-propagated-test-tag-value", - "PropagateAtLaunch": False - } - ] - asg_template_json = json.dumps(asg_template) - conn.update_stack( - "asg_stack", - template_body=asg_template_json, - ) - asg = autoscale_conn.get_all_groups()[0] - asg.min_size.should.equal(2) - asg.max_size.should.equal(3) - asg.desired_capacity.should.equal(2) - - # confirm the instances were 
created with the right tags - ec2_conn = boto.ec2.connect_to_region('us-west-1') - reservations = ec2_conn.get_all_reservations() - running_instance_count = 0 - for res in reservations: - for instance in res.instances: - if instance.state == 'running': - running_instance_count += 1 - instance.tags['propagated-test-tag'].should.equal('propagated-test-tag-value') - instance.tags.keys().should_not.contain('not-propagated-test-tag') - running_instance_count.should.equal(2) - - -@mock_ec2_deprecated() -@mock_cloudformation_deprecated() -def test_vpc_single_instance_in_subnet(): - template_json = json.dumps(vpc_single_instance_in_subnet.template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack", - template_body=template_json, - parameters=[("KeyName", "my_key")], - ) - - vpc_conn = boto.vpc.connect_to_region("us-west-1") - - vpc = vpc_conn.get_all_vpcs(filters={'cidrBlock': '10.0.0.0/16'})[0] - vpc.cidr_block.should.equal("10.0.0.0/16") - - # Add this once we implement the endpoint - # vpc_conn.get_all_internet_gateways().should.have.length_of(1) - - subnet = vpc_conn.get_all_subnets(filters={'vpcId': vpc.id})[0] - subnet.vpc_id.should.equal(vpc.id) - - ec2_conn = boto.ec2.connect_to_region("us-west-1") - reservation = ec2_conn.get_all_instances()[0] - instance = reservation.instances[0] - instance.tags["Foo"].should.equal("Bar") - # Check that the EIP is attached the the EC2 instance - eip = ec2_conn.get_all_addresses()[0] - eip.domain.should.equal('vpc') - eip.instance_id.should.equal(instance.id) - - security_group = ec2_conn.get_all_security_groups( - filters={'vpc_id': [vpc.id]})[0] - security_group.vpc_id.should.equal(vpc.id) - - stack = conn.describe_stacks()[0] - - vpc.tags.should.have.key('Application').which.should.equal(stack.stack_id) - - resources = stack.describe_resources() - vpc_resource = [ - resource for resource in resources if resource.resource_type == 'AWS::EC2::VPC'][0] - vpc_resource.physical_resource_id.should.equal(vpc.id) - - subnet_resource = [ - resource for resource in resources if resource.resource_type == 'AWS::EC2::Subnet'][0] - subnet_resource.physical_resource_id.should.equal(subnet.id) - - eip_resource = [ - resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] - eip_resource.physical_resource_id.should.equal(eip.public_ip) - - -@mock_cloudformation() -@mock_ec2() -@mock_rds2() -def test_rds_db_parameter_groups(): - ec2_conn = boto3.client("ec2", region_name="us-west-1") - ec2_conn.create_security_group( - GroupName='application', Description='Our Application Group') - - template_json = json.dumps(rds_mysql_with_db_parameter_group.template) - cf_conn = boto3.client('cloudformation', 'us-west-1') - cf_conn.create_stack( - StackName="test_stack", - TemplateBody=template_json, - Parameters=[{'ParameterKey': key, 'ParameterValue': value} for - key, value in [ - ("DBInstanceIdentifier", "master_db"), - ("DBName", "my_db"), - ("DBUser", "my_user"), - ("DBPassword", "my_password"), - ("DBAllocatedStorage", "20"), - ("DBInstanceClass", "db.m1.medium"), - ("EC2SecurityGroup", "application"), - ("MultiAZ", "true"), - ] - ], - ) - - rds_conn = boto3.client('rds', region_name="us-west-1") - - db_parameter_groups = rds_conn.describe_db_parameter_groups() - len(db_parameter_groups['DBParameterGroups']).should.equal(1) - db_parameter_group_name = db_parameter_groups[ - 'DBParameterGroups'][0]['DBParameterGroupName'] - - found_cloudformation_set_parameter = False - for db_parameter in 
rds_conn.describe_db_parameters(DBParameterGroupName=db_parameter_group_name)[ - 'Parameters']: - if db_parameter['ParameterName'] == 'BACKLOG_QUEUE_LIMIT' and db_parameter[ - 'ParameterValue'] == '2048': - found_cloudformation_set_parameter = True - - found_cloudformation_set_parameter.should.equal(True) - - -@mock_cloudformation_deprecated() -@mock_ec2_deprecated() -@mock_rds_deprecated() -def test_rds_mysql_with_read_replica(): - ec2_conn = boto.ec2.connect_to_region("us-west-1") - ec2_conn.create_security_group('application', 'Our Application Group') - - template_json = json.dumps(rds_mysql_with_read_replica.template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack", - template_body=template_json, - parameters=[ - ("DBInstanceIdentifier", "master_db"), - ("DBName", "my_db"), - ("DBUser", "my_user"), - ("DBPassword", "my_password"), - ("DBAllocatedStorage", "20"), - ("DBInstanceClass", "db.m1.medium"), - ("EC2SecurityGroup", "application"), - ("MultiAZ", "true"), - ], - ) - - rds_conn = boto.rds.connect_to_region("us-west-1") - - primary = rds_conn.get_all_dbinstances("master_db")[0] - primary.master_username.should.equal("my_user") - primary.allocated_storage.should.equal(20) - primary.instance_class.should.equal("db.m1.medium") - primary.multi_az.should.equal(True) - list(primary.read_replica_dbinstance_identifiers).should.have.length_of(1) - replica_id = primary.read_replica_dbinstance_identifiers[0] - - replica = rds_conn.get_all_dbinstances(replica_id)[0] - replica.instance_class.should.equal("db.m1.medium") - - security_group_name = primary.security_groups[0].name - security_group = rds_conn.get_all_dbsecurity_groups(security_group_name)[0] - security_group.ec2_groups[0].name.should.equal("application") - - -@mock_cloudformation_deprecated() -@mock_ec2_deprecated() -@mock_rds_deprecated() -def test_rds_mysql_with_read_replica_in_vpc(): - template_json = json.dumps(rds_mysql_with_read_replica.template) - conn = boto.cloudformation.connect_to_region("eu-central-1") - conn.create_stack( - "test_stack", - template_body=template_json, - parameters=[ - ("DBInstanceIdentifier", "master_db"), - ("DBName", "my_db"), - ("DBUser", "my_user"), - ("DBPassword", "my_password"), - ("DBAllocatedStorage", "20"), - ("DBInstanceClass", "db.m1.medium"), - ("MultiAZ", "true"), - ], - ) - - rds_conn = boto.rds.connect_to_region("eu-central-1") - primary = rds_conn.get_all_dbinstances("master_db")[0] - - subnet_group_name = primary.subnet_group.name - subnet_group = rds_conn.get_all_db_subnet_groups(subnet_group_name)[0] - subnet_group.description.should.equal("my db subnet group") - - -@mock_autoscaling_deprecated() -@mock_iam_deprecated() -@mock_cloudformation_deprecated() -def test_iam_roles(): - iam_template = { - "AWSTemplateFormatVersion": "2010-09-09", - - "Resources": { - - "my-launch-config": { - "Properties": { - "IamInstanceProfile": {"Ref": "my-instance-profile-with-path"}, - "ImageId": "ami-1234abcd", - }, - "Type": "AWS::AutoScaling::LaunchConfiguration" - }, - "my-instance-profile-with-path": { - "Properties": { - "Path": "my-path", - "Roles": [{"Ref": "my-role-with-path"}], - }, - "Type": "AWS::IAM::InstanceProfile" - }, - "my-instance-profile-no-path": { - "Properties": { - "Roles": [{"Ref": "my-role-no-path"}], - }, - "Type": "AWS::IAM::InstanceProfile" - }, - "my-role-with-path": { - "Properties": { - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": [ - "sts:AssumeRole" - ], - "Effect": "Allow", - "Principal": { - "Service": 
[ - "ec2.amazonaws.com" - ] - } - } - ] - }, - "Path": "my-path", - "Policies": [ - { - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "ec2:CreateTags", - "ec2:DescribeInstances", - "ec2:DescribeTags" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - } - ], - "Version": "2012-10-17" - }, - "PolicyName": "EC2_Tags" - }, - { - "PolicyDocument": { - "Statement": [ - { - "Action": [ - "sqs:*" - ], - "Effect": "Allow", - "Resource": [ - "*" - ] - } - ], - "Version": "2012-10-17" - }, - "PolicyName": "SQS" - }, - ] - }, - "Type": "AWS::IAM::Role" - }, - "my-role-no-path": { - "Properties": { - "AssumeRolePolicyDocument": { - "Statement": [ - { - "Action": [ - "sts:AssumeRole" - ], - "Effect": "Allow", - "Principal": { - "Service": [ - "ec2.amazonaws.com" - ] - } - } - ] - }, - }, - "Type": "AWS::IAM::Role" - } - } - } - - iam_template_json = json.dumps(iam_template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack", - template_body=iam_template_json, - ) - - iam_conn = boto.iam.connect_to_region("us-west-1") - - role_results = iam_conn.list_roles()['list_roles_response'][ - 'list_roles_result']['roles'] - role_name_to_id = {} - for role_result in role_results: - role = iam_conn.get_role(role_result.role_name) - role.role_name.should.contain("my-role") - if 'with-path' in role.role_name: - role_name_to_id['with-path'] = role.role_id - role.path.should.equal("my-path") - else: - role_name_to_id['no-path'] = role.role_id - role.role_name.should.contain('no-path') - role.path.should.equal('/') - - instance_profile_responses = iam_conn.list_instance_profiles()[ - 'list_instance_profiles_response']['list_instance_profiles_result']['instance_profiles'] - instance_profile_responses.should.have.length_of(2) - instance_profile_names = [] - - for instance_profile_response in instance_profile_responses: - instance_profile = iam_conn.get_instance_profile(instance_profile_response.instance_profile_name) - instance_profile_names.append(instance_profile.instance_profile_name) - instance_profile.instance_profile_name.should.contain( - "my-instance-profile") - if "with-path" in instance_profile.instance_profile_name: - instance_profile.path.should.equal("my-path") - instance_profile.role_id.should.equal(role_name_to_id['with-path']) - else: - instance_profile.instance_profile_name.should.contain('no-path') - instance_profile.role_id.should.equal(role_name_to_id['no-path']) - instance_profile.path.should.equal('/') - - autoscale_conn = boto.ec2.autoscale.connect_to_region("us-west-1") - launch_config = autoscale_conn.get_all_launch_configurations()[0] - launch_config.instance_profile_name.should.contain("my-instance-profile-with-path") - - stack = conn.describe_stacks()[0] - resources = stack.describe_resources() - instance_profile_resources = [ - resource for resource in resources if resource.resource_type == 'AWS::IAM::InstanceProfile'] - {ip.physical_resource_id for ip in instance_profile_resources}.should.equal(set(instance_profile_names)) - - role_resources = [ - resource for resource in resources if resource.resource_type == 'AWS::IAM::Role'] - {r.physical_resource_id for r in role_resources}.should.equal(set(role_name_to_id.values())) - - -@mock_ec2_deprecated() -@mock_cloudformation_deprecated() -def test_single_instance_with_ebs_volume(): - template_json = json.dumps(single_instance_with_ebs_volume.template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack", - template_body=template_json, - 
parameters=[("KeyName", "key_name")] - ) - - ec2_conn = boto.ec2.connect_to_region("us-west-1") - reservation = ec2_conn.get_all_instances()[0] - ec2_instance = reservation.instances[0] - - volumes = ec2_conn.get_all_volumes() - # Grab the mounted drive - volume = [ - volume for volume in volumes if volume.attach_data.device == '/dev/sdh'][0] - volume.volume_state().should.equal('in-use') - volume.attach_data.instance_id.should.equal(ec2_instance.id) - - stack = conn.describe_stacks()[0] - resources = stack.describe_resources() - ebs_volumes = [ - resource for resource in resources if resource.resource_type == 'AWS::EC2::Volume'] - ebs_volumes[0].physical_resource_id.should.equal(volume.id) - - -@mock_cloudformation_deprecated() -def test_create_template_without_required_param(): - template_json = json.dumps(single_instance_with_ebs_volume.template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack.when.called_with( - "test_stack", - template_body=template_json, - ).should.throw(BotoServerError) - - -@mock_ec2_deprecated() -@mock_cloudformation_deprecated() -def test_classic_eip(): - template_json = json.dumps(ec2_classic_eip.template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack("test_stack", template_body=template_json) - ec2_conn = boto.ec2.connect_to_region("us-west-1") - eip = ec2_conn.get_all_addresses()[0] - - stack = conn.describe_stacks()[0] - resources = stack.describe_resources() - cfn_eip = [ - resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] - cfn_eip.physical_resource_id.should.equal(eip.public_ip) - - -@mock_ec2_deprecated() -@mock_cloudformation_deprecated() -def test_vpc_eip(): - template_json = json.dumps(vpc_eip.template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack("test_stack", template_body=template_json) - ec2_conn = boto.ec2.connect_to_region("us-west-1") - eip = ec2_conn.get_all_addresses()[0] - - stack = conn.describe_stacks()[0] - resources = stack.describe_resources() - cfn_eip = [ - resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] - cfn_eip.physical_resource_id.should.equal(eip.public_ip) - - -@mock_ec2_deprecated() -@mock_cloudformation_deprecated() -def test_fn_join(): - template_json = json.dumps(fn_join.template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack("test_stack", template_body=template_json) - ec2_conn = boto.ec2.connect_to_region("us-west-1") - eip = ec2_conn.get_all_addresses()[0] - - stack = conn.describe_stacks()[0] - fn_join_output = stack.outputs[0] - fn_join_output.value.should.equal('test eip:{0}'.format(eip.public_ip)) - - -@mock_cloudformation_deprecated() -@mock_sqs_deprecated() -def test_conditional_resources(): - sqs_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Parameters": { - "EnvType": { - "Description": "Environment type.", - "Type": "String", - } - }, - "Conditions": { - "CreateQueue": {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]} - }, - "Resources": { - "QueueGroup": { - "Condition": "CreateQueue", - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": "my-queue", - "VisibilityTimeout": 60, - } - }, - }, - } - sqs_template_json = json.dumps(sqs_template) - - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack_without_queue", - template_body=sqs_template_json, - parameters=[("EnvType", "staging")], - ) - sqs_conn = boto.sqs.connect_to_region("us-west-1") - 
list(sqs_conn.get_all_queues()).should.have.length_of(0) - - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack_with_queue", - template_body=sqs_template_json, - parameters=[("EnvType", "prod")], - ) - sqs_conn = boto.sqs.connect_to_region("us-west-1") - list(sqs_conn.get_all_queues()).should.have.length_of(1) - - -@mock_cloudformation_deprecated() -@mock_ec2_deprecated() -def test_conditional_if_handling(): - dummy_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Conditions": { - "EnvEqualsPrd": { - "Fn::Equals": [ - { - "Ref": "ENV" - }, - "prd" - ] - } - }, - "Parameters": { - "ENV": { - "Default": "dev", - "Description": "Deployment environment for the stack (dev/prd)", - "Type": "String" - }, - }, - "Description": "Stack 1", - "Resources": { - "App1": { - "Properties": { - "ImageId": { - "Fn::If": [ - "EnvEqualsPrd", - "ami-00000000", - "ami-ffffffff" - ] - }, - }, - "Type": "AWS::EC2::Instance" - }, - } - } - dummy_template_json = json.dumps(dummy_template) - - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack('test_stack1', template_body=dummy_template_json) - ec2_conn = boto.ec2.connect_to_region("us-west-1") - reservation = ec2_conn.get_all_instances()[0] - ec2_instance = reservation.instances[0] - ec2_instance.image_id.should.equal("ami-ffffffff") - ec2_instance.terminate() - - conn = boto.cloudformation.connect_to_region("us-west-2") - conn.create_stack( - 'test_stack1', template_body=dummy_template_json, parameters=[("ENV", "prd")]) - ec2_conn = boto.ec2.connect_to_region("us-west-2") - reservation = ec2_conn.get_all_instances()[0] - ec2_instance = reservation.instances[0] - ec2_instance.image_id.should.equal("ami-00000000") - - -@mock_cloudformation_deprecated() -@mock_ec2_deprecated() -def test_cloudformation_mapping(): - dummy_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Mappings": { - "RegionMap": { - "us-east-1": {"32": "ami-6411e20d", "64": "ami-7a11e213"}, - "us-west-1": {"32": "ami-c9c7978c", "64": "ami-cfc7978a"}, - "eu-west-1": {"32": "ami-37c2f643", "64": "ami-31c2f645"}, - "ap-southeast-1": {"32": "ami-66f28c34", "64": "ami-60f28c32"}, - "ap-northeast-1": {"32": "ami-9c03a89d", "64": "ami-a003a8a1"} - } - }, - "Resources": { - "WebServer": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": { - "Fn::FindInMap": ["RegionMap", {"Ref": "AWS::Region"}, "32"] - }, - "InstanceType": "m1.small" - }, - }, - }, - } - - dummy_template_json = json.dumps(dummy_template) - - conn = boto.cloudformation.connect_to_region("us-east-1") - conn.create_stack('test_stack1', template_body=dummy_template_json) - ec2_conn = boto.ec2.connect_to_region("us-east-1") - reservation = ec2_conn.get_all_instances()[0] - ec2_instance = reservation.instances[0] - ec2_instance.image_id.should.equal("ami-6411e20d") - - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack('test_stack1', template_body=dummy_template_json) - ec2_conn = boto.ec2.connect_to_region("us-west-1") - reservation = ec2_conn.get_all_instances()[0] - ec2_instance = reservation.instances[0] - ec2_instance.image_id.should.equal("ami-c9c7978c") - - -@mock_cloudformation_deprecated() -@mock_route53_deprecated() -def test_route53_roundrobin(): - route53_conn = boto.connect_route53() - - template_json = json.dumps(route53_roundrobin.template) - conn = boto.cloudformation.connect_to_region("us-west-1") - stack = conn.create_stack( - "test_stack", - 
template_body=template_json, - ) - - zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ - 'HostedZones'] - list(zones).should.have.length_of(1) - zone_id = zones[0]['Id'] - zone_id = zone_id.split('/') - zone_id = zone_id[2] - - rrsets = route53_conn.get_all_rrsets(zone_id) - rrsets.hosted_zone_id.should.equal(zone_id) - rrsets.should.have.length_of(2) - record_set1 = rrsets[0] - record_set1.name.should.equal('test_stack.us-west-1.my_zone.') - record_set1.identifier.should.equal("test_stack AWS") - record_set1.type.should.equal('CNAME') - record_set1.ttl.should.equal('900') - record_set1.weight.should.equal('3') - record_set1.resource_records[0].should.equal("aws.amazon.com") - - record_set2 = rrsets[1] - record_set2.name.should.equal('test_stack.us-west-1.my_zone.') - record_set2.identifier.should.equal("test_stack Amazon") - record_set2.type.should.equal('CNAME') - record_set2.ttl.should.equal('900') - record_set2.weight.should.equal('1') - record_set2.resource_records[0].should.equal("www.amazon.com") - - stack = conn.describe_stacks()[0] - output = stack.outputs[0] - output.key.should.equal('DomainName') - output.value.should.equal( - 'arn:aws:route53:::hostedzone/{0}'.format(zone_id)) - - -@mock_cloudformation_deprecated() -@mock_ec2_deprecated() -@mock_route53_deprecated() -def test_route53_ec2_instance_with_public_ip(): - route53_conn = boto.connect_route53() - ec2_conn = boto.ec2.connect_to_region("us-west-1") - - template_json = json.dumps(route53_ec2_instance_with_public_ip.template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack", - template_body=template_json, - ) - - instance_id = ec2_conn.get_all_reservations()[0].instances[0].id - - zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ - 'HostedZones'] - list(zones).should.have.length_of(1) - zone_id = zones[0]['Id'] - zone_id = zone_id.split('/') - zone_id = zone_id[2] - - rrsets = route53_conn.get_all_rrsets(zone_id) - rrsets.should.have.length_of(1) - - record_set1 = rrsets[0] - record_set1.name.should.equal('{0}.us-west-1.my_zone.'.format(instance_id)) - record_set1.identifier.should.equal(None) - record_set1.type.should.equal('A') - record_set1.ttl.should.equal('900') - record_set1.weight.should.equal(None) - record_set1.resource_records[0].should.equal("10.0.0.25") - - -@mock_cloudformation_deprecated() -@mock_route53_deprecated() -def test_route53_associate_health_check(): - route53_conn = boto.connect_route53() - - template_json = json.dumps(route53_health_check.template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack", - template_body=template_json, - ) - - checks = route53_conn.get_list_health_checks()['ListHealthChecksResponse'][ - 'HealthChecks'] - list(checks).should.have.length_of(1) - check = checks[0] - health_check_id = check['Id'] - config = check['HealthCheckConfig'] - config["FailureThreshold"].should.equal("3") - config["IPAddress"].should.equal("10.0.0.4") - config["Port"].should.equal("80") - config["RequestInterval"].should.equal("10") - config["ResourcePath"].should.equal("/") - config["Type"].should.equal("HTTP") - - zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ - 'HostedZones'] - list(zones).should.have.length_of(1) - zone_id = zones[0]['Id'] - zone_id = zone_id.split('/') - zone_id = zone_id[2] - - rrsets = route53_conn.get_all_rrsets(zone_id) - rrsets.should.have.length_of(1) - - record_set = rrsets[0] - 
record_set.health_check.should.equal(health_check_id) - - -@mock_cloudformation_deprecated() -@mock_route53_deprecated() -def test_route53_with_update(): - route53_conn = boto.connect_route53() - - template_json = json.dumps(route53_health_check.template) - cf_conn = boto.cloudformation.connect_to_region("us-west-1") - cf_conn.create_stack( - "test_stack", - template_body=template_json, - ) - - zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ - 'HostedZones'] - list(zones).should.have.length_of(1) - zone_id = zones[0]['Id'] - zone_id = zone_id.split('/') - zone_id = zone_id[2] - - rrsets = route53_conn.get_all_rrsets(zone_id) - rrsets.should.have.length_of(1) - - record_set = rrsets[0] - record_set.resource_records.should.equal(["my.example.com"]) - - route53_health_check.template['Resources']['myDNSRecord'][ - 'Properties']['ResourceRecords'] = ["my_other.example.com"] - template_json = json.dumps(route53_health_check.template) - cf_conn.update_stack( - "test_stack", - template_body=template_json, - ) - - zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ - 'HostedZones'] - list(zones).should.have.length_of(1) - zone_id = zones[0]['Id'] - zone_id = zone_id.split('/') - zone_id = zone_id[2] - - rrsets = route53_conn.get_all_rrsets(zone_id) - rrsets.should.have.length_of(1) - - record_set = rrsets[0] - record_set.resource_records.should.equal(["my_other.example.com"]) - - -@mock_cloudformation_deprecated() -@mock_sns_deprecated() -def test_sns_topic(): - dummy_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "MySNSTopic": { - "Type": "AWS::SNS::Topic", - "Properties": { - "Subscription": [ - {"Endpoint": "https://example.com", "Protocol": "https"}, - ], - "TopicName": "my_topics", - } - } - }, - "Outputs": { - "topic_name": { - "Value": {"Fn::GetAtt": ["MySNSTopic", "TopicName"]} - }, - "topic_arn": { - "Value": {"Ref": "MySNSTopic"} - }, - } - } - template_json = json.dumps(dummy_template) - conn = boto.cloudformation.connect_to_region("us-west-1") - stack = conn.create_stack( - "test_stack", - template_body=template_json, - ) - - sns_conn = boto.sns.connect_to_region("us-west-1") - topics = sns_conn.get_all_topics()["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"] - topics.should.have.length_of(1) - topic_arn = topics[0]['TopicArn'] - topic_arn.should.contain("my_topics") - - subscriptions = sns_conn.get_all_subscriptions()["ListSubscriptionsResponse"][ - "ListSubscriptionsResult"]["Subscriptions"] - subscriptions.should.have.length_of(1) - subscription = subscriptions[0] - subscription["TopicArn"].should.equal(topic_arn) - subscription["Protocol"].should.equal("https") - subscription["SubscriptionArn"].should.contain(topic_arn) - subscription["Endpoint"].should.equal("https://example.com") - - stack = conn.describe_stacks()[0] - topic_name_output = [x for x in stack.outputs if x.key == 'topic_name'][0] - topic_name_output.value.should.equal("my_topics") - topic_arn_output = [x for x in stack.outputs if x.key == 'topic_arn'][0] - topic_arn_output.value.should.equal(topic_arn) - - -@mock_cloudformation_deprecated -@mock_ec2_deprecated -def test_vpc_gateway_attachment_creation_should_attach_itself_to_vpc(): - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "internetgateway": { - "Type": "AWS::EC2::InternetGateway" - }, - "testvpc": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "10.0.0.0/16", - "EnableDnsHostnames": "true", - "EnableDnsSupport": "true", - 
"InstanceTenancy": "default" - }, - }, - "vpcgatewayattachment": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "InternetGatewayId": { - "Ref": "internetgateway" - }, - "VpcId": { - "Ref": "testvpc" - } - }, - }, - } - } - - template_json = json.dumps(template) - cf_conn = boto.cloudformation.connect_to_region("us-west-1") - cf_conn.create_stack( - "test_stack", - template_body=template_json, - ) - - vpc_conn = boto.vpc.connect_to_region("us-west-1") - vpc = vpc_conn.get_all_vpcs(filters={'cidrBlock': '10.0.0.0/16'})[0] - igws = vpc_conn.get_all_internet_gateways( - filters={'attachment.vpc-id': vpc.id} - ) - - igws.should.have.length_of(1) - - -@mock_cloudformation_deprecated -@mock_ec2_deprecated -def test_vpc_peering_creation(): - vpc_conn = boto.vpc.connect_to_region("us-west-1") - vpc_source = vpc_conn.create_vpc("10.0.0.0/16") - peer_vpc = vpc_conn.create_vpc("10.1.0.0/16") - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "vpcpeeringconnection": { - "Type": "AWS::EC2::VPCPeeringConnection", - "Properties": { - "PeerVpcId": peer_vpc.id, - "VpcId": vpc_source.id, - } - }, - } - } - - template_json = json.dumps(template) - cf_conn = boto.cloudformation.connect_to_region("us-west-1") - cf_conn.create_stack( - "test_stack", - template_body=template_json, - ) - - peering_connections = vpc_conn.get_all_vpc_peering_connections() - peering_connections.should.have.length_of(1) - - -@mock_cloudformation_deprecated -@mock_ec2_deprecated -def test_multiple_security_group_ingress_separate_from_security_group_by_id(): - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "test-security-group1": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupDescription": "test security group", - "Tags": [ - { - "Key": "sg-name", - "Value": "sg1" - } - ] - }, - }, - "test-security-group2": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupDescription": "test security group", - "Tags": [ - { - "Key": "sg-name", - "Value": "sg2" - } - ] - }, - }, - "test-sg-ingress": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": {"Ref": "test-security-group1"}, - "IpProtocol": "tcp", - "FromPort": "80", - "ToPort": "8080", - "SourceSecurityGroupId": {"Ref": "test-security-group2"}, - } - } - } - } - - template_json = json.dumps(template) - cf_conn = boto.cloudformation.connect_to_region("us-west-1") - cf_conn.create_stack( - "test_stack", - template_body=template_json, - ) - ec2_conn = boto.ec2.connect_to_region("us-west-1") - - security_group1 = ec2_conn.get_all_security_groups( - filters={"tag:sg-name": "sg1"})[0] - security_group2 = ec2_conn.get_all_security_groups( - filters={"tag:sg-name": "sg2"})[0] - - security_group1.rules.should.have.length_of(1) - security_group1.rules[0].grants.should.have.length_of(1) - security_group1.rules[0].grants[ - 0].group_id.should.equal(security_group2.id) - security_group1.rules[0].ip_protocol.should.equal('tcp') - security_group1.rules[0].from_port.should.equal('80') - security_group1.rules[0].to_port.should.equal('8080') - - -@mock_cloudformation_deprecated -@mock_ec2_deprecated -def test_security_group_ingress_separate_from_security_group_by_id(): - ec2_conn = boto.ec2.connect_to_region("us-west-1") - ec2_conn.create_security_group( - "test-security-group1", "test security group") - - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "test-security-group2": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupDescription": 
"test security group", - "Tags": [ - { - "Key": "sg-name", - "Value": "sg2" - } - ] - }, - }, - "test-sg-ingress": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupName": "test-security-group1", - "IpProtocol": "tcp", - "FromPort": "80", - "ToPort": "8080", - "SourceSecurityGroupId": {"Ref": "test-security-group2"}, - } - } - } - } - - template_json = json.dumps(template) - cf_conn = boto.cloudformation.connect_to_region("us-west-1") - cf_conn.create_stack( - "test_stack", - template_body=template_json, - ) - security_group1 = ec2_conn.get_all_security_groups( - groupnames=["test-security-group1"])[0] - security_group2 = ec2_conn.get_all_security_groups( - filters={"tag:sg-name": "sg2"})[0] - - security_group1.rules.should.have.length_of(1) - security_group1.rules[0].grants.should.have.length_of(1) - security_group1.rules[0].grants[ - 0].group_id.should.equal(security_group2.id) - security_group1.rules[0].ip_protocol.should.equal('tcp') - security_group1.rules[0].from_port.should.equal('80') - security_group1.rules[0].to_port.should.equal('8080') - - -@mock_cloudformation_deprecated -@mock_ec2_deprecated -def test_security_group_ingress_separate_from_security_group_by_id_using_vpc(): - vpc_conn = boto.vpc.connect_to_region("us-west-1") - vpc = vpc_conn.create_vpc("10.0.0.0/16") - - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "test-security-group1": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupDescription": "test security group", - "VpcId": vpc.id, - "Tags": [ - { - "Key": "sg-name", - "Value": "sg1" - } - ] - }, - }, - "test-security-group2": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupDescription": "test security group", - "VpcId": vpc.id, - "Tags": [ - { - "Key": "sg-name", - "Value": "sg2" - } - ] - }, - }, - "test-sg-ingress": { - "Type": "AWS::EC2::SecurityGroupIngress", - "Properties": { - "GroupId": {"Ref": "test-security-group1"}, - "VpcId": vpc.id, - "IpProtocol": "tcp", - "FromPort": "80", - "ToPort": "8080", - "SourceSecurityGroupId": {"Ref": "test-security-group2"}, - } - } - } - } - - template_json = json.dumps(template) - cf_conn = boto.cloudformation.connect_to_region("us-west-1") - cf_conn.create_stack( - "test_stack", - template_body=template_json, - ) - security_group1 = vpc_conn.get_all_security_groups( - filters={"tag:sg-name": "sg1"})[0] - security_group2 = vpc_conn.get_all_security_groups( - filters={"tag:sg-name": "sg2"})[0] - - security_group1.rules.should.have.length_of(1) - security_group1.rules[0].grants.should.have.length_of(1) - security_group1.rules[0].grants[ - 0].group_id.should.equal(security_group2.id) - security_group1.rules[0].ip_protocol.should.equal('tcp') - security_group1.rules[0].from_port.should.equal('80') - security_group1.rules[0].to_port.should.equal('8080') - - -@mock_cloudformation_deprecated -@mock_ec2_deprecated -def test_security_group_with_update(): - vpc_conn = boto.vpc.connect_to_region("us-west-1") - vpc1 = vpc_conn.create_vpc("10.0.0.0/16") - - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "test-security-group": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupDescription": "test security group", - "VpcId": vpc1.id, - "Tags": [ - { - "Key": "sg-name", - "Value": "sg" - } - ] - }, - }, - } - } - - template_json = json.dumps(template) - cf_conn = boto.cloudformation.connect_to_region("us-west-1") - cf_conn.create_stack( - "test_stack", - template_body=template_json, - ) - security_group = 
vpc_conn.get_all_security_groups( - filters={"tag:sg-name": "sg"})[0] - security_group.vpc_id.should.equal(vpc1.id) - - vpc2 = vpc_conn.create_vpc("10.1.0.0/16") - template['Resources'][ - 'test-security-group']['Properties']['VpcId'] = vpc2.id - template_json = json.dumps(template) - cf_conn.update_stack( - "test_stack", - template_body=template_json, - ) - security_group = vpc_conn.get_all_security_groups( - filters={"tag:sg-name": "sg"})[0] - security_group.vpc_id.should.equal(vpc2.id) - - -@mock_cloudformation_deprecated -@mock_ec2_deprecated -def test_subnets_should_be_created_with_availability_zone(): - vpc_conn = boto.vpc.connect_to_region('us-west-1') - vpc = vpc_conn.create_vpc("10.0.0.0/16") - - subnet_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "testSubnet": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "VpcId": vpc.id, - "CidrBlock": "10.0.0.0/24", - "AvailabilityZone": "us-west-1b", - } - } - } - } - cf_conn = boto.cloudformation.connect_to_region("us-west-1") - template_json = json.dumps(subnet_template) - cf_conn.create_stack( - "test_stack", - template_body=template_json, - ) - subnet = vpc_conn.get_all_subnets(filters={'cidrBlock': '10.0.0.0/24'})[0] - subnet.availability_zone.should.equal('us-west-1b') - - -@mock_cloudformation_deprecated -@mock_datapipeline_deprecated -def test_datapipeline(): - dp_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "dataPipeline": { - "Properties": { - "Activate": "true", - "Name": "testDataPipeline", - "PipelineObjects": [ - { - "Fields": [ - { - "Key": "failureAndRerunMode", - "StringValue": "CASCADE" - }, - { - "Key": "scheduleType", - "StringValue": "cron" - }, - { - "Key": "schedule", - "RefValue": "DefaultSchedule" - }, - { - "Key": "pipelineLogUri", - "StringValue": "s3://bucket/logs" - }, - { - "Key": "type", - "StringValue": "Default" - }, - ], - "Id": "Default", - "Name": "Default" - }, - { - "Fields": [ - { - "Key": "startDateTime", - "StringValue": "1970-01-01T01:00:00" - }, - { - "Key": "period", - "StringValue": "1 Day" - }, - { - "Key": "type", - "StringValue": "Schedule" - } - ], - "Id": "DefaultSchedule", - "Name": "RunOnce" - } - ], - "PipelineTags": [] - }, - "Type": "AWS::DataPipeline::Pipeline" - } - } - } - cf_conn = boto.cloudformation.connect_to_region("us-east-1") - template_json = json.dumps(dp_template) - stack_id = cf_conn.create_stack( - "test_stack", - template_body=template_json, - ) - - dp_conn = boto.datapipeline.connect_to_region('us-east-1') - data_pipelines = dp_conn.list_pipelines() - - data_pipelines['pipelineIdList'].should.have.length_of(1) - data_pipelines['pipelineIdList'][0][ - 'name'].should.equal('testDataPipeline') - - stack_resources = cf_conn.list_stack_resources(stack_id) - stack_resources.should.have.length_of(1) - stack_resources[0].physical_resource_id.should.equal( - data_pipelines['pipelineIdList'][0]['id']) - - -@mock_cloudformation -@mock_lambda -def test_lambda_function(): - # switch this to python as backend lambda only supports python execution. 
- lambda_code = """ -def lambda_handler(event, context): - return (event, context) -""" - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "lambdaTest": { - "Type": "AWS::Lambda::Function", - "Properties": { - "Code": { - # CloudFormation expects a string as ZipFile, not a ZIP file base64-encoded - "ZipFile": {"Fn::Join": ["\n", lambda_code.splitlines()]} - }, - "Handler": "lambda_function.handler", - "Description": "Test function", - "MemorySize": 128, - "Role": "test-role", - "Runtime": "python2.7" - } - } - } - } - - template_json = json.dumps(template) - cf_conn = boto3.client('cloudformation', 'us-east-1') - cf_conn.create_stack( - StackName="test_stack", - TemplateBody=template_json, - ) - - conn = boto3.client('lambda', 'us-east-1') - result = conn.list_functions() - result['Functions'].should.have.length_of(1) - result['Functions'][0]['Description'].should.equal('Test function') - result['Functions'][0]['Handler'].should.equal('lambda_function.handler') - result['Functions'][0]['MemorySize'].should.equal(128) - result['Functions'][0]['Role'].should.equal('test-role') - result['Functions'][0]['Runtime'].should.equal('python2.7') - - -@mock_cloudformation -@mock_ec2 -def test_nat_gateway(): - ec2_conn = boto3.client('ec2', 'us-east-1') - vpc_id = ec2_conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc']['VpcId'] - subnet_id = ec2_conn.create_subnet( - CidrBlock='10.0.1.0/24', VpcId=vpc_id)['Subnet']['SubnetId'] - route_table_id = ec2_conn.create_route_table( - VpcId=vpc_id)['RouteTable']['RouteTableId'] - - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "NAT": { - "DependsOn": "vpcgatewayattachment", - "Type": "AWS::EC2::NatGateway", - "Properties": { - "AllocationId": {"Fn::GetAtt": ["EIP", "AllocationId"]}, - "SubnetId": subnet_id - } - }, - "EIP": { - "Type": "AWS::EC2::EIP", - "Properties": { - "Domain": "vpc" - } - }, - "Route": { - "Type": "AWS::EC2::Route", - "Properties": { - "RouteTableId": route_table_id, - "DestinationCidrBlock": "0.0.0.0/0", - "NatGatewayId": {"Ref": "NAT"} - } - }, - "internetgateway": { - "Type": "AWS::EC2::InternetGateway" - }, - "vpcgatewayattachment": { - "Type": "AWS::EC2::VPCGatewayAttachment", - "Properties": { - "InternetGatewayId": { - "Ref": "internetgateway" - }, - "VpcId": vpc_id, - }, - } - } - } - - cf_conn = boto3.client('cloudformation', 'us-east-1') - cf_conn.create_stack( - StackName="test_stack", - TemplateBody=json.dumps(template), - ) - - result = ec2_conn.describe_nat_gateways() - - result['NatGateways'].should.have.length_of(1) - result['NatGateways'][0]['VpcId'].should.equal(vpc_id) - result['NatGateways'][0]['SubnetId'].should.equal(subnet_id) - result['NatGateways'][0]['State'].should.equal('available') - - -@mock_cloudformation() -@mock_kms() -def test_stack_kms(): - kms_key_template = { - 'Resources': { - 'kmskey': { - 'Properties': { - 'Description': 'A kms key', - 'EnableKeyRotation': True, - 'Enabled': True, - 'KeyPolicy': 'a policy', - }, - 'Type': 'AWS::KMS::Key' - } - } - } - kms_key_template_json = json.dumps(kms_key_template) - - cf_conn = boto3.client('cloudformation', 'us-east-1') - cf_conn.create_stack( - StackName='test_stack', - TemplateBody=kms_key_template_json, - ) - - kms_conn = boto3.client('kms', 'us-east-1') - keys = kms_conn.list_keys()['Keys'] - len(keys).should.equal(1) - result = kms_conn.describe_key(KeyId=keys[0]['KeyId']) - - result['KeyMetadata']['Enabled'].should.equal(True) - result['KeyMetadata']['KeyUsage'].should.equal('ENCRYPT_DECRYPT') - - 
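A brief aside on the ZipFile remark inside test_lambda_function above, since the same pattern recurs whenever Lambda code is inlined in a template. This is an illustrative sketch only, not part of the patch, and make_inline_code and make_zip_payload are hypothetical helper names: the template's {"Fn::Join": ["\n", lambda_code.splitlines()]} resolves to the plain source string, which is what CloudFormation's inline Code.ZipFile expects, whereas an API call such as boto3's lambda create_function takes real ZIP bytes in its ZipFile field.

import io
import zipfile

lambda_code = """
def lambda_handler(event, context):
    return (event, context)
"""

def make_inline_code(source):
    # What {"Fn::Join": ["\n", source.splitlines()]} resolves to: the plain
    # source text itself, suitable for CloudFormation's inline Code.ZipFile.
    return "\n".join(source.splitlines())

def make_zip_payload(source):
    # By contrast, a real deployment package is a ZIP archive; boto3's
    # lambda create_function expects these bytes, not plain text.
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w", zipfile.ZIP_DEFLATED) as zf:
        zf.writestr("lambda_function.py", source)
    return buf.getvalue()

assert make_inline_code(lambda_code) == lambda_code.rstrip("\n")  # plain-text round trip
assert make_zip_payload(lambda_code)[:2] == b"PK"  # ZIP magic bytes, not source text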
-@mock_cloudformation() -@mock_ec2() -def test_stack_spot_fleet(): - conn = boto3.client('ec2', 'us-east-1') - - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] - subnet = conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] - subnet_id = subnet['SubnetId'] - - spot_fleet_template = { - 'Resources': { - "SpotFleet": { - "Type": "AWS::EC2::SpotFleet", - "Properties": { - "SpotFleetRequestConfigData": { - "IamFleetRole": "arn:aws:iam::123456789012:role/fleet", - "SpotPrice": "0.12", - "TargetCapacity": 6, - "AllocationStrategy": "diversified", - "LaunchSpecifications": [ - { - "EbsOptimized": "false", - "InstanceType": 't2.small', - "ImageId": "ami-1234", - "SubnetId": subnet_id, - "WeightedCapacity": "2", - "SpotPrice": "0.13", - }, - { - "EbsOptimized": "true", - "InstanceType": 't2.large', - "ImageId": "ami-1234", - "Monitoring": {"Enabled": "true"}, - "SecurityGroups": [{"GroupId": "sg-123"}], - "SubnetId": subnet_id, - "IamInstanceProfile": {"Arn": "arn:aws:iam::123456789012:role/fleet"}, - "WeightedCapacity": "4", - "SpotPrice": "10.00", - } - ] - } - } - } - } - } - spot_fleet_template_json = json.dumps(spot_fleet_template) - - cf_conn = boto3.client('cloudformation', 'us-east-1') - stack_id = cf_conn.create_stack( - StackName='test_stack', - TemplateBody=spot_fleet_template_json, - )['StackId'] - - stack_resources = cf_conn.list_stack_resources(StackName=stack_id) - stack_resources['StackResourceSummaries'].should.have.length_of(1) - spot_fleet_id = stack_resources[ - 'StackResourceSummaries'][0]['PhysicalResourceId'] - - spot_fleet_requests = conn.describe_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] - len(spot_fleet_requests).should.equal(1) - spot_fleet_request = spot_fleet_requests[0] - spot_fleet_request['SpotFleetRequestState'].should.equal("active") - spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig'] - - spot_fleet_config['SpotPrice'].should.equal('0.12') - spot_fleet_config['TargetCapacity'].should.equal(6) - spot_fleet_config['IamFleetRole'].should.equal( - 'arn:aws:iam::123456789012:role/fleet') - spot_fleet_config['AllocationStrategy'].should.equal('diversified') - spot_fleet_config['FulfilledCapacity'].should.equal(6.0) - - len(spot_fleet_config['LaunchSpecifications']).should.equal(2) - launch_spec = spot_fleet_config['LaunchSpecifications'][0] - - launch_spec['EbsOptimized'].should.equal(False) - launch_spec['ImageId'].should.equal("ami-1234") - launch_spec['InstanceType'].should.equal("t2.small") - launch_spec['SubnetId'].should.equal(subnet_id) - launch_spec['SpotPrice'].should.equal("0.13") - launch_spec['WeightedCapacity'].should.equal(2.0) - - -@mock_cloudformation() -@mock_ec2() -def test_stack_spot_fleet_should_figure_out_default_price(): - conn = boto3.client('ec2', 'us-east-1') - - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] - subnet = conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] - subnet_id = subnet['SubnetId'] - - spot_fleet_template = { - 'Resources': { - "SpotFleet1": { - "Type": "AWS::EC2::SpotFleet", - "Properties": { - "SpotFleetRequestConfigData": { - "IamFleetRole": "arn:aws:iam::123456789012:role/fleet", - "TargetCapacity": 6, - "AllocationStrategy": "diversified", - "LaunchSpecifications": [ - { - "EbsOptimized": "false", - "InstanceType": 't2.small', - "ImageId": "ami-1234", - "SubnetId": subnet_id, - "WeightedCapacity": "2", - }, - { - "EbsOptimized": "true", - 
"InstanceType": 't2.large', - "ImageId": "ami-1234", - "Monitoring": {"Enabled": "true"}, - "SecurityGroups": [{"GroupId": "sg-123"}], - "SubnetId": subnet_id, - "IamInstanceProfile": {"Arn": "arn:aws:iam::123456789012:role/fleet"}, - "WeightedCapacity": "4", - } - ] - } - } - } - } - } - spot_fleet_template_json = json.dumps(spot_fleet_template) - - cf_conn = boto3.client('cloudformation', 'us-east-1') - stack_id = cf_conn.create_stack( - StackName='test_stack', - TemplateBody=spot_fleet_template_json, - )['StackId'] - - stack_resources = cf_conn.list_stack_resources(StackName=stack_id) - stack_resources['StackResourceSummaries'].should.have.length_of(1) - spot_fleet_id = stack_resources[ - 'StackResourceSummaries'][0]['PhysicalResourceId'] - - spot_fleet_requests = conn.describe_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] - len(spot_fleet_requests).should.equal(1) - spot_fleet_request = spot_fleet_requests[0] - spot_fleet_request['SpotFleetRequestState'].should.equal("active") - spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig'] - - assert 'SpotPrice' not in spot_fleet_config - len(spot_fleet_config['LaunchSpecifications']).should.equal(2) - launch_spec1 = spot_fleet_config['LaunchSpecifications'][0] - launch_spec2 = spot_fleet_config['LaunchSpecifications'][1] - - assert 'SpotPrice' not in launch_spec1 - assert 'SpotPrice' not in launch_spec2 - - -@mock_ec2 -@mock_elbv2 -@mock_cloudformation -def test_stack_elbv2_resources_integration(): - alb_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Outputs": { - "albdns": { - "Description": "Load balanacer DNS", - "Value": {"Fn::GetAtt": ["alb", "DNSName"]}, - }, - "albname": { - "Description": "Load balancer name", - "Value": {"Fn::GetAtt": ["alb", "LoadBalancerName"]}, - }, - "canonicalhostedzoneid": { - "Description": "Load balancer canonical hosted zone ID", - "Value": {"Fn::GetAtt": ["alb", "CanonicalHostedZoneID"]}, - }, - }, - "Resources": { - "alb": { - "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", - "Properties": { - "Name": "myelbv2", - "Scheme": "internet-facing", - "Subnets": [{ - "Ref": "mysubnet", - }], - "SecurityGroups": [{ - "Ref": "mysg", - }], - "Type": "application", - "IpAddressType": "ipv4", - } - }, - "mytargetgroup1": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "HealthCheckIntervalSeconds": 30, - "HealthCheckPath": "/status", - "HealthCheckPort": 80, - "HealthCheckProtocol": "HTTP", - "HealthCheckTimeoutSeconds": 5, - "HealthyThresholdCount": 30, - "UnhealthyThresholdCount": 5, - "Matcher": { - "HttpCode": "200,201" - }, - "Name": "mytargetgroup1", - "Port": 80, - "Protocol": "HTTP", - "TargetType": "instance", - "Targets": [{ - "Id": { - "Ref": "ec2instance", - "Port": 80, - }, - }], - "VpcId": { - "Ref": "myvpc", - } - } - }, - "mytargetgroup2": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "HealthCheckIntervalSeconds": 30, - "HealthCheckPath": "/status", - "HealthCheckPort": 8080, - "HealthCheckProtocol": "HTTP", - "HealthCheckTimeoutSeconds": 5, - "HealthyThresholdCount": 30, - "UnhealthyThresholdCount": 5, - "Name": "mytargetgroup2", - "Port": 8080, - "Protocol": "HTTP", - "TargetType": "instance", - "Targets": [{ - "Id": { - "Ref": "ec2instance", - "Port": 8080, - }, - }], - "VpcId": { - "Ref": "myvpc", - } - } - }, - "listener": { - "Type": "AWS::ElasticLoadBalancingV2::Listener", - "Properties": { - "DefaultActions": [{ - "Type": "forward", - "TargetGroupArn": {"Ref": 
"mytargetgroup1"} - }], - "LoadBalancerArn": {"Ref": "alb"}, - "Port": "80", - "Protocol": "HTTP" - } - }, - "myvpc": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "10.0.0.0/16", - } - }, - "mysubnet": { - "Type": "AWS::EC2::Subnet", - "Properties": { - "CidrBlock": "10.0.0.0/27", - "VpcId": {"Ref": "myvpc"}, - } - }, - "mysg": { - "Type": "AWS::EC2::SecurityGroup", - "Properties": { - "GroupName": "mysg", - "GroupDescription": "test security group", - "VpcId": {"Ref": "myvpc"} - } - }, - "ec2instance": { - "Type": "AWS::EC2::Instance", - "Properties": { - "ImageId": "ami-1234abcd", - "UserData": "some user data", - } - }, - }, - } - alb_template_json = json.dumps(alb_template) - - cfn_conn = boto3.client("cloudformation", "us-west-1") - cfn_conn.create_stack( - StackName="elb_stack", - TemplateBody=alb_template_json, - ) - - elbv2_conn = boto3.client("elbv2", "us-west-1") - - load_balancers = elbv2_conn.describe_load_balancers()['LoadBalancers'] - len(load_balancers).should.equal(1) - load_balancers[0]['LoadBalancerName'].should.equal('myelbv2') - load_balancers[0]['Scheme'].should.equal('internet-facing') - load_balancers[0]['Type'].should.equal('application') - load_balancers[0]['IpAddressType'].should.equal('ipv4') - - target_groups = sorted( - elbv2_conn.describe_target_groups()['TargetGroups'], - key=lambda tg: tg['TargetGroupName']) # sort to do comparison with indexes - len(target_groups).should.equal(2) - target_groups[0]['HealthCheckIntervalSeconds'].should.equal(30) - target_groups[0]['HealthCheckPath'].should.equal('/status') - target_groups[0]['HealthCheckPort'].should.equal('80') - target_groups[0]['HealthCheckProtocol'].should.equal('HTTP') - target_groups[0]['HealthCheckTimeoutSeconds'].should.equal(5) - target_groups[0]['HealthyThresholdCount'].should.equal(30) - target_groups[0]['UnhealthyThresholdCount'].should.equal(5) - target_groups[0]['Matcher'].should.equal({'HttpCode': '200,201'}) - target_groups[0]['TargetGroupName'].should.equal('mytargetgroup1') - target_groups[0]['Port'].should.equal(80) - target_groups[0]['Protocol'].should.equal('HTTP') - target_groups[0]['TargetType'].should.equal('instance') - - target_groups[1]['HealthCheckIntervalSeconds'].should.equal(30) - target_groups[1]['HealthCheckPath'].should.equal('/status') - target_groups[1]['HealthCheckPort'].should.equal('8080') - target_groups[1]['HealthCheckProtocol'].should.equal('HTTP') - target_groups[1]['HealthCheckTimeoutSeconds'].should.equal(5) - target_groups[1]['HealthyThresholdCount'].should.equal(30) - target_groups[1]['UnhealthyThresholdCount'].should.equal(5) - target_groups[1]['Matcher'].should.equal({'HttpCode': '200'}) - target_groups[1]['TargetGroupName'].should.equal('mytargetgroup2') - target_groups[1]['Port'].should.equal(8080) - target_groups[1]['Protocol'].should.equal('HTTP') - target_groups[1]['TargetType'].should.equal('instance') - - listeners = elbv2_conn.describe_listeners(LoadBalancerArn=load_balancers[0]['LoadBalancerArn'])['Listeners'] - len(listeners).should.equal(1) - listeners[0]['LoadBalancerArn'].should.equal(load_balancers[0]['LoadBalancerArn']) - listeners[0]['Port'].should.equal(80) - listeners[0]['Protocol'].should.equal('HTTP') - listeners[0]['DefaultActions'].should.equal([{ - "Type": "forward", - "TargetGroupArn": target_groups[0]['TargetGroupArn'] - }]) - - # test outputs - stacks = cfn_conn.describe_stacks(StackName='elb_stack')['Stacks'] - len(stacks).should.equal(1) - - dns = list(filter(lambda item: item['OutputKey'] == 'albdns', 
stacks[0]['Outputs']))[0] - name = list(filter(lambda item: item['OutputKey'] == 'albname', stacks[0]['Outputs']))[0] - - dns['OutputValue'].should.equal(load_balancers[0]['DNSName']) - name['OutputValue'].should.equal(load_balancers[0]['LoadBalancerName']) +from __future__ import unicode_literals +import json + +import base64 +import boto +import boto.cloudformation +import boto.datapipeline +import boto.ec2 +import boto.ec2.autoscale +import boto.ec2.elb +from boto.exception import BotoServerError +import boto.iam +import boto.redshift +import boto.sns +import boto.sqs +import boto.vpc +import boto3 +import sure # noqa + +from moto import ( + mock_autoscaling_deprecated, + mock_cloudformation, + mock_cloudformation_deprecated, + mock_datapipeline_deprecated, + mock_ec2, + mock_ec2_deprecated, + mock_elb, + mock_elb_deprecated, + mock_iam_deprecated, + mock_kms, + mock_lambda, + mock_rds_deprecated, + mock_rds2, + mock_rds2_deprecated, + mock_redshift, + mock_redshift_deprecated, + mock_route53_deprecated, + mock_sns_deprecated, + mock_sqs, + mock_sqs_deprecated, + mock_elbv2) + +from .fixtures import ( + ec2_classic_eip, + fn_join, + rds_mysql_with_db_parameter_group, + rds_mysql_with_read_replica, + redshift, + route53_ec2_instance_with_public_ip, + route53_health_check, + route53_roundrobin, + single_instance_with_ebs_volume, + vpc_eip, + vpc_single_instance_in_subnet, +) + + +@mock_cloudformation_deprecated() +def test_stack_sqs_integration(): + sqs_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "QueueGroup": { + + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": "my-queue", + "VisibilityTimeout": 60, + } + }, + }, + } + sqs_template_json = json.dumps(sqs_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=sqs_template_json, + ) + + stack = conn.describe_stacks()[0] + queue = stack.describe_resources()[0] + queue.resource_type.should.equal('AWS::SQS::Queue') + queue.logical_resource_id.should.equal("QueueGroup") + queue.physical_resource_id.should.equal("my-queue") + + +@mock_cloudformation_deprecated() +def test_stack_list_resources(): + sqs_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "QueueGroup": { + + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": "my-queue", + "VisibilityTimeout": 60, + } + }, + }, + } + sqs_template_json = json.dumps(sqs_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=sqs_template_json, + ) + + resources = conn.list_stack_resources("test_stack") + assert len(resources) == 1 + queue = resources[0] + queue.resource_type.should.equal('AWS::SQS::Queue') + queue.logical_resource_id.should.equal("QueueGroup") + queue.physical_resource_id.should.equal("my-queue") + + +@mock_cloudformation_deprecated() +@mock_sqs_deprecated() +def test_update_stack(): + sqs_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "QueueGroup": { + + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": "my-queue", + "VisibilityTimeout": 60, + } + }, + }, + } + sqs_template_json = json.dumps(sqs_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=sqs_template_json, + ) + + sqs_conn = boto.sqs.connect_to_region("us-west-1") + queues = sqs_conn.get_all_queues() + queues.should.have.length_of(1) + queues[0].get_attributes('VisibilityTimeout')[ + 
'VisibilityTimeout'].should.equal('60') + + sqs_template['Resources']['QueueGroup'][ + 'Properties']['VisibilityTimeout'] = 100 + sqs_template_json = json.dumps(sqs_template) + conn.update_stack("test_stack", sqs_template_json) + + queues = sqs_conn.get_all_queues() + queues.should.have.length_of(1) + queues[0].get_attributes('VisibilityTimeout')[ + 'VisibilityTimeout'].should.equal('100') + + +@mock_cloudformation_deprecated() +@mock_sqs_deprecated() +def test_update_stack_and_remove_resource(): + sqs_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "QueueGroup": { + + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": "my-queue", + "VisibilityTimeout": 60, + } + }, + }, + } + sqs_template_json = json.dumps(sqs_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=sqs_template_json, + ) + + sqs_conn = boto.sqs.connect_to_region("us-west-1") + queues = sqs_conn.get_all_queues() + queues.should.have.length_of(1) + + sqs_template['Resources'].pop('QueueGroup') + sqs_template_json = json.dumps(sqs_template) + conn.update_stack("test_stack", sqs_template_json) + + queues = sqs_conn.get_all_queues() + queues.should.have.length_of(0) + + +@mock_cloudformation_deprecated() +@mock_sqs_deprecated() +def test_update_stack_and_add_resource(): + sqs_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": {}, + } + sqs_template_json = json.dumps(sqs_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=sqs_template_json, + ) + + sqs_conn = boto.sqs.connect_to_region("us-west-1") + queues = sqs_conn.get_all_queues() + queues.should.have.length_of(0) + + sqs_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "QueueGroup": { + + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": "my-queue", + "VisibilityTimeout": 60, + } + }, + }, + } + sqs_template_json = json.dumps(sqs_template) + conn.update_stack("test_stack", sqs_template_json) + + queues = sqs_conn.get_all_queues() + queues.should.have.length_of(1) + + +@mock_ec2_deprecated() +@mock_cloudformation_deprecated() +def test_stack_ec2_integration(): + ec2_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "WebServerGroup": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-1234abcd", + "UserData": "some user data", + } + }, + }, + } + ec2_template_json = json.dumps(ec2_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "ec2_stack", + template_body=ec2_template_json, + ) + + ec2_conn = boto.ec2.connect_to_region("us-west-1") + reservation = ec2_conn.get_all_instances()[0] + ec2_instance = reservation.instances[0] + + stack = conn.describe_stacks()[0] + instance = stack.describe_resources()[0] + instance.resource_type.should.equal('AWS::EC2::Instance') + instance.logical_resource_id.should.contain("WebServerGroup") + instance.physical_resource_id.should.equal(ec2_instance.id) + + +@mock_ec2_deprecated() +@mock_elb_deprecated() +@mock_cloudformation_deprecated() +def test_stack_elb_integration_with_attached_ec2_instances(): + elb_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "MyELB": { + "Type": "AWS::ElasticLoadBalancing::LoadBalancer", + "Properties": { + "Instances": [{"Ref": "Ec2Instance1"}], + "LoadBalancerName": "test-elb", + "AvailabilityZones": ['us-east-1'], + "Listeners": [ + { + "InstancePort": "80", + 
"LoadBalancerPort": "80", + "Protocol": "HTTP", + } + ], + } + }, + "Ec2Instance1": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-1234abcd", + "UserData": "some user data", + } + }, + }, + } + elb_template_json = json.dumps(elb_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "elb_stack", + template_body=elb_template_json, + ) + + elb_conn = boto.ec2.elb.connect_to_region("us-west-1") + load_balancer = elb_conn.get_all_load_balancers()[0] + + ec2_conn = boto.ec2.connect_to_region("us-west-1") + reservation = ec2_conn.get_all_instances()[0] + ec2_instance = reservation.instances[0] + + load_balancer.instances[0].id.should.equal(ec2_instance.id) + list(load_balancer.availability_zones).should.equal(['us-east-1']) + + +@mock_elb_deprecated() +@mock_cloudformation_deprecated() +def test_stack_elb_integration_with_health_check(): + elb_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "MyELB": { + "Type": "AWS::ElasticLoadBalancing::LoadBalancer", + "Properties": { + "LoadBalancerName": "test-elb", + "AvailabilityZones": ['us-west-1'], + "HealthCheck": { + "HealthyThreshold": "3", + "Interval": "5", + "Target": "HTTP:80/healthcheck", + "Timeout": "4", + "UnhealthyThreshold": "2", + }, + "Listeners": [ + { + "InstancePort": "80", + "LoadBalancerPort": "80", + "Protocol": "HTTP", + } + ], + } + }, + }, + } + elb_template_json = json.dumps(elb_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "elb_stack", + template_body=elb_template_json, + ) + + elb_conn = boto.ec2.elb.connect_to_region("us-west-1") + load_balancer = elb_conn.get_all_load_balancers()[0] + health_check = load_balancer.health_check + + health_check.healthy_threshold.should.equal(3) + health_check.interval.should.equal(5) + health_check.target.should.equal("HTTP:80/healthcheck") + health_check.timeout.should.equal(4) + health_check.unhealthy_threshold.should.equal(2) + + +@mock_elb_deprecated() +@mock_cloudformation_deprecated() +def test_stack_elb_integration_with_update(): + elb_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "MyELB": { + "Type": "AWS::ElasticLoadBalancing::LoadBalancer", + "Properties": { + "LoadBalancerName": "test-elb", + "AvailabilityZones": ['us-west-1a'], + "Listeners": [ + { + "InstancePort": "80", + "LoadBalancerPort": "80", + "Protocol": "HTTP", + } + ], + "Policies": {"Ref": "AWS::NoValue"}, + } + }, + }, + } + elb_template_json = json.dumps(elb_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "elb_stack", + template_body=elb_template_json, + ) + + elb_conn = boto.ec2.elb.connect_to_region("us-west-1") + load_balancer = elb_conn.get_all_load_balancers()[0] + load_balancer.availability_zones[0].should.equal('us-west-1a') + + elb_template['Resources']['MyELB']['Properties'][ + 'AvailabilityZones'] = ['us-west-1b'] + elb_template_json = json.dumps(elb_template) + conn.update_stack( + "elb_stack", + template_body=elb_template_json, + ) + load_balancer = elb_conn.get_all_load_balancers()[0] + load_balancer.availability_zones[0].should.equal('us-west-1b') + + +@mock_ec2_deprecated() +@mock_redshift_deprecated() +@mock_cloudformation_deprecated() +def test_redshift_stack(): + redshift_template_json = json.dumps(redshift.template) + + vpc_conn = boto.vpc.connect_to_region("us-west-2") + conn = boto.cloudformation.connect_to_region("us-west-2") + conn.create_stack( + "redshift_stack", + 
template_body=redshift_template_json, + parameters=[ + ("DatabaseName", "mydb"), + ("ClusterType", "multi-node"), + ("NumberOfNodes", 2), + ("NodeType", "dw1.xlarge"), + ("MasterUsername", "myuser"), + ("MasterUserPassword", "mypass"), + ("InboundTraffic", "10.0.0.1/16"), + ("PortNumber", 5439), + ] + ) + + redshift_conn = boto.redshift.connect_to_region("us-west-2") + + cluster_res = redshift_conn.describe_clusters() + clusters = cluster_res['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'] + clusters.should.have.length_of(1) + cluster = clusters[0] + cluster['DBName'].should.equal("mydb") + cluster['NumberOfNodes'].should.equal(2) + cluster['NodeType'].should.equal("dw1.xlarge") + cluster['MasterUsername'].should.equal("myuser") + cluster['Port'].should.equal(5439) + cluster['VpcSecurityGroups'].should.have.length_of(1) + security_group_id = cluster['VpcSecurityGroups'][0]['VpcSecurityGroupId'] + + groups = vpc_conn.get_all_security_groups(group_ids=[security_group_id]) + groups.should.have.length_of(1) + group = groups[0] + group.rules.should.have.length_of(1) + group.rules[0].grants[0].cidr_ip.should.equal("10.0.0.1/16") + + +@mock_ec2_deprecated() +@mock_cloudformation_deprecated() +def test_stack_security_groups(): + security_group_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "my-security-group": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "My other group", + }, + }, + "Ec2Instance2": { + "Type": "AWS::EC2::Instance", + "Properties": { + "SecurityGroups": [{"Ref": "InstanceSecurityGroup"}], + "ImageId": "ami-1234abcd", + } + }, + "InstanceSecurityGroup": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "My security group", + "Tags": [ + { + "Key": "bar", + "Value": "baz" + } + ], + "SecurityGroupIngress": [{ + "IpProtocol": "tcp", + "FromPort": "22", + "ToPort": "22", + "CidrIp": "123.123.123.123/32", + }, { + "IpProtocol": "tcp", + "FromPort": "80", + "ToPort": "8000", + "SourceSecurityGroupId": {"Ref": "my-security-group"}, + }] + } + } + }, + } + security_group_template_json = json.dumps(security_group_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "security_group_stack", + template_body=security_group_template_json, + tags={"foo": "bar"} + ) + + ec2_conn = boto.ec2.connect_to_region("us-west-1") + instance_group = ec2_conn.get_all_security_groups( + filters={'description': ['My security group']})[0] + other_group = ec2_conn.get_all_security_groups( + filters={'description': ['My other group']})[0] + + reservation = ec2_conn.get_all_instances()[0] + ec2_instance = reservation.instances[0] + + ec2_instance.groups[0].id.should.equal(instance_group.id) + instance_group.description.should.equal("My security group") + instance_group.tags.should.have.key('foo').which.should.equal('bar') + instance_group.tags.should.have.key('bar').which.should.equal('baz') + rule1, rule2 = instance_group.rules + int(rule1.to_port).should.equal(22) + int(rule1.from_port).should.equal(22) + rule1.grants[0].cidr_ip.should.equal("123.123.123.123/32") + rule1.ip_protocol.should.equal('tcp') + + int(rule2.to_port).should.equal(8000) + int(rule2.from_port).should.equal(80) + rule2.ip_protocol.should.equal('tcp') + rule2.grants[0].group_id.should.equal(other_group.id) + + +@mock_autoscaling_deprecated() +@mock_elb_deprecated() +@mock_cloudformation_deprecated() +@mock_ec2_deprecated() +def test_autoscaling_group_with_elb(): + web_setup_template = { + 
"AWSTemplateFormatVersion": "2010-09-09", + + "Resources": { + "my-as-group": { + "Type": "AWS::AutoScaling::AutoScalingGroup", + "Properties": { + "AvailabilityZones": ['us-east1'], + "LaunchConfigurationName": {"Ref": "my-launch-config"}, + "MinSize": "2", + "MaxSize": "2", + "DesiredCapacity": "2", + "LoadBalancerNames": [{"Ref": "my-elb"}], + "Tags": [ + { + "Key": "propagated-test-tag", "Value": "propagated-test-tag-value", + "PropagateAtLaunch": True}, + { + "Key": "not-propagated-test-tag", + "Value": "not-propagated-test-tag-value", + "PropagateAtLaunch": False + } + ] + }, + }, + + "my-launch-config": { + "Type": "AWS::AutoScaling::LaunchConfiguration", + "Properties": { + "ImageId": "ami-1234abcd", + "UserData": "some user data", + } + }, + + "my-elb": { + "Type": "AWS::ElasticLoadBalancing::LoadBalancer", + "Properties": { + "AvailabilityZones": ['us-east1'], + "Listeners": [{ + "LoadBalancerPort": "80", + "InstancePort": "80", + "Protocol": "HTTP", + }], + "LoadBalancerName": "my-elb", + "HealthCheck": { + "Target": "HTTP:80", + "HealthyThreshold": "3", + "UnhealthyThreshold": "5", + "Interval": "30", + "Timeout": "5", + }, + }, + }, + } + } + + web_setup_template_json = json.dumps(web_setup_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "web_stack", + template_body=web_setup_template_json, + ) + + autoscale_conn = boto.ec2.autoscale.connect_to_region("us-west-1") + autoscale_group = autoscale_conn.get_all_groups()[0] + autoscale_group.launch_config_name.should.contain("my-launch-config") + autoscale_group.load_balancers[0].should.equal('my-elb') + + # Confirm the Launch config was actually created + autoscale_conn.get_all_launch_configurations().should.have.length_of(1) + + # Confirm the ELB was actually created + elb_conn = boto.ec2.elb.connect_to_region("us-west-1") + elb_conn.get_all_load_balancers().should.have.length_of(1) + + stack = conn.describe_stacks()[0] + resources = stack.describe_resources() + as_group_resource = [resource for resource in resources if resource.resource_type == + 'AWS::AutoScaling::AutoScalingGroup'][0] + as_group_resource.physical_resource_id.should.contain("my-as-group") + + launch_config_resource = [ + resource for resource in resources if + resource.resource_type == 'AWS::AutoScaling::LaunchConfiguration'][0] + launch_config_resource.physical_resource_id.should.contain( + "my-launch-config") + + elb_resource = [resource for resource in resources if resource.resource_type == + 'AWS::ElasticLoadBalancing::LoadBalancer'][0] + elb_resource.physical_resource_id.should.contain("my-elb") + + # confirm the instances were created with the right tags + ec2_conn = boto.ec2.connect_to_region('us-west-1') + reservations = ec2_conn.get_all_reservations() + len(reservations).should.equal(1) + reservation = reservations[0] + len(reservation.instances).should.equal(2) + for instance in reservation.instances: + instance.tags['propagated-test-tag'].should.equal('propagated-test-tag-value') + instance.tags.keys().should_not.contain('not-propagated-test-tag') + + +@mock_autoscaling_deprecated() +@mock_cloudformation_deprecated() +@mock_ec2_deprecated() +def test_autoscaling_group_update(): + asg_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "my-as-group": { + "Type": "AWS::AutoScaling::AutoScalingGroup", + "Properties": { + "AvailabilityZones": ['us-west-1'], + "LaunchConfigurationName": {"Ref": "my-launch-config"}, + "MinSize": "2", + "MaxSize": "2", + "DesiredCapacity": "2" + }, + }, + 
+ + "my-launch-config": { + "Type": "AWS::AutoScaling::LaunchConfiguration", + "Properties": { + "ImageId": "ami-1234abcd", + "UserData": "some user data", + } + }, + }, + } + asg_template_json = json.dumps(asg_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "asg_stack", + template_body=asg_template_json, + ) + + autoscale_conn = boto.ec2.autoscale.connect_to_region("us-west-1") + asg = autoscale_conn.get_all_groups()[0] + asg.min_size.should.equal(2) + asg.max_size.should.equal(2) + asg.desired_capacity.should.equal(2) + + asg_template['Resources']['my-as-group']['Properties']['MaxSize'] = 3 + asg_template['Resources']['my-as-group']['Properties']['Tags'] = [ + { + "Key": "propagated-test-tag", "Value": "propagated-test-tag-value", + "PropagateAtLaunch": True}, + { + "Key": "not-propagated-test-tag", + "Value": "not-propagated-test-tag-value", + "PropagateAtLaunch": False + } + ] + asg_template_json = json.dumps(asg_template) + conn.update_stack( + "asg_stack", + template_body=asg_template_json, + ) + asg = autoscale_conn.get_all_groups()[0] + asg.min_size.should.equal(2) + asg.max_size.should.equal(3) + asg.desired_capacity.should.equal(2) + + # confirm the instances were created with the right tags + ec2_conn = boto.ec2.connect_to_region('us-west-1') + reservations = ec2_conn.get_all_reservations() + running_instance_count = 0 + for res in reservations: + for instance in res.instances: + if instance.state == 'running': + running_instance_count += 1 + instance.tags['propagated-test-tag'].should.equal('propagated-test-tag-value') + instance.tags.keys().should_not.contain('not-propagated-test-tag') + running_instance_count.should.equal(2) + + +@mock_ec2_deprecated() +@mock_cloudformation_deprecated() +def test_vpc_single_instance_in_subnet(): + template_json = json.dumps(vpc_single_instance_in_subnet.template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=template_json, + parameters=[("KeyName", "my_key")], + ) + + vpc_conn = boto.vpc.connect_to_region("us-west-1") + + vpc = vpc_conn.get_all_vpcs(filters={'cidrBlock': '10.0.0.0/16'})[0] + vpc.cidr_block.should.equal("10.0.0.0/16") + + # Add this once we implement the endpoint + # vpc_conn.get_all_internet_gateways().should.have.length_of(1) + + subnet = vpc_conn.get_all_subnets(filters={'vpcId': vpc.id})[0] + subnet.vpc_id.should.equal(vpc.id) + + ec2_conn = boto.ec2.connect_to_region("us-west-1") + reservation = ec2_conn.get_all_instances()[0] + instance = reservation.instances[0] + instance.tags["Foo"].should.equal("Bar") + # Check that the EIP is attached to the EC2 instance + eip = ec2_conn.get_all_addresses()[0] + eip.domain.should.equal('vpc') + eip.instance_id.should.equal(instance.id) + + security_group = ec2_conn.get_all_security_groups( + filters={'vpc_id': [vpc.id]})[0] + security_group.vpc_id.should.equal(vpc.id) + + stack = conn.describe_stacks()[0] + + vpc.tags.should.have.key('Application').which.should.equal(stack.stack_id) + + resources = stack.describe_resources() + vpc_resource = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::VPC'][0] + vpc_resource.physical_resource_id.should.equal(vpc.id) + + subnet_resource = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::Subnet'][0] + subnet_resource.physical_resource_id.should.equal(subnet.id) + + eip_resource = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] + 
eip_resource.physical_resource_id.should.equal(eip.public_ip) + + +@mock_cloudformation() +@mock_ec2() +@mock_rds2() +def test_rds_db_parameter_groups(): + ec2_conn = boto3.client("ec2", region_name="us-west-1") + ec2_conn.create_security_group( + GroupName='application', Description='Our Application Group') + + template_json = json.dumps(rds_mysql_with_db_parameter_group.template) + cf_conn = boto3.client('cloudformation', 'us-west-1') + cf_conn.create_stack( + StackName="test_stack", + TemplateBody=template_json, + Parameters=[{'ParameterKey': key, 'ParameterValue': value} for + key, value in [ + ("DBInstanceIdentifier", "master_db"), + ("DBName", "my_db"), + ("DBUser", "my_user"), + ("DBPassword", "my_password"), + ("DBAllocatedStorage", "20"), + ("DBInstanceClass", "db.m1.medium"), + ("EC2SecurityGroup", "application"), + ("MultiAZ", "true"), + ] + ], + ) + + rds_conn = boto3.client('rds', region_name="us-west-1") + + db_parameter_groups = rds_conn.describe_db_parameter_groups() + len(db_parameter_groups['DBParameterGroups']).should.equal(1) + db_parameter_group_name = db_parameter_groups[ + 'DBParameterGroups'][0]['DBParameterGroupName'] + + found_cloudformation_set_parameter = False + for db_parameter in rds_conn.describe_db_parameters(DBParameterGroupName=db_parameter_group_name)[ + 'Parameters']: + if db_parameter['ParameterName'] == 'BACKLOG_QUEUE_LIMIT' and db_parameter[ + 'ParameterValue'] == '2048': + found_cloudformation_set_parameter = True + + found_cloudformation_set_parameter.should.equal(True) + + +@mock_cloudformation_deprecated() +@mock_ec2_deprecated() +@mock_rds_deprecated() +def test_rds_mysql_with_read_replica(): + ec2_conn = boto.ec2.connect_to_region("us-west-1") + ec2_conn.create_security_group('application', 'Our Application Group') + + template_json = json.dumps(rds_mysql_with_read_replica.template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=template_json, + parameters=[ + ("DBInstanceIdentifier", "master_db"), + ("DBName", "my_db"), + ("DBUser", "my_user"), + ("DBPassword", "my_password"), + ("DBAllocatedStorage", "20"), + ("DBInstanceClass", "db.m1.medium"), + ("EC2SecurityGroup", "application"), + ("MultiAZ", "true"), + ], + ) + + rds_conn = boto.rds.connect_to_region("us-west-1") + + primary = rds_conn.get_all_dbinstances("master_db")[0] + primary.master_username.should.equal("my_user") + primary.allocated_storage.should.equal(20) + primary.instance_class.should.equal("db.m1.medium") + primary.multi_az.should.equal(True) + list(primary.read_replica_dbinstance_identifiers).should.have.length_of(1) + replica_id = primary.read_replica_dbinstance_identifiers[0] + + replica = rds_conn.get_all_dbinstances(replica_id)[0] + replica.instance_class.should.equal("db.m1.medium") + + security_group_name = primary.security_groups[0].name + security_group = rds_conn.get_all_dbsecurity_groups(security_group_name)[0] + security_group.ec2_groups[0].name.should.equal("application") + + +@mock_cloudformation_deprecated() +@mock_ec2_deprecated() +@mock_rds_deprecated() +def test_rds_mysql_with_read_replica_in_vpc(): + template_json = json.dumps(rds_mysql_with_read_replica.template) + conn = boto.cloudformation.connect_to_region("eu-central-1") + conn.create_stack( + "test_stack", + template_body=template_json, + parameters=[ + ("DBInstanceIdentifier", "master_db"), + ("DBName", "my_db"), + ("DBUser", "my_user"), + ("DBPassword", "my_password"), + ("DBAllocatedStorage", "20"), + ("DBInstanceClass", 
"db.m1.medium"), + ("MultiAZ", "true"), + ], + ) + + rds_conn = boto.rds.connect_to_region("eu-central-1") + primary = rds_conn.get_all_dbinstances("master_db")[0] + + subnet_group_name = primary.subnet_group.name + subnet_group = rds_conn.get_all_db_subnet_groups(subnet_group_name)[0] + subnet_group.description.should.equal("my db subnet group") + + +@mock_autoscaling_deprecated() +@mock_iam_deprecated() +@mock_cloudformation_deprecated() +def test_iam_roles(): + iam_template = { + "AWSTemplateFormatVersion": "2010-09-09", + + "Resources": { + + "my-launch-config": { + "Properties": { + "IamInstanceProfile": {"Ref": "my-instance-profile-with-path"}, + "ImageId": "ami-1234abcd", + }, + "Type": "AWS::AutoScaling::LaunchConfiguration" + }, + "my-instance-profile-with-path": { + "Properties": { + "Path": "my-path", + "Roles": [{"Ref": "my-role-with-path"}], + }, + "Type": "AWS::IAM::InstanceProfile" + }, + "my-instance-profile-no-path": { + "Properties": { + "Roles": [{"Ref": "my-role-no-path"}], + }, + "Type": "AWS::IAM::InstanceProfile" + }, + "my-role-with-path": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "ec2.amazonaws.com" + ] + } + } + ] + }, + "Path": "my-path", + "Policies": [ + { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "ec2:CreateTags", + "ec2:DescribeInstances", + "ec2:DescribeTags" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "EC2_Tags" + }, + { + "PolicyDocument": { + "Statement": [ + { + "Action": [ + "sqs:*" + ], + "Effect": "Allow", + "Resource": [ + "*" + ] + } + ], + "Version": "2012-10-17" + }, + "PolicyName": "SQS" + }, + ] + }, + "Type": "AWS::IAM::Role" + }, + "my-role-no-path": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "ec2.amazonaws.com" + ] + } + } + ] + }, + }, + "Type": "AWS::IAM::Role" + } + } + } + + iam_template_json = json.dumps(iam_template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=iam_template_json, + ) + + iam_conn = boto.iam.connect_to_region("us-west-1") + + role_results = iam_conn.list_roles()['list_roles_response'][ + 'list_roles_result']['roles'] + role_name_to_id = {} + for role_result in role_results: + role = iam_conn.get_role(role_result.role_name) + role.role_name.should.contain("my-role") + if 'with-path' in role.role_name: + role_name_to_id['with-path'] = role.role_id + role.path.should.equal("my-path") + else: + role_name_to_id['no-path'] = role.role_id + role.role_name.should.contain('no-path') + role.path.should.equal('/') + + instance_profile_responses = iam_conn.list_instance_profiles()[ + 'list_instance_profiles_response']['list_instance_profiles_result']['instance_profiles'] + instance_profile_responses.should.have.length_of(2) + instance_profile_names = [] + + for instance_profile_response in instance_profile_responses: + instance_profile = iam_conn.get_instance_profile(instance_profile_response.instance_profile_name) + instance_profile_names.append(instance_profile.instance_profile_name) + instance_profile.instance_profile_name.should.contain( + "my-instance-profile") + if "with-path" in instance_profile.instance_profile_name: + instance_profile.path.should.equal("my-path") + instance_profile.role_id.should.equal(role_name_to_id['with-path']) + 
else: + instance_profile.instance_profile_name.should.contain('no-path') + instance_profile.role_id.should.equal(role_name_to_id['no-path']) + instance_profile.path.should.equal('/') + + autoscale_conn = boto.ec2.autoscale.connect_to_region("us-west-1") + launch_config = autoscale_conn.get_all_launch_configurations()[0] + launch_config.instance_profile_name.should.contain("my-instance-profile-with-path") + + stack = conn.describe_stacks()[0] + resources = stack.describe_resources() + instance_profile_resources = [ + resource for resource in resources if resource.resource_type == 'AWS::IAM::InstanceProfile'] + {ip.physical_resource_id for ip in instance_profile_resources}.should.equal(set(instance_profile_names)) + + role_resources = [ + resource for resource in resources if resource.resource_type == 'AWS::IAM::Role'] + {r.physical_resource_id for r in role_resources}.should.equal(set(role_name_to_id.values())) + + +@mock_ec2_deprecated() +@mock_cloudformation_deprecated() +def test_single_instance_with_ebs_volume(): + template_json = json.dumps(single_instance_with_ebs_volume.template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=template_json, + parameters=[("KeyName", "key_name")] + ) + + ec2_conn = boto.ec2.connect_to_region("us-west-1") + reservation = ec2_conn.get_all_instances()[0] + ec2_instance = reservation.instances[0] + + volumes = ec2_conn.get_all_volumes() + # Grab the mounted drive + volume = [ + volume for volume in volumes if volume.attach_data.device == '/dev/sdh'][0] + volume.volume_state().should.equal('in-use') + volume.attach_data.instance_id.should.equal(ec2_instance.id) + + stack = conn.describe_stacks()[0] + resources = stack.describe_resources() + ebs_volumes = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::Volume'] + ebs_volumes[0].physical_resource_id.should.equal(volume.id) + + +@mock_cloudformation_deprecated() +def test_create_template_without_required_param(): + template_json = json.dumps(single_instance_with_ebs_volume.template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack.when.called_with( + "test_stack", + template_body=template_json, + ).should.throw(BotoServerError) + + +@mock_ec2_deprecated() +@mock_cloudformation_deprecated() +def test_classic_eip(): + template_json = json.dumps(ec2_classic_eip.template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack("test_stack", template_body=template_json) + ec2_conn = boto.ec2.connect_to_region("us-west-1") + eip = ec2_conn.get_all_addresses()[0] + + stack = conn.describe_stacks()[0] + resources = stack.describe_resources() + cfn_eip = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] + cfn_eip.physical_resource_id.should.equal(eip.public_ip) + + +@mock_ec2_deprecated() +@mock_cloudformation_deprecated() +def test_vpc_eip(): + template_json = json.dumps(vpc_eip.template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack("test_stack", template_body=template_json) + ec2_conn = boto.ec2.connect_to_region("us-west-1") + eip = ec2_conn.get_all_addresses()[0] + + stack = conn.describe_stacks()[0] + resources = stack.describe_resources() + cfn_eip = [ + resource for resource in resources if resource.resource_type == 'AWS::EC2::EIP'][0] + cfn_eip.physical_resource_id.should.equal(eip.public_ip) + + +@mock_ec2_deprecated() +@mock_cloudformation_deprecated() +def test_fn_join(): + 
template_json = json.dumps(fn_join.template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack("test_stack", template_body=template_json) + ec2_conn = boto.ec2.connect_to_region("us-west-1") + eip = ec2_conn.get_all_addresses()[0] + + stack = conn.describe_stacks()[0] + fn_join_output = stack.outputs[0] + fn_join_output.value.should.equal('test eip:{0}'.format(eip.public_ip)) + + +@mock_cloudformation_deprecated() +@mock_sqs_deprecated() +def test_conditional_resources(): + sqs_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Parameters": { + "EnvType": { + "Description": "Environment type.", + "Type": "String", + } + }, + "Conditions": { + "CreateQueue": {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]} + }, + "Resources": { + "QueueGroup": { + "Condition": "CreateQueue", + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": "my-queue", + "VisibilityTimeout": 60, + } + }, + }, + } + sqs_template_json = json.dumps(sqs_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack_without_queue", + template_body=sqs_template_json, + parameters=[("EnvType", "staging")], + ) + sqs_conn = boto.sqs.connect_to_region("us-west-1") + list(sqs_conn.get_all_queues()).should.have.length_of(0) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack_with_queue", + template_body=sqs_template_json, + parameters=[("EnvType", "prod")], + ) + sqs_conn = boto.sqs.connect_to_region("us-west-1") + list(sqs_conn.get_all_queues()).should.have.length_of(1) + + +@mock_cloudformation_deprecated() +@mock_ec2_deprecated() +def test_conditional_if_handling(): + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Conditions": { + "EnvEqualsPrd": { + "Fn::Equals": [ + { + "Ref": "ENV" + }, + "prd" + ] + } + }, + "Parameters": { + "ENV": { + "Default": "dev", + "Description": "Deployment environment for the stack (dev/prd)", + "Type": "String" + }, + }, + "Description": "Stack 1", + "Resources": { + "App1": { + "Properties": { + "ImageId": { + "Fn::If": [ + "EnvEqualsPrd", + "ami-00000000", + "ami-ffffffff" + ] + }, + }, + "Type": "AWS::EC2::Instance" + }, + } + } + dummy_template_json = json.dumps(dummy_template) + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack('test_stack1', template_body=dummy_template_json) + ec2_conn = boto.ec2.connect_to_region("us-west-1") + reservation = ec2_conn.get_all_instances()[0] + ec2_instance = reservation.instances[0] + ec2_instance.image_id.should.equal("ami-ffffffff") + ec2_instance.terminate() + + conn = boto.cloudformation.connect_to_region("us-west-2") + conn.create_stack( + 'test_stack1', template_body=dummy_template_json, parameters=[("ENV", "prd")]) + ec2_conn = boto.ec2.connect_to_region("us-west-2") + reservation = ec2_conn.get_all_instances()[0] + ec2_instance = reservation.instances[0] + ec2_instance.image_id.should.equal("ami-00000000") + + +@mock_cloudformation_deprecated() +@mock_ec2_deprecated() +def test_cloudformation_mapping(): + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Mappings": { + "RegionMap": { + "us-east-1": {"32": "ami-6411e20d", "64": "ami-7a11e213"}, + "us-west-1": {"32": "ami-c9c7978c", "64": "ami-cfc7978a"}, + "eu-west-1": {"32": "ami-37c2f643", "64": "ami-31c2f645"}, + "ap-southeast-1": {"32": "ami-66f28c34", "64": "ami-60f28c32"}, + "ap-northeast-1": {"32": "ami-9c03a89d", "64": "ami-a003a8a1"} + } + }, + "Resources": { + "WebServer": { + "Type": 
"AWS::EC2::Instance", + "Properties": { + "ImageId": { + "Fn::FindInMap": ["RegionMap", {"Ref": "AWS::Region"}, "32"] + }, + "InstanceType": "m1.small" + }, + "Type": "AWS::EC2::Instance", + }, + }, + } + + dummy_template_json = json.dumps(dummy_template) + + conn = boto.cloudformation.connect_to_region("us-east-1") + conn.create_stack('test_stack1', template_body=dummy_template_json) + ec2_conn = boto.ec2.connect_to_region("us-east-1") + reservation = ec2_conn.get_all_instances()[0] + ec2_instance = reservation.instances[0] + ec2_instance.image_id.should.equal("ami-6411e20d") + + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack('test_stack1', template_body=dummy_template_json) + ec2_conn = boto.ec2.connect_to_region("us-west-1") + reservation = ec2_conn.get_all_instances()[0] + ec2_instance = reservation.instances[0] + ec2_instance.image_id.should.equal("ami-c9c7978c") + + +@mock_cloudformation_deprecated() +@mock_route53_deprecated() +def test_route53_roundrobin(): + route53_conn = boto.connect_route53() + + template_json = json.dumps(route53_roundrobin.template) + conn = boto.cloudformation.connect_to_region("us-west-1") + stack = conn.create_stack( + "test_stack", + template_body=template_json, + ) + + zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ + 'HostedZones'] + list(zones).should.have.length_of(1) + zone_id = zones[0]['Id'] + zone_id = zone_id.split('/') + zone_id = zone_id[2] + + rrsets = route53_conn.get_all_rrsets(zone_id) + rrsets.hosted_zone_id.should.equal(zone_id) + rrsets.should.have.length_of(2) + record_set1 = rrsets[0] + record_set1.name.should.equal('test_stack.us-west-1.my_zone.') + record_set1.identifier.should.equal("test_stack AWS") + record_set1.type.should.equal('CNAME') + record_set1.ttl.should.equal('900') + record_set1.weight.should.equal('3') + record_set1.resource_records[0].should.equal("aws.amazon.com") + + record_set2 = rrsets[1] + record_set2.name.should.equal('test_stack.us-west-1.my_zone.') + record_set2.identifier.should.equal("test_stack Amazon") + record_set2.type.should.equal('CNAME') + record_set2.ttl.should.equal('900') + record_set2.weight.should.equal('1') + record_set2.resource_records[0].should.equal("www.amazon.com") + + stack = conn.describe_stacks()[0] + output = stack.outputs[0] + output.key.should.equal('DomainName') + output.value.should.equal( + 'arn:aws:route53:::hostedzone/{0}'.format(zone_id)) + + +@mock_cloudformation_deprecated() +@mock_ec2_deprecated() +@mock_route53_deprecated() +def test_route53_ec2_instance_with_public_ip(): + route53_conn = boto.connect_route53() + ec2_conn = boto.ec2.connect_to_region("us-west-1") + + template_json = json.dumps(route53_ec2_instance_with_public_ip.template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=template_json, + ) + + instance_id = ec2_conn.get_all_reservations()[0].instances[0].id + + zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ + 'HostedZones'] + list(zones).should.have.length_of(1) + zone_id = zones[0]['Id'] + zone_id = zone_id.split('/') + zone_id = zone_id[2] + + rrsets = route53_conn.get_all_rrsets(zone_id) + rrsets.should.have.length_of(1) + + record_set1 = rrsets[0] + record_set1.name.should.equal('{0}.us-west-1.my_zone.'.format(instance_id)) + record_set1.identifier.should.equal(None) + record_set1.type.should.equal('A') + record_set1.ttl.should.equal('900') + record_set1.weight.should.equal(None) + 
record_set1.resource_records[0].should.equal("10.0.0.25") + + +@mock_cloudformation_deprecated() +@mock_route53_deprecated() +def test_route53_associate_health_check(): + route53_conn = boto.connect_route53() + + template_json = json.dumps(route53_health_check.template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=template_json, + ) + + checks = route53_conn.get_list_health_checks()['ListHealthChecksResponse'][ + 'HealthChecks'] + list(checks).should.have.length_of(1) + check = checks[0] + health_check_id = check['Id'] + config = check['HealthCheckConfig'] + config["FailureThreshold"].should.equal("3") + config["IPAddress"].should.equal("10.0.0.4") + config["Port"].should.equal("80") + config["RequestInterval"].should.equal("10") + config["ResourcePath"].should.equal("/") + config["Type"].should.equal("HTTP") + + zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ + 'HostedZones'] + list(zones).should.have.length_of(1) + zone_id = zones[0]['Id'] + zone_id = zone_id.split('/') + zone_id = zone_id[2] + + rrsets = route53_conn.get_all_rrsets(zone_id) + rrsets.should.have.length_of(1) + + record_set = rrsets[0] + record_set.health_check.should.equal(health_check_id) + + +@mock_cloudformation_deprecated() +@mock_route53_deprecated() +def test_route53_with_update(): + route53_conn = boto.connect_route53() + + template_json = json.dumps(route53_health_check.template) + cf_conn = boto.cloudformation.connect_to_region("us-west-1") + cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + + zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ + 'HostedZones'] + list(zones).should.have.length_of(1) + zone_id = zones[0]['Id'] + zone_id = zone_id.split('/') + zone_id = zone_id[2] + + rrsets = route53_conn.get_all_rrsets(zone_id) + rrsets.should.have.length_of(1) + + record_set = rrsets[0] + record_set.resource_records.should.equal(["my.example.com"]) + + route53_health_check.template['Resources']['myDNSRecord'][ + 'Properties']['ResourceRecords'] = ["my_other.example.com"] + template_json = json.dumps(route53_health_check.template) + cf_conn.update_stack( + "test_stack", + template_body=template_json, + ) + + zones = route53_conn.get_all_hosted_zones()['ListHostedZonesResponse'][ + 'HostedZones'] + list(zones).should.have.length_of(1) + zone_id = zones[0]['Id'] + zone_id = zone_id.split('/') + zone_id = zone_id[2] + + rrsets = route53_conn.get_all_rrsets(zone_id) + rrsets.should.have.length_of(1) + + record_set = rrsets[0] + record_set.resource_records.should.equal(["my_other.example.com"]) + + +@mock_cloudformation_deprecated() +@mock_sns_deprecated() +def test_sns_topic(): + dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "MySNSTopic": { + "Type": "AWS::SNS::Topic", + "Properties": { + "Subscription": [ + {"Endpoint": "https://example.com", "Protocol": "https"}, + ], + "TopicName": "my_topics", + } + } + }, + "Outputs": { + "topic_name": { + "Value": {"Fn::GetAtt": ["MySNSTopic", "TopicName"]} + }, + "topic_arn": { + "Value": {"Ref": "MySNSTopic"} + }, + } + } + template_json = json.dumps(dummy_template) + conn = boto.cloudformation.connect_to_region("us-west-1") + stack = conn.create_stack( + "test_stack", + template_body=template_json, + ) + + sns_conn = boto.sns.connect_to_region("us-west-1") + topics = sns_conn.get_all_topics()["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"] + topics.should.have.length_of(1) + topic_arn = 
topics[0]['TopicArn'] + topic_arn.should.contain("my_topics") + + subscriptions = sns_conn.get_all_subscriptions()["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["Subscriptions"] + subscriptions.should.have.length_of(1) + subscription = subscriptions[0] + subscription["TopicArn"].should.equal(topic_arn) + subscription["Protocol"].should.equal("https") + subscription["SubscriptionArn"].should.contain(topic_arn) + subscription["Endpoint"].should.equal("https://example.com") + + stack = conn.describe_stacks()[0] + topic_name_output = [x for x in stack.outputs if x.key == 'topic_name'][0] + topic_name_output.value.should.equal("my_topics") + topic_arn_output = [x for x in stack.outputs if x.key == 'topic_arn'][0] + topic_arn_output.value.should.equal(topic_arn) + + +@mock_cloudformation_deprecated +@mock_ec2_deprecated +def test_vpc_gateway_attachment_creation_should_attach_itself_to_vpc(): + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "internetgateway": { + "Type": "AWS::EC2::InternetGateway" + }, + "testvpc": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + "EnableDnsHostnames": "true", + "EnableDnsSupport": "true", + "InstanceTenancy": "default" + }, + }, + "vpcgatewayattachment": { + "Type": "AWS::EC2::VPCGatewayAttachment", + "Properties": { + "InternetGatewayId": { + "Ref": "internetgateway" + }, + "VpcId": { + "Ref": "testvpc" + } + }, + }, + } + } + + template_json = json.dumps(template) + cf_conn = boto.cloudformation.connect_to_region("us-west-1") + cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + + vpc_conn = boto.vpc.connect_to_region("us-west-1") + vpc = vpc_conn.get_all_vpcs(filters={'cidrBlock': '10.0.0.0/16'})[0] + igws = vpc_conn.get_all_internet_gateways( + filters={'attachment.vpc-id': vpc.id} + ) + + igws.should.have.length_of(1) + + +@mock_cloudformation_deprecated +@mock_ec2_deprecated +def test_vpc_peering_creation(): + vpc_conn = boto.vpc.connect_to_region("us-west-1") + vpc_source = vpc_conn.create_vpc("10.0.0.0/16") + peer_vpc = vpc_conn.create_vpc("10.1.0.0/16") + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "vpcpeeringconnection": { + "Type": "AWS::EC2::VPCPeeringConnection", + "Properties": { + "PeerVpcId": peer_vpc.id, + "VpcId": vpc_source.id, + } + }, + } + } + + template_json = json.dumps(template) + cf_conn = boto.cloudformation.connect_to_region("us-west-1") + cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + + peering_connections = vpc_conn.get_all_vpc_peering_connections() + peering_connections.should.have.length_of(1) + + +@mock_cloudformation_deprecated +@mock_ec2_deprecated +def test_multiple_security_group_ingress_separate_from_security_group_by_id(): + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "test-security-group1": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "test security group", + "Tags": [ + { + "Key": "sg-name", + "Value": "sg1" + } + ] + }, + }, + "test-security-group2": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "test security group", + "Tags": [ + { + "Key": "sg-name", + "Value": "sg2" + } + ] + }, + }, + "test-sg-ingress": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupId": {"Ref": "test-security-group1"}, + "IpProtocol": "tcp", + "FromPort": "80", + "ToPort": "8080", + "SourceSecurityGroupId": {"Ref": "test-security-group2"}, + } + } + } + } + + template_json = 
json.dumps(template) + cf_conn = boto.cloudformation.connect_to_region("us-west-1") + cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + ec2_conn = boto.ec2.connect_to_region("us-west-1") + + security_group1 = ec2_conn.get_all_security_groups( + filters={"tag:sg-name": "sg1"})[0] + security_group2 = ec2_conn.get_all_security_groups( + filters={"tag:sg-name": "sg2"})[0] + + security_group1.rules.should.have.length_of(1) + security_group1.rules[0].grants.should.have.length_of(1) + security_group1.rules[0].grants[ + 0].group_id.should.equal(security_group2.id) + security_group1.rules[0].ip_protocol.should.equal('tcp') + security_group1.rules[0].from_port.should.equal('80') + security_group1.rules[0].to_port.should.equal('8080') + + +@mock_cloudformation_deprecated +@mock_ec2_deprecated +def test_security_group_ingress_separate_from_security_group_by_id(): + ec2_conn = boto.ec2.connect_to_region("us-west-1") + ec2_conn.create_security_group( + "test-security-group1", "test security group") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "test-security-group2": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "test security group", + "Tags": [ + { + "Key": "sg-name", + "Value": "sg2" + } + ] + }, + }, + "test-sg-ingress": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupName": "test-security-group1", + "IpProtocol": "tcp", + "FromPort": "80", + "ToPort": "8080", + "SourceSecurityGroupId": {"Ref": "test-security-group2"}, + } + } + } + } + + template_json = json.dumps(template) + cf_conn = boto.cloudformation.connect_to_region("us-west-1") + cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + security_group1 = ec2_conn.get_all_security_groups( + groupnames=["test-security-group1"])[0] + security_group2 = ec2_conn.get_all_security_groups( + filters={"tag:sg-name": "sg2"})[0] + + security_group1.rules.should.have.length_of(1) + security_group1.rules[0].grants.should.have.length_of(1) + security_group1.rules[0].grants[ + 0].group_id.should.equal(security_group2.id) + security_group1.rules[0].ip_protocol.should.equal('tcp') + security_group1.rules[0].from_port.should.equal('80') + security_group1.rules[0].to_port.should.equal('8080') + + +@mock_cloudformation_deprecated +@mock_ec2_deprecated +def test_security_group_ingress_separate_from_security_group_by_id_using_vpc(): + vpc_conn = boto.vpc.connect_to_region("us-west-1") + vpc = vpc_conn.create_vpc("10.0.0.0/16") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "test-security-group1": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "test security group", + "VpcId": vpc.id, + "Tags": [ + { + "Key": "sg-name", + "Value": "sg1" + } + ] + }, + }, + "test-security-group2": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "test security group", + "VpcId": vpc.id, + "Tags": [ + { + "Key": "sg-name", + "Value": "sg2" + } + ] + }, + }, + "test-sg-ingress": { + "Type": "AWS::EC2::SecurityGroupIngress", + "Properties": { + "GroupId": {"Ref": "test-security-group1"}, + "VpcId": vpc.id, + "IpProtocol": "tcp", + "FromPort": "80", + "ToPort": "8080", + "SourceSecurityGroupId": {"Ref": "test-security-group2"}, + } + } + } + } + + template_json = json.dumps(template) + cf_conn = boto.cloudformation.connect_to_region("us-west-1") + cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + security_group1 = 
vpc_conn.get_all_security_groups( + filters={"tag:sg-name": "sg1"})[0] + security_group2 = vpc_conn.get_all_security_groups( + filters={"tag:sg-name": "sg2"})[0] + + security_group1.rules.should.have.length_of(1) + security_group1.rules[0].grants.should.have.length_of(1) + security_group1.rules[0].grants[ + 0].group_id.should.equal(security_group2.id) + security_group1.rules[0].ip_protocol.should.equal('tcp') + security_group1.rules[0].from_port.should.equal('80') + security_group1.rules[0].to_port.should.equal('8080') + + +@mock_cloudformation_deprecated +@mock_ec2_deprecated +def test_security_group_with_update(): + vpc_conn = boto.vpc.connect_to_region("us-west-1") + vpc1 = vpc_conn.create_vpc("10.0.0.0/16") + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "test-security-group": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupDescription": "test security group", + "VpcId": vpc1.id, + "Tags": [ + { + "Key": "sg-name", + "Value": "sg" + } + ] + }, + }, + } + } + + template_json = json.dumps(template) + cf_conn = boto.cloudformation.connect_to_region("us-west-1") + cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + security_group = vpc_conn.get_all_security_groups( + filters={"tag:sg-name": "sg"})[0] + security_group.vpc_id.should.equal(vpc1.id) + + vpc2 = vpc_conn.create_vpc("10.1.0.0/16") + template['Resources'][ + 'test-security-group']['Properties']['VpcId'] = vpc2.id + template_json = json.dumps(template) + cf_conn.update_stack( + "test_stack", + template_body=template_json, + ) + security_group = vpc_conn.get_all_security_groups( + filters={"tag:sg-name": "sg"})[0] + security_group.vpc_id.should.equal(vpc2.id) + + +@mock_cloudformation_deprecated +@mock_ec2_deprecated +def test_subnets_should_be_created_with_availability_zone(): + vpc_conn = boto.vpc.connect_to_region('us-west-1') + vpc = vpc_conn.create_vpc("10.0.0.0/16") + + subnet_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "testSubnet": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "VpcId": vpc.id, + "CidrBlock": "10.0.0.0/24", + "AvailabilityZone": "us-west-1b", + } + } + } + } + cf_conn = boto.cloudformation.connect_to_region("us-west-1") + template_json = json.dumps(subnet_template) + cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + subnet = vpc_conn.get_all_subnets(filters={'cidrBlock': '10.0.0.0/24'})[0] + subnet.availability_zone.should.equal('us-west-1b') + + +@mock_cloudformation_deprecated +@mock_datapipeline_deprecated +def test_datapipeline(): + dp_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "dataPipeline": { + "Properties": { + "Activate": "true", + "Name": "testDataPipeline", + "PipelineObjects": [ + { + "Fields": [ + { + "Key": "failureAndRerunMode", + "StringValue": "CASCADE" + }, + { + "Key": "scheduleType", + "StringValue": "cron" + }, + { + "Key": "schedule", + "RefValue": "DefaultSchedule" + }, + { + "Key": "pipelineLogUri", + "StringValue": "s3://bucket/logs" + }, + { + "Key": "type", + "StringValue": "Default" + }, + ], + "Id": "Default", + "Name": "Default" + }, + { + "Fields": [ + { + "Key": "startDateTime", + "StringValue": "1970-01-01T01:00:00" + }, + { + "Key": "period", + "StringValue": "1 Day" + }, + { + "Key": "type", + "StringValue": "Schedule" + } + ], + "Id": "DefaultSchedule", + "Name": "RunOnce" + } + ], + "PipelineTags": [] + }, + "Type": "AWS::DataPipeline::Pipeline" + } + } + } + cf_conn = 
boto.cloudformation.connect_to_region("us-east-1") + template_json = json.dumps(dp_template) + stack_id = cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + + dp_conn = boto.datapipeline.connect_to_region('us-east-1') + data_pipelines = dp_conn.list_pipelines() + + data_pipelines['pipelineIdList'].should.have.length_of(1) + data_pipelines['pipelineIdList'][0][ + 'name'].should.equal('testDataPipeline') + + stack_resources = cf_conn.list_stack_resources(stack_id) + stack_resources.should.have.length_of(1) + stack_resources[0].physical_resource_id.should.equal( + data_pipelines['pipelineIdList'][0]['id']) + + +@mock_cloudformation +@mock_lambda +def test_lambda_function(): + # switch this to python as backend lambda only supports python execution. + lambda_code = """ +def lambda_handler(event, context): + return (event, context) +""" + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "lambdaTest": { + "Type": "AWS::Lambda::Function", + "Properties": { + "Code": { + # CloudFormation expects a string as ZipFile, not a ZIP file base64-encoded + "ZipFile": {"Fn::Join": ["\n", lambda_code.splitlines()]} + }, + "Handler": "lambda_function.handler", + "Description": "Test function", + "MemorySize": 128, + "Role": "test-role", + "Runtime": "python2.7" + } + } + } + } + + template_json = json.dumps(template) + cf_conn = boto3.client('cloudformation', 'us-east-1') + cf_conn.create_stack( + StackName="test_stack", + TemplateBody=template_json, + ) + + conn = boto3.client('lambda', 'us-east-1') + result = conn.list_functions() + result['Functions'].should.have.length_of(1) + result['Functions'][0]['Description'].should.equal('Test function') + result['Functions'][0]['Handler'].should.equal('lambda_function.handler') + result['Functions'][0]['MemorySize'].should.equal(128) + result['Functions'][0]['Role'].should.equal('test-role') + result['Functions'][0]['Runtime'].should.equal('python2.7') + + +@mock_cloudformation +@mock_ec2 +def test_nat_gateway(): + ec2_conn = boto3.client('ec2', 'us-east-1') + vpc_id = ec2_conn.create_vpc(CidrBlock="10.0.0.0/16")['Vpc']['VpcId'] + subnet_id = ec2_conn.create_subnet( + CidrBlock='10.0.1.0/24', VpcId=vpc_id)['Subnet']['SubnetId'] + route_table_id = ec2_conn.create_route_table( + VpcId=vpc_id)['RouteTable']['RouteTableId'] + + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "NAT": { + "DependsOn": "vpcgatewayattachment", + "Type": "AWS::EC2::NatGateway", + "Properties": { + "AllocationId": {"Fn::GetAtt": ["EIP", "AllocationId"]}, + "SubnetId": subnet_id + } + }, + "EIP": { + "Type": "AWS::EC2::EIP", + "Properties": { + "Domain": "vpc" + } + }, + "Route": { + "Type": "AWS::EC2::Route", + "Properties": { + "RouteTableId": route_table_id, + "DestinationCidrBlock": "0.0.0.0/0", + "NatGatewayId": {"Ref": "NAT"} + } + }, + "internetgateway": { + "Type": "AWS::EC2::InternetGateway" + }, + "vpcgatewayattachment": { + "Type": "AWS::EC2::VPCGatewayAttachment", + "Properties": { + "InternetGatewayId": { + "Ref": "internetgateway" + }, + "VpcId": vpc_id, + }, + } + } + } + + cf_conn = boto3.client('cloudformation', 'us-east-1') + cf_conn.create_stack( + StackName="test_stack", + TemplateBody=json.dumps(template), + ) + + result = ec2_conn.describe_nat_gateways() + + result['NatGateways'].should.have.length_of(1) + result['NatGateways'][0]['VpcId'].should.equal(vpc_id) + result['NatGateways'][0]['SubnetId'].should.equal(subnet_id) + result['NatGateways'][0]['State'].should.equal('available') + + 
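+# A minimal sketch of the pattern every test in this file follows: mock the
+# services involved, create a stack from a JSON template, then assert against
+# the mocked resources. The stack and queue names below are arbitrary, and
+# this assumes `mock_sqs` is imported alongside the other moto decorators.
+@mock_cloudformation()
+@mock_sqs()
+def test_minimal_stack_sketch():
+    template = {
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Resources": {
+            "MyQueue": {
+                "Type": "AWS::SQS::Queue",
+                "Properties": {"QueueName": "sketch-queue"},
+            },
+        },
+    }
+
+    # Creating the stack should cause the mocked SQS backend to materialize
+    # the queue declared in the template.
+    cf_conn = boto3.client('cloudformation', 'us-east-1')
+    cf_conn.create_stack(
+        StackName='sketch_stack',
+        TemplateBody=json.dumps(template),
+    )
+
+    sqs_conn = boto3.client('sqs', 'us-east-1')
+    queues = sqs_conn.list_queues()
+    queues['QueueUrls'].should.have.length_of(1)
+    queues['QueueUrls'][0].should.contain('sketch-queue')
+
+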
+@mock_cloudformation() +@mock_kms() +def test_stack_kms(): + kms_key_template = { + 'Resources': { + 'kmskey': { + 'Properties': { + 'Description': 'A kms key', + 'EnableKeyRotation': True, + 'Enabled': True, + 'KeyPolicy': 'a policy', + }, + 'Type': 'AWS::KMS::Key' + } + } + } + kms_key_template_json = json.dumps(kms_key_template) + + cf_conn = boto3.client('cloudformation', 'us-east-1') + cf_conn.create_stack( + StackName='test_stack', + TemplateBody=kms_key_template_json, + ) + + kms_conn = boto3.client('kms', 'us-east-1') + keys = kms_conn.list_keys()['Keys'] + len(keys).should.equal(1) + result = kms_conn.describe_key(KeyId=keys[0]['KeyId']) + + result['KeyMetadata']['Enabled'].should.equal(True) + result['KeyMetadata']['KeyUsage'].should.equal('ENCRYPT_DECRYPT') + + +@mock_cloudformation() +@mock_ec2() +def test_stack_spot_fleet(): + conn = boto3.client('ec2', 'us-east-1') + + vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + subnet = conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] + subnet_id = subnet['SubnetId'] + + spot_fleet_template = { + 'Resources': { + "SpotFleet": { + "Type": "AWS::EC2::SpotFleet", + "Properties": { + "SpotFleetRequestConfigData": { + "IamFleetRole": "arn:aws:iam::123456789012:role/fleet", + "SpotPrice": "0.12", + "TargetCapacity": 6, + "AllocationStrategy": "diversified", + "LaunchSpecifications": [ + { + "EbsOptimized": "false", + "InstanceType": 't2.small', + "ImageId": "ami-1234", + "SubnetId": subnet_id, + "WeightedCapacity": "2", + "SpotPrice": "0.13", + }, + { + "EbsOptimized": "true", + "InstanceType": 't2.large', + "ImageId": "ami-1234", + "Monitoring": {"Enabled": "true"}, + "SecurityGroups": [{"GroupId": "sg-123"}], + "SubnetId": subnet_id, + "IamInstanceProfile": {"Arn": "arn:aws:iam::123456789012:role/fleet"}, + "WeightedCapacity": "4", + "SpotPrice": "10.00", + } + ] + } + } + } + } + } + spot_fleet_template_json = json.dumps(spot_fleet_template) + + cf_conn = boto3.client('cloudformation', 'us-east-1') + stack_id = cf_conn.create_stack( + StackName='test_stack', + TemplateBody=spot_fleet_template_json, + )['StackId'] + + stack_resources = cf_conn.list_stack_resources(StackName=stack_id) + stack_resources['StackResourceSummaries'].should.have.length_of(1) + spot_fleet_id = stack_resources[ + 'StackResourceSummaries'][0]['PhysicalResourceId'] + + spot_fleet_requests = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] + len(spot_fleet_requests).should.equal(1) + spot_fleet_request = spot_fleet_requests[0] + spot_fleet_request['SpotFleetRequestState'].should.equal("active") + spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig'] + + spot_fleet_config['SpotPrice'].should.equal('0.12') + spot_fleet_config['TargetCapacity'].should.equal(6) + spot_fleet_config['IamFleetRole'].should.equal( + 'arn:aws:iam::123456789012:role/fleet') + spot_fleet_config['AllocationStrategy'].should.equal('diversified') + spot_fleet_config['FulfilledCapacity'].should.equal(6.0) + + len(spot_fleet_config['LaunchSpecifications']).should.equal(2) + launch_spec = spot_fleet_config['LaunchSpecifications'][0] + + launch_spec['EbsOptimized'].should.equal(False) + launch_spec['ImageId'].should.equal("ami-1234") + launch_spec['InstanceType'].should.equal("t2.small") + launch_spec['SubnetId'].should.equal(subnet_id) + launch_spec['SpotPrice'].should.equal("0.13") + launch_spec['WeightedCapacity'].should.equal(2.0) + + +@mock_cloudformation() +@mock_ec2() 
+def test_stack_spot_fleet_should_figure_out_default_price():
+    conn = boto3.client('ec2', 'us-east-1')
+
+    vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc']
+    subnet = conn.create_subnet(
+        VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet']
+    subnet_id = subnet['SubnetId']
+
+    spot_fleet_template = {
+        'Resources': {
+            "SpotFleet1": {
+                "Type": "AWS::EC2::SpotFleet",
+                "Properties": {
+                    "SpotFleetRequestConfigData": {
+                        "IamFleetRole": "arn:aws:iam::123456789012:role/fleet",
+                        "TargetCapacity": 6,
+                        "AllocationStrategy": "diversified",
+                        "LaunchSpecifications": [
+                            {
+                                "EbsOptimized": "false",
+                                "InstanceType": 't2.small',
+                                "ImageId": "ami-1234",
+                                "SubnetId": subnet_id,
+                                "WeightedCapacity": "2",
+                            },
+                            {
+                                "EbsOptimized": "true",
+                                "InstanceType": 't2.large',
+                                "ImageId": "ami-1234",
+                                "Monitoring": {"Enabled": "true"},
+                                "SecurityGroups": [{"GroupId": "sg-123"}],
+                                "SubnetId": subnet_id,
+                                "IamInstanceProfile": {"Arn": "arn:aws:iam::123456789012:role/fleet"},
+                                "WeightedCapacity": "4",
+                            }
+                        ]
+                    }
+                }
+            }
+        }
+    }
+    spot_fleet_template_json = json.dumps(spot_fleet_template)
+
+    cf_conn = boto3.client('cloudformation', 'us-east-1')
+    stack_id = cf_conn.create_stack(
+        StackName='test_stack',
+        TemplateBody=spot_fleet_template_json,
+    )['StackId']
+
+    stack_resources = cf_conn.list_stack_resources(StackName=stack_id)
+    stack_resources['StackResourceSummaries'].should.have.length_of(1)
+    spot_fleet_id = stack_resources[
+        'StackResourceSummaries'][0]['PhysicalResourceId']
+
+    spot_fleet_requests = conn.describe_spot_fleet_requests(
+        SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs']
+    len(spot_fleet_requests).should.equal(1)
+    spot_fleet_request = spot_fleet_requests[0]
+    spot_fleet_request['SpotFleetRequestState'].should.equal("active")
+    spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig']
+
+    assert 'SpotPrice' not in spot_fleet_config
+    len(spot_fleet_config['LaunchSpecifications']).should.equal(2)
+    launch_spec1 = spot_fleet_config['LaunchSpecifications'][0]
+    launch_spec2 = spot_fleet_config['LaunchSpecifications'][1]
+
+    assert 'SpotPrice' not in launch_spec1
+    assert 'SpotPrice' not in launch_spec2
+
+
+@mock_ec2
+@mock_elbv2
+@mock_cloudformation
+def test_stack_elbv2_resources_integration():
+    alb_template = {
+        "AWSTemplateFormatVersion": "2010-09-09",
+        "Outputs": {
+            "albdns": {
+                "Description": "Load balancer DNS",
+                "Value": {"Fn::GetAtt": ["alb", "DNSName"]},
+            },
+            "albname": {
+                "Description": "Load balancer name",
+                "Value": {"Fn::GetAtt": ["alb", "LoadBalancerName"]},
+            },
+            "canonicalhostedzoneid": {
+                "Description": "Load balancer canonical hosted zone ID",
+                "Value": {"Fn::GetAtt": ["alb", "CanonicalHostedZoneID"]},
+            },
+        },
+        "Resources": {
+            "alb": {
+                "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer",
+                "Properties": {
+                    "Name": "myelbv2",
+                    "Scheme": "internet-facing",
+                    "Subnets": [{
+                        "Ref": "mysubnet",
+                    }],
+                    "SecurityGroups": [{
+                        "Ref": "mysg",
+                    }],
+                    "Type": "application",
+                    "IpAddressType": "ipv4",
+                }
+            },
+            "mytargetgroup1": {
+                "Type": "AWS::ElasticLoadBalancingV2::TargetGroup",
+                "Properties": {
+                    "HealthCheckIntervalSeconds": 30,
+                    "HealthCheckPath": "/status",
+                    "HealthCheckPort": 80,
+                    "HealthCheckProtocol": "HTTP",
+                    "HealthCheckTimeoutSeconds": 5,
+                    "HealthyThresholdCount": 30,
+                    "UnhealthyThresholdCount": 5,
+                    "Matcher": {
+                        "HttpCode": "200,201"
+                    },
+                    "Name": "mytargetgroup1",
+                    "Port": 80,
+                    "Protocol": "HTTP",
+                    "TargetType": "instance",
+                    "Targets": [{
+                        "Id": {
+                            "Ref":
"ec2instance", + "Port": 80, + }, + }], + "VpcId": { + "Ref": "myvpc", + } + } + }, + "mytargetgroup2": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "HealthCheckIntervalSeconds": 30, + "HealthCheckPath": "/status", + "HealthCheckPort": 8080, + "HealthCheckProtocol": "HTTP", + "HealthCheckTimeoutSeconds": 5, + "HealthyThresholdCount": 30, + "UnhealthyThresholdCount": 5, + "Name": "mytargetgroup2", + "Port": 8080, + "Protocol": "HTTP", + "TargetType": "instance", + "Targets": [{ + "Id": { + "Ref": "ec2instance", + "Port": 8080, + }, + }], + "VpcId": { + "Ref": "myvpc", + } + } + }, + "listener": { + "Type": "AWS::ElasticLoadBalancingV2::Listener", + "Properties": { + "DefaultActions": [{ + "Type": "forward", + "TargetGroupArn": {"Ref": "mytargetgroup1"} + }], + "LoadBalancerArn": {"Ref": "alb"}, + "Port": "80", + "Protocol": "HTTP" + } + }, + "myvpc": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + } + }, + "mysubnet": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.0.0/27", + "VpcId": {"Ref": "myvpc"}, + } + }, + "mysg": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupName": "mysg", + "GroupDescription": "test security group", + "VpcId": {"Ref": "myvpc"} + } + }, + "ec2instance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-1234abcd", + "UserData": "some user data", + } + }, + }, + } + alb_template_json = json.dumps(alb_template) + + cfn_conn = boto3.client("cloudformation", "us-west-1") + cfn_conn.create_stack( + StackName="elb_stack", + TemplateBody=alb_template_json, + ) + + elbv2_conn = boto3.client("elbv2", "us-west-1") + + load_balancers = elbv2_conn.describe_load_balancers()['LoadBalancers'] + len(load_balancers).should.equal(1) + load_balancers[0]['LoadBalancerName'].should.equal('myelbv2') + load_balancers[0]['Scheme'].should.equal('internet-facing') + load_balancers[0]['Type'].should.equal('application') + load_balancers[0]['IpAddressType'].should.equal('ipv4') + + target_groups = sorted( + elbv2_conn.describe_target_groups()['TargetGroups'], + key=lambda tg: tg['TargetGroupName']) # sort to do comparison with indexes + len(target_groups).should.equal(2) + target_groups[0]['HealthCheckIntervalSeconds'].should.equal(30) + target_groups[0]['HealthCheckPath'].should.equal('/status') + target_groups[0]['HealthCheckPort'].should.equal('80') + target_groups[0]['HealthCheckProtocol'].should.equal('HTTP') + target_groups[0]['HealthCheckTimeoutSeconds'].should.equal(5) + target_groups[0]['HealthyThresholdCount'].should.equal(30) + target_groups[0]['UnhealthyThresholdCount'].should.equal(5) + target_groups[0]['Matcher'].should.equal({'HttpCode': '200,201'}) + target_groups[0]['TargetGroupName'].should.equal('mytargetgroup1') + target_groups[0]['Port'].should.equal(80) + target_groups[0]['Protocol'].should.equal('HTTP') + target_groups[0]['TargetType'].should.equal('instance') + + target_groups[1]['HealthCheckIntervalSeconds'].should.equal(30) + target_groups[1]['HealthCheckPath'].should.equal('/status') + target_groups[1]['HealthCheckPort'].should.equal('8080') + target_groups[1]['HealthCheckProtocol'].should.equal('HTTP') + target_groups[1]['HealthCheckTimeoutSeconds'].should.equal(5) + target_groups[1]['HealthyThresholdCount'].should.equal(30) + target_groups[1]['UnhealthyThresholdCount'].should.equal(5) + target_groups[1]['Matcher'].should.equal({'HttpCode': '200'}) + target_groups[1]['TargetGroupName'].should.equal('mytargetgroup2') + 
target_groups[1]['Port'].should.equal(8080) + target_groups[1]['Protocol'].should.equal('HTTP') + target_groups[1]['TargetType'].should.equal('instance') + + listeners = elbv2_conn.describe_listeners(LoadBalancerArn=load_balancers[0]['LoadBalancerArn'])['Listeners'] + len(listeners).should.equal(1) + listeners[0]['LoadBalancerArn'].should.equal(load_balancers[0]['LoadBalancerArn']) + listeners[0]['Port'].should.equal(80) + listeners[0]['Protocol'].should.equal('HTTP') + listeners[0]['DefaultActions'].should.equal([{ + "Type": "forward", + "TargetGroupArn": target_groups[0]['TargetGroupArn'] + }]) + + # test outputs + stacks = cfn_conn.describe_stacks(StackName='elb_stack')['Stacks'] + len(stacks).should.equal(1) + + dns = list(filter(lambda item: item['OutputKey'] == 'albdns', stacks[0]['Outputs']))[0] + name = list(filter(lambda item: item['OutputKey'] == 'albname', stacks[0]['Outputs']))[0] + + dns['OutputValue'].should.equal(load_balancers[0]['DNSName']) + name['OutputValue'].should.equal(load_balancers[0]['LoadBalancerName']) diff --git a/tests/test_cloudformation/test_import_value.py b/tests/test_cloudformation/test_import_value.py index 04c2b5801..d702753a6 100644 --- a/tests/test_cloudformation/test_import_value.py +++ b/tests/test_cloudformation/test_import_value.py @@ -1,87 +1,87 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import, division, print_function, unicode_literals - -# Standard library modules -import unittest - -# Third-party modules -import boto3 -from botocore.exceptions import ClientError - -# Package modules -from moto import mock_cloudformation - -AWS_REGION = 'us-west-1' - -SG_STACK_NAME = 'simple-sg-stack' -SG_TEMPLATE = """ -AWSTemplateFormatVersion: 2010-09-09 -Description: Simple test CF template for moto_cloudformation - - -Resources: - SimpleSecurityGroup: - Type: AWS::EC2::SecurityGroup - Description: "A simple security group" - Properties: - GroupName: simple-security-group - GroupDescription: "A simple security group" - SecurityGroupEgress: - - - Description: "Egress to remote HTTPS servers" - CidrIp: 0.0.0.0/0 - IpProtocol: tcp - FromPort: 443 - ToPort: 443 - -Outputs: - SimpleSecurityGroupName: - Value: !GetAtt SimpleSecurityGroup.GroupId - Export: - Name: "SimpleSecurityGroup" - -""" - -EC2_STACK_NAME = 'simple-ec2-stack' -EC2_TEMPLATE = """ ---- -# The latest template format version is "2010-09-09" and as of 2018-04-09 -# is currently the only valid value. 
-AWSTemplateFormatVersion: 2010-09-09 -Description: Simple test CF template for moto_cloudformation - - -Resources: - SimpleInstance: - Type: AWS::EC2::Instance - Properties: - ImageId: ami-03cf127a - InstanceType: t2.micro - SecurityGroups: !Split [',', !ImportValue SimpleSecurityGroup] -""" - - -class TestSimpleInstance(unittest.TestCase): - def test_simple_instance(self): - """Test that we can create a simple CloudFormation stack that imports values from an existing CloudFormation stack""" - with mock_cloudformation(): - client = boto3.client('cloudformation', region_name=AWS_REGION) - client.create_stack(StackName=SG_STACK_NAME, TemplateBody=SG_TEMPLATE) - response = client.create_stack(StackName=EC2_STACK_NAME, TemplateBody=EC2_TEMPLATE) - self.assertIn('StackId', response) - response = client.describe_stacks(StackName=response['StackId']) - self.assertIn('Stacks', response) - stack_info = response['Stacks'] - self.assertEqual(1, len(stack_info)) - self.assertIn('StackName', stack_info[0]) - self.assertEqual(EC2_STACK_NAME, stack_info[0]['StackName']) - - def test_simple_instance_missing_export(self): - """Test that we get an exception if a CloudFormation stack tries to imports a non-existent export value""" - with mock_cloudformation(): - client = boto3.client('cloudformation', region_name=AWS_REGION) - with self.assertRaises(ClientError) as e: - client.create_stack(StackName=EC2_STACK_NAME, TemplateBody=EC2_TEMPLATE) - self.assertIn('Error', e.exception.response) - self.assertIn('Code', e.exception.response['Error']) - self.assertEqual('ExportNotFound', e.exception.response['Error']['Code']) +# -*- coding: utf-8 -*- +from __future__ import absolute_import, division, print_function, unicode_literals + +# Standard library modules +import unittest + +# Third-party modules +import boto3 +from botocore.exceptions import ClientError + +# Package modules +from moto import mock_cloudformation + +AWS_REGION = 'us-west-1' + +SG_STACK_NAME = 'simple-sg-stack' +SG_TEMPLATE = """ +AWSTemplateFormatVersion: 2010-09-09 +Description: Simple test CF template for moto_cloudformation + + +Resources: + SimpleSecurityGroup: + Type: AWS::EC2::SecurityGroup + Description: "A simple security group" + Properties: + GroupName: simple-security-group + GroupDescription: "A simple security group" + SecurityGroupEgress: + - + Description: "Egress to remote HTTPS servers" + CidrIp: 0.0.0.0/0 + IpProtocol: tcp + FromPort: 443 + ToPort: 443 + +Outputs: + SimpleSecurityGroupName: + Value: !GetAtt SimpleSecurityGroup.GroupId + Export: + Name: "SimpleSecurityGroup" + +""" + +EC2_STACK_NAME = 'simple-ec2-stack' +EC2_TEMPLATE = """ +--- +# The latest template format version is "2010-09-09" and as of 2018-04-09 +# is currently the only valid value. 
+AWSTemplateFormatVersion: 2010-09-09
+Description: Simple test CF template for moto_cloudformation
+
+
+Resources:
+  SimpleInstance:
+    Type: AWS::EC2::Instance
+    Properties:
+      ImageId: ami-03cf127a
+      InstanceType: t2.micro
+      SecurityGroups: !Split [',', !ImportValue SimpleSecurityGroup]
+"""
+
+
+class TestSimpleInstance(unittest.TestCase):
+    def test_simple_instance(self):
+        """Test that we can create a simple CloudFormation stack that imports values from an existing CloudFormation stack"""
+        with mock_cloudformation():
+            client = boto3.client('cloudformation', region_name=AWS_REGION)
+            client.create_stack(StackName=SG_STACK_NAME, TemplateBody=SG_TEMPLATE)
+            response = client.create_stack(StackName=EC2_STACK_NAME, TemplateBody=EC2_TEMPLATE)
+            self.assertIn('StackId', response)
+            response = client.describe_stacks(StackName=response['StackId'])
+            self.assertIn('Stacks', response)
+            stack_info = response['Stacks']
+            self.assertEqual(1, len(stack_info))
+            self.assertIn('StackName', stack_info[0])
+            self.assertEqual(EC2_STACK_NAME, stack_info[0]['StackName'])
+
+    def test_simple_instance_missing_export(self):
+        """Test that we get an exception if a CloudFormation stack tries to import a non-existent export value"""
+        with mock_cloudformation():
+            client = boto3.client('cloudformation', region_name=AWS_REGION)
+            with self.assertRaises(ClientError) as e:
+                client.create_stack(StackName=EC2_STACK_NAME, TemplateBody=EC2_TEMPLATE)
+            self.assertIn('Error', e.exception.response)
+            self.assertIn('Code', e.exception.response['Error'])
+            self.assertEqual('ExportNotFound', e.exception.response['Error']['Code'])
diff --git a/tests/test_cloudformation/test_server.py b/tests/test_cloudformation/test_server.py
index de3ab77b5..11f810357 100644
--- a/tests/test_cloudformation/test_server.py
+++ b/tests/test_cloudformation/test_server.py
@@ -1,33 +1,33 @@
-from __future__ import unicode_literals
-
-import json
-from six.moves.urllib.parse import urlencode
-import re
-import sure # noqa
-
-import moto.server as server
-
-'''
-Test the different server responses
-'''
-
-
-def test_cloudformation_server_get():
-    backend = server.create_backend_app("cloudformation")
-    stack_name = 'test stack'
-    test_client = backend.test_client()
-    template_body = {
-        "Resources": {},
-    }
-    create_stack_resp = test_client.action_data("CreateStack", StackName=stack_name,
-                                                TemplateBody=json.dumps(template_body))
-    create_stack_resp.should.match(
-        r".*<CreateStackResponse>.*<StackId>.*</StackId>.*</CreateStackResponse>.*", re.DOTALL)
-    stack_id_from_create_response = re.search(
-        "<StackId>(.*)</StackId>", create_stack_resp).groups()[0]
-
-    list_stacks_resp = test_client.action_data("ListStacks")
-    stack_id_from_list_response = re.search(
-        "<StackId>(.*)</StackId>", list_stacks_resp).groups()[0]
-
-    stack_id_from_create_response.should.equal(stack_id_from_list_response)
+from __future__ import unicode_literals
+
+import json
+from six.moves.urllib.parse import urlencode
+import re
+import sure # noqa
+
+import moto.server as server
+
+'''
+Test the different server responses
+'''
+
+
+def test_cloudformation_server_get():
+    backend = server.create_backend_app("cloudformation")
+    stack_name = 'test stack'
+    test_client = backend.test_client()
+    template_body = {
+        "Resources": {},
+    }
+    create_stack_resp = test_client.action_data("CreateStack", StackName=stack_name,
+                                                TemplateBody=json.dumps(template_body))
+    create_stack_resp.should.match(
+        r".*<CreateStackResponse>.*<StackId>.*</StackId>.*</CreateStackResponse>.*", re.DOTALL)
+    stack_id_from_create_response = re.search(
+        "<StackId>(.*)</StackId>", create_stack_resp).groups()[0]
+
+    list_stacks_resp = test_client.action_data("ListStacks")
+    stack_id_from_list_response = re.search(
+        "<StackId>(.*)</StackId>", list_stacks_resp).groups()[0]
+
+    stack_id_from_create_response.should.equal(stack_id_from_list_response)
diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py
index d25c69cf1..9aea55f54 100644
--- a/tests/test_cloudformation/test_stack_parsing.py
+++ b/tests/test_cloudformation/test_stack_parsing.py
@@ -1,471 +1,471 @@
-from __future__ import unicode_literals
-import json
-import yaml
-
-from mock import patch
-import sure # noqa
-
-from moto.cloudformation.exceptions import ValidationError
-from moto.cloudformation.models import FakeStack
-from moto.cloudformation.parsing import resource_class_from_type, parse_condition, Export
-from moto.sqs.models import Queue
-from moto.s3.models import FakeBucket
-from moto.cloudformation.utils import yaml_tag_constructor
-from boto.cloudformation.stack import Output
-
-
-
-dummy_template = {
-    "AWSTemplateFormatVersion": "2010-09-09",
-
-    "Description": "Create a multi-az, load balanced, Auto Scaled sample web site. The Auto Scaling trigger is based on the CPU utilization of the web servers. The AMI is chosen based on the region in which the stack is run. This example creates a web service running across all availability zones in a region. The instances are load balanced with a simple health check. The web site is available on port 80, however, the instances can be configured to listen on any port (8888 by default). **WARNING** This template creates one or more Amazon EC2 instances. You will be billed for the AWS resources used if you create a stack from this template.",
-
-    "Resources": {
-        "Queue": {
-            "Type": "AWS::SQS::Queue",
-            "Properties": {
-                "QueueName": "my-queue",
-                "VisibilityTimeout": 60,
-            }
-        },
-        "S3Bucket": {
-            "Type": "AWS::S3::Bucket",
-            "DeletionPolicy": "Retain"
-        },
-    },
-}
-
-name_type_template = {
-    "AWSTemplateFormatVersion": "2010-09-09",
-
-    "Description": "Create a multi-az, load balanced, Auto Scaled sample web site. The Auto Scaling trigger is based on the CPU utilization of the web servers. The AMI is chosen based on the region in which the stack is run. This example creates a web service running across all availability zones in a region. The instances are load balanced with a simple health check. The web site is available on port 80, however, the instances can be configured to listen on any port (8888 by default). **WARNING** This template creates one or more Amazon EC2 instances. You will be billed for the AWS resources used if you create a stack from this template.",
-
-    "Resources": {
-        "Queue": {
-            "Type": "AWS::SQS::Queue",
-            "Properties": {
-                "VisibilityTimeout": 60,
-            }
-        },
-    },
-}
-
-output_dict = {
-    "Outputs": {
-        "Output1": {
-            "Value": {"Ref": "Queue"},
-            "Description": "This is a description."
- } - } -} - -bad_output = { - "Outputs": { - "Output1": { - "Value": {"Fn::GetAtt": ["Queue", "InvalidAttribute"]} - } - } -} - -get_attribute_output = { - "Outputs": { - "Output1": { - "Value": {"Fn::GetAtt": ["Queue", "QueueName"]} - } - } -} - -get_availability_zones_output = { - "Outputs": { - "Output1": { - "Value": {"Fn::GetAZs": ""} - } - } -} - -split_select_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "Queue": { - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": {"Fn::Select": [ "1", {"Fn::Split": [ "-", "123-myqueue" ] } ] }, - "VisibilityTimeout": 60, - } - } - } -} - -sub_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "Queue1": { - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": {"Fn::Sub": '${AWS::StackName}-queue-${!Literal}'}, - "VisibilityTimeout": 60, - } - }, - "Queue2": { - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": {"Fn::Sub": '${Queue1.QueueName}'}, - "VisibilityTimeout": 60, - } - }, - } -} - -export_value_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "Queue": { - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": {"Fn::Sub": '${AWS::StackName}-queue'}, - "VisibilityTimeout": 60, - } - } - }, - "Outputs": { - "Output1": { - "Value": "value", - "Export": {"Name": 'queue-us-west-1'} - } - } -} - -import_value_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Resources": { - "Queue": { - "Type": "AWS::SQS::Queue", - "Properties": { - "QueueName": {"Fn::ImportValue": 'queue-us-west-1'}, - "VisibilityTimeout": 60, - } - } - } -} - -outputs_template = dict(list(dummy_template.items()) + - list(output_dict.items())) -bad_outputs_template = dict( - list(dummy_template.items()) + list(bad_output.items())) -get_attribute_outputs_template = dict( - list(dummy_template.items()) + list(get_attribute_output.items())) -get_availability_zones_template = dict( - list(dummy_template.items()) + list(get_availability_zones_output.items())) - -dummy_template_json = json.dumps(dummy_template) -name_type_template_json = json.dumps(name_type_template) -output_type_template_json = json.dumps(outputs_template) -bad_output_template_json = json.dumps(bad_outputs_template) -get_attribute_outputs_template_json = json.dumps( - get_attribute_outputs_template) -get_availability_zones_template_json = json.dumps( - get_availability_zones_template) -split_select_template_json = json.dumps(split_select_template) -sub_template_json = json.dumps(sub_template) -export_value_template_json = json.dumps(export_value_template) -import_value_template_json = json.dumps(import_value_template) - - -def test_parse_stack_resources(): - stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=dummy_template_json, - parameters={}, - region_name='us-west-1') - - stack.resource_map.should.have.length_of(2) - - queue = stack.resource_map['Queue'] - queue.should.be.a(Queue) - queue.name.should.equal("my-queue") - - bucket = stack.resource_map['S3Bucket'] - bucket.should.be.a(FakeBucket) - bucket.physical_resource_id.should.equal(bucket.name) - - -@patch("moto.cloudformation.parsing.logger") -def test_missing_resource_logs(logger): - resource_class_from_type("foobar") - logger.warning.assert_called_with( - 'No Moto CloudFormation support for %s', 'foobar') - - -def test_parse_stack_with_name_type_resource(): - stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=name_type_template_json, - parameters={}, - region_name='us-west-1') - - 
stack.resource_map.should.have.length_of(1) - list(stack.resource_map.keys())[0].should.equal('Queue') - queue = list(stack.resource_map.values())[0] - queue.should.be.a(Queue) - - -def test_parse_stack_with_yaml_template(): - stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=yaml.dump(name_type_template), - parameters={}, - region_name='us-west-1') - - stack.resource_map.should.have.length_of(1) - list(stack.resource_map.keys())[0].should.equal('Queue') - queue = list(stack.resource_map.values())[0] - queue.should.be.a(Queue) - - -def test_parse_stack_with_outputs(): - stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=output_type_template_json, - parameters={}, - region_name='us-west-1') - - stack.output_map.should.have.length_of(1) - list(stack.output_map.keys())[0].should.equal('Output1') - output = list(stack.output_map.values())[0] - output.should.be.a(Output) - output.description.should.equal("This is a description.") - - -def test_parse_stack_with_get_attribute_outputs(): - stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=get_attribute_outputs_template_json, - parameters={}, - region_name='us-west-1') - - stack.output_map.should.have.length_of(1) - list(stack.output_map.keys())[0].should.equal('Output1') - output = list(stack.output_map.values())[0] - output.should.be.a(Output) - output.value.should.equal("my-queue") - -def test_parse_stack_with_get_attribute_kms(): - from .fixtures.kms_key import template - - template_json = json.dumps(template) - stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=template_json, - parameters={}, - region_name='us-west-1') - - stack.output_map.should.have.length_of(1) - list(stack.output_map.keys())[0].should.equal('KeyArn') - output = list(stack.output_map.values())[0] - output.should.be.a(Output) - -def test_parse_stack_with_get_availability_zones(): - stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=get_availability_zones_template_json, - parameters={}, - region_name='us-east-1') - - stack.output_map.should.have.length_of(1) - list(stack.output_map.keys())[0].should.equal('Output1') - output = list(stack.output_map.values())[0] - output.should.be.a(Output) - output.value.should.equal([ "us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d" ]) - - -def test_parse_stack_with_bad_get_attribute_outputs(): - FakeStack.when.called_with( - "test_id", "test_stack", bad_output_template_json, {}, "us-west-1").should.throw(ValidationError) - - -def test_parse_equals_condition(): - parse_condition( - condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, - resources_map={"EnvType": "prod"}, - condition_map={}, - ).should.equal(True) - - parse_condition( - condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, - resources_map={"EnvType": "staging"}, - condition_map={}, - ).should.equal(False) - - -def test_parse_not_condition(): - parse_condition( - condition={ - "Fn::Not": [{ - "Fn::Equals": [{"Ref": "EnvType"}, "prod"] - }] - }, - resources_map={"EnvType": "prod"}, - condition_map={}, - ).should.equal(False) - - parse_condition( - condition={ - "Fn::Not": [{ - "Fn::Equals": [{"Ref": "EnvType"}, "prod"] - }] - }, - resources_map={"EnvType": "staging"}, - condition_map={}, - ).should.equal(True) - - -def test_parse_and_condition(): - parse_condition( - condition={ - "Fn::And": [ - {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, - {"Fn::Equals": [{"Ref": "EnvType"}, "staging"]}, - ] - }, - resources_map={"EnvType": "prod"}, - condition_map={}, - 
).should.equal(False) - - parse_condition( - condition={ - "Fn::And": [ - {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, - {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, - ] - }, - resources_map={"EnvType": "prod"}, - condition_map={}, - ).should.equal(True) - - -def test_parse_or_condition(): - parse_condition( - condition={ - "Fn::Or": [ - {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, - {"Fn::Equals": [{"Ref": "EnvType"}, "staging"]}, - ] - }, - resources_map={"EnvType": "prod"}, - condition_map={}, - ).should.equal(True) - - parse_condition( - condition={ - "Fn::Or": [ - {"Fn::Equals": [{"Ref": "EnvType"}, "staging"]}, - {"Fn::Equals": [{"Ref": "EnvType"}, "staging"]}, - ] - }, - resources_map={"EnvType": "prod"}, - condition_map={}, - ).should.equal(False) - - -def test_reference_other_conditions(): - parse_condition( - condition={"Fn::Not": [{"Condition": "OtherCondition"}]}, - resources_map={}, - condition_map={"OtherCondition": True}, - ).should.equal(False) - - -def test_parse_split_and_select(): - stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=split_select_template_json, - parameters={}, - region_name='us-west-1') - - stack.resource_map.should.have.length_of(1) - queue = stack.resource_map['Queue'] - queue.name.should.equal("myqueue") - - -def test_sub(): - stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=sub_template_json, - parameters={}, - region_name='us-west-1') - - queue1 = stack.resource_map['Queue1'] - queue2 = stack.resource_map['Queue2'] - queue2.name.should.equal(queue1.name) - - -def test_import(): - export_stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=export_value_template_json, - parameters={}, - region_name='us-west-1') - import_stack = FakeStack( - stack_id="test_id", - name="test_stack", - template=import_value_template_json, - parameters={}, - region_name='us-west-1', - cross_stack_resources={export_stack.exports[0].value: export_stack.exports[0]}) - - queue = import_stack.resource_map['Queue'] - queue.name.should.equal("value") - - - -def test_short_form_func_in_yaml_teamplate(): - template = """--- - KeyB64: !Base64 valueToEncode - KeyRef: !Ref foo - KeyAnd: !And - - A - - B - KeyEquals: !Equals [A, B] - KeyIf: !If [A, B, C] - KeyNot: !Not [A] - KeyOr: !Or [A, B] - KeyFindInMap: !FindInMap [A, B, C] - KeyGetAtt: !GetAtt A.B - KeyGetAZs: !GetAZs A - KeyImportValue: !ImportValue A - KeyJoin: !Join [ ":", [A, B, C] ] - KeySelect: !Select [A, B] - KeySplit: !Split [A, B] - KeySub: !Sub A - """ - yaml.add_multi_constructor('', yaml_tag_constructor) - template_dict = yaml.load(template) - key_and_expects = [ - ['KeyRef', {'Ref': 'foo'}], - ['KeyB64', {'Fn::Base64': 'valueToEncode'}], - ['KeyAnd', {'Fn::And': ['A', 'B']}], - ['KeyEquals', {'Fn::Equals': ['A', 'B']}], - ['KeyIf', {'Fn::If': ['A', 'B', 'C']}], - ['KeyNot', {'Fn::Not': ['A']}], - ['KeyOr', {'Fn::Or': ['A', 'B']}], - ['KeyFindInMap', {'Fn::FindInMap': ['A', 'B', 'C']}], - ['KeyGetAtt', {'Fn::GetAtt': ['A', 'B']}], - ['KeyGetAZs', {'Fn::GetAZs': 'A'}], - ['KeyImportValue', {'Fn::ImportValue': 'A'}], - ['KeyJoin', {'Fn::Join': [ ":", [ 'A', 'B', 'C' ] ]}], - ['KeySelect', {'Fn::Select': ['A', 'B']}], - ['KeySplit', {'Fn::Split': ['A', 'B']}], - ['KeySub', {'Fn::Sub': 'A'}], - ] - for k, v in key_and_expects: - template_dict.should.have.key(k).which.should.be.equal(v) +from __future__ import unicode_literals +import json +import yaml + +from mock import patch +import sure # noqa + +from moto.cloudformation.exceptions import 
ValidationError +from moto.cloudformation.models import FakeStack +from moto.cloudformation.parsing import resource_class_from_type, parse_condition, Export +from moto.sqs.models import Queue +from moto.s3.models import FakeBucket +from moto.cloudformation.utils import yaml_tag_constructor +from boto.cloudformation.stack import Output + + + +dummy_template = { + "AWSTemplateFormatVersion": "2010-09-09", + + "Description": "Create a multi-az, load balanced, Auto Scaled sample web site. The Auto Scaling trigger is based on the CPU utilization of the web servers. The AMI is chosen based on the region in which the stack is run. This example creates a web service running across all availability zones in a region. The instances are load balanced with a simple health check. The web site is available on port 80, however, the instances can be configured to listen on any port (8888 by default). **WARNING** This template creates one or more Amazon EC2 instances. You will be billed for the AWS resources used if you create a stack from this template.", + + "Resources": { + "Queue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": "my-queue", + "VisibilityTimeout": 60, + } + }, + "S3Bucket": { + "Type": "AWS::S3::Bucket", + "DeletionPolicy": "Retain" + }, + }, +} + +name_type_template = { + "AWSTemplateFormatVersion": "2010-09-09", + + "Description": "Create a multi-az, load balanced, Auto Scaled sample web site. The Auto Scaling trigger is based on the CPU utilization of the web servers. The AMI is chosen based on the region in which the stack is run. This example creates a web service running across all availability zones in a region. The instances are load balanced with a simple health check. The web site is available on port 80, however, the instances can be configured to listen on any port (8888 by default). **WARNING** This template creates one or more Amazon EC2 instances. You will be billed for the AWS resources used if you create a stack from this template.", + + "Resources": { + "Queue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "VisibilityTimeout": 60, + } + }, + }, +} + +output_dict = { + "Outputs": { + "Output1": { + "Value": {"Ref": "Queue"}, + "Description": "This is a description." 
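+            # An Output's Description is optional; FakeStack copies it onto the
+            # boto Output objects it builds (asserted in test_parse_stack_with_outputs).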
+ } + } +} + +bad_output = { + "Outputs": { + "Output1": { + "Value": {"Fn::GetAtt": ["Queue", "InvalidAttribute"]} + } + } +} + +get_attribute_output = { + "Outputs": { + "Output1": { + "Value": {"Fn::GetAtt": ["Queue", "QueueName"]} + } + } +} + +get_availability_zones_output = { + "Outputs": { + "Output1": { + "Value": {"Fn::GetAZs": ""} + } + } +} + +split_select_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Queue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": {"Fn::Select": [ "1", {"Fn::Split": [ "-", "123-myqueue" ] } ] }, + "VisibilityTimeout": 60, + } + } + } +} + +sub_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Queue1": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": {"Fn::Sub": '${AWS::StackName}-queue-${!Literal}'}, + "VisibilityTimeout": 60, + } + }, + "Queue2": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": {"Fn::Sub": '${Queue1.QueueName}'}, + "VisibilityTimeout": 60, + } + }, + } +} + +export_value_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Queue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": {"Fn::Sub": '${AWS::StackName}-queue'}, + "VisibilityTimeout": 60, + } + } + }, + "Outputs": { + "Output1": { + "Value": "value", + "Export": {"Name": 'queue-us-west-1'} + } + } +} + +import_value_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "Queue": { + "Type": "AWS::SQS::Queue", + "Properties": { + "QueueName": {"Fn::ImportValue": 'queue-us-west-1'}, + "VisibilityTimeout": 60, + } + } + } +} + +outputs_template = dict(list(dummy_template.items()) + + list(output_dict.items())) +bad_outputs_template = dict( + list(dummy_template.items()) + list(bad_output.items())) +get_attribute_outputs_template = dict( + list(dummy_template.items()) + list(get_attribute_output.items())) +get_availability_zones_template = dict( + list(dummy_template.items()) + list(get_availability_zones_output.items())) + +dummy_template_json = json.dumps(dummy_template) +name_type_template_json = json.dumps(name_type_template) +output_type_template_json = json.dumps(outputs_template) +bad_output_template_json = json.dumps(bad_outputs_template) +get_attribute_outputs_template_json = json.dumps( + get_attribute_outputs_template) +get_availability_zones_template_json = json.dumps( + get_availability_zones_template) +split_select_template_json = json.dumps(split_select_template) +sub_template_json = json.dumps(sub_template) +export_value_template_json = json.dumps(export_value_template) +import_value_template_json = json.dumps(import_value_template) + + +def test_parse_stack_resources(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=dummy_template_json, + parameters={}, + region_name='us-west-1') + + stack.resource_map.should.have.length_of(2) + + queue = stack.resource_map['Queue'] + queue.should.be.a(Queue) + queue.name.should.equal("my-queue") + + bucket = stack.resource_map['S3Bucket'] + bucket.should.be.a(FakeBucket) + bucket.physical_resource_id.should.equal(bucket.name) + + +@patch("moto.cloudformation.parsing.logger") +def test_missing_resource_logs(logger): + resource_class_from_type("foobar") + logger.warning.assert_called_with( + 'No Moto CloudFormation support for %s', 'foobar') + + +def test_parse_stack_with_name_type_resource(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=name_type_template_json, + parameters={}, + region_name='us-west-1') + + 
stack.resource_map.should.have.length_of(1) + list(stack.resource_map.keys())[0].should.equal('Queue') + queue = list(stack.resource_map.values())[0] + queue.should.be.a(Queue) + + +def test_parse_stack_with_yaml_template(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=yaml.dump(name_type_template), + parameters={}, + region_name='us-west-1') + + stack.resource_map.should.have.length_of(1) + list(stack.resource_map.keys())[0].should.equal('Queue') + queue = list(stack.resource_map.values())[0] + queue.should.be.a(Queue) + + +def test_parse_stack_with_outputs(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=output_type_template_json, + parameters={}, + region_name='us-west-1') + + stack.output_map.should.have.length_of(1) + list(stack.output_map.keys())[0].should.equal('Output1') + output = list(stack.output_map.values())[0] + output.should.be.a(Output) + output.description.should.equal("This is a description.") + + +def test_parse_stack_with_get_attribute_outputs(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=get_attribute_outputs_template_json, + parameters={}, + region_name='us-west-1') + + stack.output_map.should.have.length_of(1) + list(stack.output_map.keys())[0].should.equal('Output1') + output = list(stack.output_map.values())[0] + output.should.be.a(Output) + output.value.should.equal("my-queue") + +def test_parse_stack_with_get_attribute_kms(): + from .fixtures.kms_key import template + + template_json = json.dumps(template) + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=template_json, + parameters={}, + region_name='us-west-1') + + stack.output_map.should.have.length_of(1) + list(stack.output_map.keys())[0].should.equal('KeyArn') + output = list(stack.output_map.values())[0] + output.should.be.a(Output) + +def test_parse_stack_with_get_availability_zones(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=get_availability_zones_template_json, + parameters={}, + region_name='us-east-1') + + stack.output_map.should.have.length_of(1) + list(stack.output_map.keys())[0].should.equal('Output1') + output = list(stack.output_map.values())[0] + output.should.be.a(Output) + output.value.should.equal([ "us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d" ]) + + +def test_parse_stack_with_bad_get_attribute_outputs(): + FakeStack.when.called_with( + "test_id", "test_stack", bad_output_template_json, {}, "us-west-1").should.throw(ValidationError) + + +def test_parse_equals_condition(): + parse_condition( + condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, + resources_map={"EnvType": "prod"}, + condition_map={}, + ).should.equal(True) + + parse_condition( + condition={"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, + resources_map={"EnvType": "staging"}, + condition_map={}, + ).should.equal(False) + + +def test_parse_not_condition(): + parse_condition( + condition={ + "Fn::Not": [{ + "Fn::Equals": [{"Ref": "EnvType"}, "prod"] + }] + }, + resources_map={"EnvType": "prod"}, + condition_map={}, + ).should.equal(False) + + parse_condition( + condition={ + "Fn::Not": [{ + "Fn::Equals": [{"Ref": "EnvType"}, "prod"] + }] + }, + resources_map={"EnvType": "staging"}, + condition_map={}, + ).should.equal(True) + + +def test_parse_and_condition(): + parse_condition( + condition={ + "Fn::And": [ + {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, + {"Fn::Equals": [{"Ref": "EnvType"}, "staging"]}, + ] + }, + resources_map={"EnvType": "prod"}, + condition_map={}, + 
).should.equal(False) + + parse_condition( + condition={ + "Fn::And": [ + {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, + {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, + ] + }, + resources_map={"EnvType": "prod"}, + condition_map={}, + ).should.equal(True) + + +def test_parse_or_condition(): + parse_condition( + condition={ + "Fn::Or": [ + {"Fn::Equals": [{"Ref": "EnvType"}, "prod"]}, + {"Fn::Equals": [{"Ref": "EnvType"}, "staging"]}, + ] + }, + resources_map={"EnvType": "prod"}, + condition_map={}, + ).should.equal(True) + + parse_condition( + condition={ + "Fn::Or": [ + {"Fn::Equals": [{"Ref": "EnvType"}, "staging"]}, + {"Fn::Equals": [{"Ref": "EnvType"}, "staging"]}, + ] + }, + resources_map={"EnvType": "prod"}, + condition_map={}, + ).should.equal(False) + + +def test_reference_other_conditions(): + parse_condition( + condition={"Fn::Not": [{"Condition": "OtherCondition"}]}, + resources_map={}, + condition_map={"OtherCondition": True}, + ).should.equal(False) + + +def test_parse_split_and_select(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=split_select_template_json, + parameters={}, + region_name='us-west-1') + + stack.resource_map.should.have.length_of(1) + queue = stack.resource_map['Queue'] + queue.name.should.equal("myqueue") + + +def test_sub(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=sub_template_json, + parameters={}, + region_name='us-west-1') + + queue1 = stack.resource_map['Queue1'] + queue2 = stack.resource_map['Queue2'] + queue2.name.should.equal(queue1.name) + + +def test_import(): + export_stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=export_value_template_json, + parameters={}, + region_name='us-west-1') + import_stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=import_value_template_json, + parameters={}, + region_name='us-west-1', + cross_stack_resources={export_stack.exports[0].value: export_stack.exports[0]}) + + queue = import_stack.resource_map['Queue'] + queue.name.should.equal("value") + + + +def test_short_form_func_in_yaml_teamplate(): + template = """--- + KeyB64: !Base64 valueToEncode + KeyRef: !Ref foo + KeyAnd: !And + - A + - B + KeyEquals: !Equals [A, B] + KeyIf: !If [A, B, C] + KeyNot: !Not [A] + KeyOr: !Or [A, B] + KeyFindInMap: !FindInMap [A, B, C] + KeyGetAtt: !GetAtt A.B + KeyGetAZs: !GetAZs A + KeyImportValue: !ImportValue A + KeyJoin: !Join [ ":", [A, B, C] ] + KeySelect: !Select [A, B] + KeySplit: !Split [A, B] + KeySub: !Sub A + """ + yaml.add_multi_constructor('', yaml_tag_constructor) + template_dict = yaml.load(template) + key_and_expects = [ + ['KeyRef', {'Ref': 'foo'}], + ['KeyB64', {'Fn::Base64': 'valueToEncode'}], + ['KeyAnd', {'Fn::And': ['A', 'B']}], + ['KeyEquals', {'Fn::Equals': ['A', 'B']}], + ['KeyIf', {'Fn::If': ['A', 'B', 'C']}], + ['KeyNot', {'Fn::Not': ['A']}], + ['KeyOr', {'Fn::Or': ['A', 'B']}], + ['KeyFindInMap', {'Fn::FindInMap': ['A', 'B', 'C']}], + ['KeyGetAtt', {'Fn::GetAtt': ['A', 'B']}], + ['KeyGetAZs', {'Fn::GetAZs': 'A'}], + ['KeyImportValue', {'Fn::ImportValue': 'A'}], + ['KeyJoin', {'Fn::Join': [ ":", [ 'A', 'B', 'C' ] ]}], + ['KeySelect', {'Fn::Select': ['A', 'B']}], + ['KeySplit', {'Fn::Split': ['A', 'B']}], + ['KeySub', {'Fn::Sub': 'A'}], + ] + for k, v in key_and_expects: + template_dict.should.have.key(k).which.should.be.equal(v) diff --git a/tests/test_cloudwatch/test_cloudwatch.py b/tests/test_cloudwatch/test_cloudwatch.py index a0f3871c0..2ba233735 100644 --- 
a/tests/test_cloudwatch/test_cloudwatch.py +++ b/tests/test_cloudwatch/test_cloudwatch.py @@ -1,123 +1,123 @@ -import boto -from boto.ec2.cloudwatch.alarm import MetricAlarm -import boto3 -from datetime import datetime, timedelta -import pytz -import sure # noqa - -from moto import mock_cloudwatch_deprecated - - -def alarm_fixture(name="tester", action=None): - action = action or ['arn:alarm'] - return MetricAlarm( - name=name, - namespace="{0}_namespace".format(name), - metric="{0}_metric".format(name), - comparison='>=', - threshold=2.0, - period=60, - evaluation_periods=5, - statistic='Average', - description='A test', - dimensions={'InstanceId': ['i-0123456,i-0123457']}, - alarm_actions=action, - ok_actions=['arn:ok'], - insufficient_data_actions=['arn:insufficient'], - unit='Seconds', - ) - - -@mock_cloudwatch_deprecated -def test_create_alarm(): - conn = boto.connect_cloudwatch() - - alarm = alarm_fixture() - conn.create_alarm(alarm) - - alarms = conn.describe_alarms() - alarms.should.have.length_of(1) - alarm = alarms[0] - alarm.name.should.equal('tester') - alarm.namespace.should.equal('tester_namespace') - alarm.metric.should.equal('tester_metric') - alarm.comparison.should.equal('>=') - alarm.threshold.should.equal(2.0) - alarm.period.should.equal(60) - alarm.evaluation_periods.should.equal(5) - alarm.statistic.should.equal('Average') - alarm.description.should.equal('A test') - dict(alarm.dimensions).should.equal( - {'InstanceId': ['i-0123456,i-0123457']}) - list(alarm.alarm_actions).should.equal(['arn:alarm']) - list(alarm.ok_actions).should.equal(['arn:ok']) - list(alarm.insufficient_data_actions).should.equal(['arn:insufficient']) - alarm.unit.should.equal('Seconds') - - -@mock_cloudwatch_deprecated -def test_delete_alarm(): - conn = boto.connect_cloudwatch() - - alarms = conn.describe_alarms() - alarms.should.have.length_of(0) - - alarm = alarm_fixture() - conn.create_alarm(alarm) - - alarms = conn.describe_alarms() - alarms.should.have.length_of(1) - - alarms[0].delete() - - alarms = conn.describe_alarms() - alarms.should.have.length_of(0) - - -@mock_cloudwatch_deprecated -def test_put_metric_data(): - conn = boto.connect_cloudwatch() - - conn.put_metric_data( - namespace='tester', - name='metric', - value=1.5, - dimensions={'InstanceId': ['i-0123456,i-0123457']}, - ) - - metrics = conn.list_metrics() - metrics.should.have.length_of(1) - metric = metrics[0] - metric.namespace.should.equal('tester') - metric.name.should.equal('metric') - dict(metric.dimensions).should.equal( - {'InstanceId': ['i-0123456,i-0123457']}) - - -@mock_cloudwatch_deprecated -def test_describe_alarms(): - conn = boto.connect_cloudwatch() - - alarms = conn.describe_alarms() - alarms.should.have.length_of(0) - - conn.create_alarm(alarm_fixture(name="nfoobar", action="afoobar")) - conn.create_alarm(alarm_fixture(name="nfoobaz", action="afoobaz")) - conn.create_alarm(alarm_fixture(name="nbarfoo", action="abarfoo")) - conn.create_alarm(alarm_fixture(name="nbazfoo", action="abazfoo")) - - alarms = conn.describe_alarms() - alarms.should.have.length_of(4) - alarms = conn.describe_alarms(alarm_name_prefix="nfoo") - alarms.should.have.length_of(2) - alarms = conn.describe_alarms( - alarm_names=["nfoobar", "nbarfoo", "nbazfoo"]) - alarms.should.have.length_of(3) - alarms = conn.describe_alarms(action_prefix="afoo") - alarms.should.have.length_of(2) - - for alarm in conn.describe_alarms(): - alarm.delete() - - alarms = conn.describe_alarms() - alarms.should.have.length_of(0) +import boto +from 
boto.ec2.cloudwatch.alarm import MetricAlarm +import boto3 +from datetime import datetime, timedelta +import pytz +import sure # noqa + +from moto import mock_cloudwatch_deprecated + + +def alarm_fixture(name="tester", action=None): + action = action or ['arn:alarm'] + return MetricAlarm( + name=name, + namespace="{0}_namespace".format(name), + metric="{0}_metric".format(name), + comparison='>=', + threshold=2.0, + period=60, + evaluation_periods=5, + statistic='Average', + description='A test', + dimensions={'InstanceId': ['i-0123456,i-0123457']}, + alarm_actions=action, + ok_actions=['arn:ok'], + insufficient_data_actions=['arn:insufficient'], + unit='Seconds', + ) + + +@mock_cloudwatch_deprecated +def test_create_alarm(): + conn = boto.connect_cloudwatch() + + alarm = alarm_fixture() + conn.create_alarm(alarm) + + alarms = conn.describe_alarms() + alarms.should.have.length_of(1) + alarm = alarms[0] + alarm.name.should.equal('tester') + alarm.namespace.should.equal('tester_namespace') + alarm.metric.should.equal('tester_metric') + alarm.comparison.should.equal('>=') + alarm.threshold.should.equal(2.0) + alarm.period.should.equal(60) + alarm.evaluation_periods.should.equal(5) + alarm.statistic.should.equal('Average') + alarm.description.should.equal('A test') + dict(alarm.dimensions).should.equal( + {'InstanceId': ['i-0123456,i-0123457']}) + list(alarm.alarm_actions).should.equal(['arn:alarm']) + list(alarm.ok_actions).should.equal(['arn:ok']) + list(alarm.insufficient_data_actions).should.equal(['arn:insufficient']) + alarm.unit.should.equal('Seconds') + + +@mock_cloudwatch_deprecated +def test_delete_alarm(): + conn = boto.connect_cloudwatch() + + alarms = conn.describe_alarms() + alarms.should.have.length_of(0) + + alarm = alarm_fixture() + conn.create_alarm(alarm) + + alarms = conn.describe_alarms() + alarms.should.have.length_of(1) + + alarms[0].delete() + + alarms = conn.describe_alarms() + alarms.should.have.length_of(0) + + +@mock_cloudwatch_deprecated +def test_put_metric_data(): + conn = boto.connect_cloudwatch() + + conn.put_metric_data( + namespace='tester', + name='metric', + value=1.5, + dimensions={'InstanceId': ['i-0123456,i-0123457']}, + ) + + metrics = conn.list_metrics() + metrics.should.have.length_of(1) + metric = metrics[0] + metric.namespace.should.equal('tester') + metric.name.should.equal('metric') + dict(metric.dimensions).should.equal( + {'InstanceId': ['i-0123456,i-0123457']}) + + +@mock_cloudwatch_deprecated +def test_describe_alarms(): + conn = boto.connect_cloudwatch() + + alarms = conn.describe_alarms() + alarms.should.have.length_of(0) + + conn.create_alarm(alarm_fixture(name="nfoobar", action="afoobar")) + conn.create_alarm(alarm_fixture(name="nfoobaz", action="afoobaz")) + conn.create_alarm(alarm_fixture(name="nbarfoo", action="abarfoo")) + conn.create_alarm(alarm_fixture(name="nbazfoo", action="abazfoo")) + + alarms = conn.describe_alarms() + alarms.should.have.length_of(4) + alarms = conn.describe_alarms(alarm_name_prefix="nfoo") + alarms.should.have.length_of(2) + alarms = conn.describe_alarms( + alarm_names=["nfoobar", "nbarfoo", "nbazfoo"]) + alarms.should.have.length_of(3) + alarms = conn.describe_alarms(action_prefix="afoo") + alarms.should.have.length_of(2) + + for alarm in conn.describe_alarms(): + alarm.delete() + + alarms = conn.describe_alarms() + alarms.should.have.length_of(0) diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py index 40b5eee08..3c205f400 100755 --- 
a/tests/test_cloudwatch/test_cloudwatch_boto3.py +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -1,224 +1,224 @@ -from __future__ import unicode_literals - -import boto3 -from botocore.exceptions import ClientError -from datetime import datetime, timedelta -import pytz -import sure # noqa - -from moto import mock_cloudwatch - - -@mock_cloudwatch -def test_put_list_dashboard(): - client = boto3.client('cloudwatch', region_name='eu-central-1') - widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}' - - client.put_dashboard(DashboardName='test1', DashboardBody=widget) - resp = client.list_dashboards() - - len(resp['DashboardEntries']).should.equal(1) - - -@mock_cloudwatch -def test_put_list_prefix_nomatch_dashboard(): - client = boto3.client('cloudwatch', region_name='eu-central-1') - widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}' - - client.put_dashboard(DashboardName='test1', DashboardBody=widget) - resp = client.list_dashboards(DashboardNamePrefix='nomatch') - - len(resp['DashboardEntries']).should.equal(0) - - -@mock_cloudwatch -def test_delete_dashboard(): - client = boto3.client('cloudwatch', region_name='eu-central-1') - widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}' - - client.put_dashboard(DashboardName='test1', DashboardBody=widget) - client.put_dashboard(DashboardName='test2', DashboardBody=widget) - client.put_dashboard(DashboardName='test3', DashboardBody=widget) - client.delete_dashboards(DashboardNames=['test2', 'test1']) - - resp = client.list_dashboards(DashboardNamePrefix='test3') - len(resp['DashboardEntries']).should.equal(1) - - -@mock_cloudwatch -def test_delete_dashboard_fail(): - client = boto3.client('cloudwatch', region_name='eu-central-1') - widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}' - - client.put_dashboard(DashboardName='test1', DashboardBody=widget) - client.put_dashboard(DashboardName='test2', DashboardBody=widget) - client.put_dashboard(DashboardName='test3', DashboardBody=widget) - # Doesnt delete anything if all dashboards to be deleted do not exist - try: - client.delete_dashboards(DashboardNames=['test2', 'test1', 'test_no_match']) - except ClientError as err: - err.response['Error']['Code'].should.equal('ResourceNotFound') - else: - raise RuntimeError('Should of raised error') - - resp = client.list_dashboards() - len(resp['DashboardEntries']).should.equal(3) - - -@mock_cloudwatch -def test_get_dashboard(): - client = boto3.client('cloudwatch', region_name='eu-central-1') - widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}' - client.put_dashboard(DashboardName='test1', DashboardBody=widget) - - resp = client.get_dashboard(DashboardName='test1') - resp.should.contain('DashboardArn') - resp.should.contain('DashboardBody') - resp['DashboardName'].should.equal('test1') - - -@mock_cloudwatch -def test_get_dashboard_fail(): - client = boto3.client('cloudwatch', region_name='eu-central-1') - - try: - client.get_dashboard(DashboardName='test1') - except ClientError as err: - err.response['Error']['Code'].should.equal('ResourceNotFound') - else: - raise RuntimeError('Should of raised error') - - -@mock_cloudwatch -def test_alarm_state(): - client = 
boto3.client('cloudwatch', region_name='eu-central-1') - - client.put_metric_alarm( - AlarmName='testalarm1', - MetricName='cpu', - Namespace='blah', - Period=10, - EvaluationPeriods=5, - Statistic='Average', - Threshold=2, - ComparisonOperator='GreaterThanThreshold', - ) - client.put_metric_alarm( - AlarmName='testalarm2', - MetricName='cpu', - Namespace='blah', - Period=10, - EvaluationPeriods=5, - Statistic='Average', - Threshold=2, - ComparisonOperator='GreaterThanThreshold', - ) - - # This is tested implicitly as if it doesnt work the rest will die - client.set_alarm_state( - AlarmName='testalarm1', - StateValue='ALARM', - StateReason='testreason', - StateReasonData='{"some": "json_data"}' - ) - - resp = client.describe_alarms( - StateValue='ALARM' - ) - len(resp['MetricAlarms']).should.equal(1) - resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm1') - resp['MetricAlarms'][0]['StateValue'].should.equal('ALARM') - - resp = client.describe_alarms( - StateValue='OK' - ) - len(resp['MetricAlarms']).should.equal(1) - resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm2') - resp['MetricAlarms'][0]['StateValue'].should.equal('OK') - - # Just for sanity - resp = client.describe_alarms() - len(resp['MetricAlarms']).should.equal(2) - - -@mock_cloudwatch -def test_put_metric_data_no_dimensions(): - conn = boto3.client('cloudwatch', region_name='us-east-1') - - conn.put_metric_data( - Namespace='tester', - MetricData=[ - dict( - MetricName='metric', - Value=1.5, - ) - ] - ) - - metrics = conn.list_metrics()['Metrics'] - metrics.should.have.length_of(1) - metric = metrics[0] - metric['Namespace'].should.equal('tester') - metric['MetricName'].should.equal('metric') - - - -@mock_cloudwatch -def test_put_metric_data_with_statistics(): - conn = boto3.client('cloudwatch', region_name='us-east-1') - - conn.put_metric_data( - Namespace='tester', - MetricData=[ - dict( - MetricName='statmetric', - Timestamp=datetime(2015, 1, 1), - # no Value to test https://github.com/spulec/moto/issues/1615 - StatisticValues=dict( - SampleCount=123.0, - Sum=123.0, - Minimum=123.0, - Maximum=123.0 - ), - Unit='Milliseconds', - StorageResolution=123 - ) - ] - ) - - metrics = conn.list_metrics()['Metrics'] - metrics.should.have.length_of(1) - metric = metrics[0] - metric['Namespace'].should.equal('tester') - metric['MetricName'].should.equal('statmetric') - # TODO: test statistics - https://github.com/spulec/moto/issues/1615 - -@mock_cloudwatch -def test_get_metric_statistics(): - conn = boto3.client('cloudwatch', region_name='us-east-1') - utc_now = datetime.now(tz=pytz.utc) - - conn.put_metric_data( - Namespace='tester', - MetricData=[ - dict( - MetricName='metric', - Value=1.5, - Timestamp=utc_now - ) - ] - ) - - stats = conn.get_metric_statistics( - Namespace='tester', - MetricName='metric', - StartTime=utc_now - timedelta(seconds=60), - EndTime=utc_now + timedelta(seconds=60), - Period=60, - Statistics=['SampleCount', 'Sum'] - ) - - stats['Datapoints'].should.have.length_of(1) - datapoint = stats['Datapoints'][0] - datapoint['SampleCount'].should.equal(1.0) - datapoint['Sum'].should.equal(1.5) +from __future__ import unicode_literals + +import boto3 +from botocore.exceptions import ClientError +from datetime import datetime, timedelta +import pytz +import sure # noqa + +from moto import mock_cloudwatch + + +@mock_cloudwatch +def test_put_list_dashboard(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 
3, "properties": {"markdown": "Hello world"}}]}' + + client.put_dashboard(DashboardName='test1', DashboardBody=widget) + resp = client.list_dashboards() + + len(resp['DashboardEntries']).should.equal(1) + + +@mock_cloudwatch +def test_put_list_prefix_nomatch_dashboard(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}' + + client.put_dashboard(DashboardName='test1', DashboardBody=widget) + resp = client.list_dashboards(DashboardNamePrefix='nomatch') + + len(resp['DashboardEntries']).should.equal(0) + + +@mock_cloudwatch +def test_delete_dashboard(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}' + + client.put_dashboard(DashboardName='test1', DashboardBody=widget) + client.put_dashboard(DashboardName='test2', DashboardBody=widget) + client.put_dashboard(DashboardName='test3', DashboardBody=widget) + client.delete_dashboards(DashboardNames=['test2', 'test1']) + + resp = client.list_dashboards(DashboardNamePrefix='test3') + len(resp['DashboardEntries']).should.equal(1) + + +@mock_cloudwatch +def test_delete_dashboard_fail(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}' + + client.put_dashboard(DashboardName='test1', DashboardBody=widget) + client.put_dashboard(DashboardName='test2', DashboardBody=widget) + client.put_dashboard(DashboardName='test3', DashboardBody=widget) + # Doesnt delete anything if all dashboards to be deleted do not exist + try: + client.delete_dashboards(DashboardNames=['test2', 'test1', 'test_no_match']) + except ClientError as err: + err.response['Error']['Code'].should.equal('ResourceNotFound') + else: + raise RuntimeError('Should of raised error') + + resp = client.list_dashboards() + len(resp['DashboardEntries']).should.equal(3) + + +@mock_cloudwatch +def test_get_dashboard(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + widget = '{"widgets": [{"type": "text", "x": 0, "y": 7, "width": 3, "height": 3, "properties": {"markdown": "Hello world"}}]}' + client.put_dashboard(DashboardName='test1', DashboardBody=widget) + + resp = client.get_dashboard(DashboardName='test1') + resp.should.contain('DashboardArn') + resp.should.contain('DashboardBody') + resp['DashboardName'].should.equal('test1') + + +@mock_cloudwatch +def test_get_dashboard_fail(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + + try: + client.get_dashboard(DashboardName='test1') + except ClientError as err: + err.response['Error']['Code'].should.equal('ResourceNotFound') + else: + raise RuntimeError('Should of raised error') + + +@mock_cloudwatch +def test_alarm_state(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + + client.put_metric_alarm( + AlarmName='testalarm1', + MetricName='cpu', + Namespace='blah', + Period=10, + EvaluationPeriods=5, + Statistic='Average', + Threshold=2, + ComparisonOperator='GreaterThanThreshold', + ) + client.put_metric_alarm( + AlarmName='testalarm2', + MetricName='cpu', + Namespace='blah', + Period=10, + EvaluationPeriods=5, + Statistic='Average', + Threshold=2, + ComparisonOperator='GreaterThanThreshold', + ) + + # This is tested implicitly as if it doesnt work the rest will 
die + client.set_alarm_state( + AlarmName='testalarm1', + StateValue='ALARM', + StateReason='testreason', + StateReasonData='{"some": "json_data"}' + ) + + resp = client.describe_alarms( + StateValue='ALARM' + ) + len(resp['MetricAlarms']).should.equal(1) + resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm1') + resp['MetricAlarms'][0]['StateValue'].should.equal('ALARM') + + resp = client.describe_alarms( + StateValue='OK' + ) + len(resp['MetricAlarms']).should.equal(1) + resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm2') + resp['MetricAlarms'][0]['StateValue'].should.equal('OK') + + # Just for sanity + resp = client.describe_alarms() + len(resp['MetricAlarms']).should.equal(2) + + +@mock_cloudwatch +def test_put_metric_data_no_dimensions(): + conn = boto3.client('cloudwatch', region_name='us-east-1') + + conn.put_metric_data( + Namespace='tester', + MetricData=[ + dict( + MetricName='metric', + Value=1.5, + ) + ] + ) + + metrics = conn.list_metrics()['Metrics'] + metrics.should.have.length_of(1) + metric = metrics[0] + metric['Namespace'].should.equal('tester') + metric['MetricName'].should.equal('metric') + + + +@mock_cloudwatch +def test_put_metric_data_with_statistics(): + conn = boto3.client('cloudwatch', region_name='us-east-1') + + conn.put_metric_data( + Namespace='tester', + MetricData=[ + dict( + MetricName='statmetric', + Timestamp=datetime(2015, 1, 1), + # no Value to test https://github.com/spulec/moto/issues/1615 + StatisticValues=dict( + SampleCount=123.0, + Sum=123.0, + Minimum=123.0, + Maximum=123.0 + ), + Unit='Milliseconds', + StorageResolution=123 + ) + ] + ) + + metrics = conn.list_metrics()['Metrics'] + metrics.should.have.length_of(1) + metric = metrics[0] + metric['Namespace'].should.equal('tester') + metric['MetricName'].should.equal('statmetric') + # TODO: test statistics - https://github.com/spulec/moto/issues/1615 + +@mock_cloudwatch +def test_get_metric_statistics(): + conn = boto3.client('cloudwatch', region_name='us-east-1') + utc_now = datetime.now(tz=pytz.utc) + + conn.put_metric_data( + Namespace='tester', + MetricData=[ + dict( + MetricName='metric', + Value=1.5, + Timestamp=utc_now + ) + ] + ) + + stats = conn.get_metric_statistics( + Namespace='tester', + MetricName='metric', + StartTime=utc_now - timedelta(seconds=60), + EndTime=utc_now + timedelta(seconds=60), + Period=60, + Statistics=['SampleCount', 'Sum'] + ) + + stats['Datapoints'].should.have.length_of(1) + datapoint = stats['Datapoints'][0] + datapoint['SampleCount'].should.equal(1.0) + datapoint['Sum'].should.equal(1.5) diff --git a/tests/test_cognitoidentity/test_cognitoidentity.py b/tests/test_cognitoidentity/test_cognitoidentity.py index ac79fa223..278c20660 100644 --- a/tests/test_cognitoidentity/test_cognitoidentity.py +++ b/tests/test_cognitoidentity/test_cognitoidentity.py @@ -1,85 +1,85 @@ -from __future__ import unicode_literals - -import boto3 - -from moto import mock_cognitoidentity -import sure # noqa - -from moto.cognitoidentity.utils import get_random_identity_id - - -@mock_cognitoidentity -def test_create_identity_pool(): - conn = boto3.client('cognito-identity', 'us-west-2') - - result = conn.create_identity_pool(IdentityPoolName='TestPool', - AllowUnauthenticatedIdentities=False, - SupportedLoginProviders={'graph.facebook.com': '123456789012345'}, - DeveloperProviderName='devname', - OpenIdConnectProviderARNs=['arn:aws:rds:eu-west-2:123456789012:db:mysql-db'], - CognitoIdentityProviders=[ - { - 'ProviderName': 'testprovider', - 'ClientId': 'CLIENT12345', 
- 'ServerSideTokenCheck': True - }, - ], - SamlProviderARNs=['arn:aws:rds:eu-west-2:123456789012:db:mysql-db']) - assert result['IdentityPoolId'] != '' - - -# testing a helper function -def test_get_random_identity_id(): - assert len(get_random_identity_id('us-west-2')) > 0 - assert len(get_random_identity_id('us-west-2').split(':')[1]) == 19 - - -@mock_cognitoidentity -def test_get_id(): - # These two do NOT work in server mode. They just don't return the data from the model. - conn = boto3.client('cognito-identity', 'us-west-2') - result = conn.get_id(AccountId='someaccount', - IdentityPoolId='us-west-2:12345', - Logins={ - 'someurl': '12345' - }) - print(result) - assert result.get('IdentityId', "").startswith('us-west-2') or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 - - -@mock_cognitoidentity -def test_get_credentials_for_identity(): - # These two do NOT work in server mode. They just don't return the data from the model. - conn = boto3.client('cognito-identity', 'us-west-2') - result = conn.get_credentials_for_identity(IdentityId='12345') - - assert result.get('Expiration', 0) > 0 or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 - assert result.get('IdentityId') == '12345' or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 - - -@mock_cognitoidentity -def test_get_open_id_token_for_developer_identity(): - conn = boto3.client('cognito-identity', 'us-west-2') - result = conn.get_open_id_token_for_developer_identity( - IdentityPoolId='us-west-2:12345', - IdentityId='12345', - Logins={ - 'someurl': '12345' - }, - TokenDuration=123 - ) - assert len(result['Token']) - assert result['IdentityId'] == '12345' - -@mock_cognitoidentity -def test_get_open_id_token_for_developer_identity_when_no_explicit_identity_id(): - conn = boto3.client('cognito-identity', 'us-west-2') - result = conn.get_open_id_token_for_developer_identity( - IdentityPoolId='us-west-2:12345', - Logins={ - 'someurl': '12345' - }, - TokenDuration=123 - ) - assert len(result['Token']) > 0 - assert len(result['IdentityId']) > 0 +from __future__ import unicode_literals + +import boto3 + +from moto import mock_cognitoidentity +import sure # noqa + +from moto.cognitoidentity.utils import get_random_identity_id + + +@mock_cognitoidentity +def test_create_identity_pool(): + conn = boto3.client('cognito-identity', 'us-west-2') + + result = conn.create_identity_pool(IdentityPoolName='TestPool', + AllowUnauthenticatedIdentities=False, + SupportedLoginProviders={'graph.facebook.com': '123456789012345'}, + DeveloperProviderName='devname', + OpenIdConnectProviderARNs=['arn:aws:rds:eu-west-2:123456789012:db:mysql-db'], + CognitoIdentityProviders=[ + { + 'ProviderName': 'testprovider', + 'ClientId': 'CLIENT12345', + 'ServerSideTokenCheck': True + }, + ], + SamlProviderARNs=['arn:aws:rds:eu-west-2:123456789012:db:mysql-db']) + assert result['IdentityPoolId'] != '' + + +# testing a helper function +def test_get_random_identity_id(): + assert len(get_random_identity_id('us-west-2')) > 0 + assert len(get_random_identity_id('us-west-2').split(':')[1]) == 19 + + +@mock_cognitoidentity +def test_get_id(): + # These two do NOT work in server mode. They just don't return the data from the model. 
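+    # (Hence the "or ... HTTPStatusCode == 200" fallbacks in the assertions
+    # below: in server mode only the response metadata can be verified.)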
+ conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_id(AccountId='someaccount', + IdentityPoolId='us-west-2:12345', + Logins={ + 'someurl': '12345' + }) + print(result) + assert result.get('IdentityId', "").startswith('us-west-2') or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 + + +@mock_cognitoidentity +def test_get_credentials_for_identity(): + # These two do NOT work in server mode. They just don't return the data from the model. + conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_credentials_for_identity(IdentityId='12345') + + assert result.get('Expiration', 0) > 0 or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 + assert result.get('IdentityId') == '12345' or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 + + +@mock_cognitoidentity +def test_get_open_id_token_for_developer_identity(): + conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_open_id_token_for_developer_identity( + IdentityPoolId='us-west-2:12345', + IdentityId='12345', + Logins={ + 'someurl': '12345' + }, + TokenDuration=123 + ) + assert len(result['Token']) + assert result['IdentityId'] == '12345' + +@mock_cognitoidentity +def test_get_open_id_token_for_developer_identity_when_no_explicit_identity_id(): + conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_open_id_token_for_developer_identity( + IdentityPoolId='us-west-2:12345', + Logins={ + 'someurl': '12345' + }, + TokenDuration=123 + ) + assert len(result['Token']) > 0 + assert len(result['IdentityId']) > 0 diff --git a/tests/test_cognitoidentity/test_server.py b/tests/test_cognitoidentity/test_server.py index b63d42bc0..d093158c5 100644 --- a/tests/test_cognitoidentity/test_server.py +++ b/tests/test_cognitoidentity/test_server.py @@ -1,45 +1,45 @@ -from __future__ import unicode_literals - -import json -import sure # noqa - -import moto.server as server -from moto import mock_cognitoidentity - -''' -Test the different server responses -''' - - -@mock_cognitoidentity -def test_create_identity_pool(): - - backend = server.create_backend_app("cognito-identity") - test_client = backend.test_client() - - res = test_client.post('/', - data={"IdentityPoolName": "test", "AllowUnauthenticatedIdentities": True}, - headers={ - "X-Amz-Target": "com.amazonaws.cognito.identity.model.AWSCognitoIdentityService.CreateIdentityPool"}, - ) - - json_data = json.loads(res.data.decode("utf-8")) - assert json_data['IdentityPoolName'] == "test" - - -@mock_cognitoidentity -def test_get_id(): - backend = server.create_backend_app("cognito-identity") - test_client = backend.test_client() - - res = test_client.post('/', - data=json.dumps({'AccountId': 'someaccount', - 'IdentityPoolId': 'us-west-2:12345', - 'Logins': {'someurl': '12345'}}), - headers={ - "X-Amz-Target": "com.amazonaws.cognito.identity.model.AWSCognitoIdentityService.GetId"}, - ) - - print(res.data) - json_data = json.loads(res.data.decode("utf-8")) - assert ':' in json_data['IdentityId'] +from __future__ import unicode_literals + +import json +import sure # noqa + +import moto.server as server +from moto import mock_cognitoidentity + +''' +Test the different server responses +''' + + +@mock_cognitoidentity +def test_create_identity_pool(): + + backend = server.create_backend_app("cognito-identity") + test_client = backend.test_client() + + res = test_client.post('/', + data={"IdentityPoolName": "test", "AllowUnauthenticatedIdentities": True}, + headers={ + "X-Amz-Target": 
"com.amazonaws.cognito.identity.model.AWSCognitoIdentityService.CreateIdentityPool"}, + ) + + json_data = json.loads(res.data.decode("utf-8")) + assert json_data['IdentityPoolName'] == "test" + + +@mock_cognitoidentity +def test_get_id(): + backend = server.create_backend_app("cognito-identity") + test_client = backend.test_client() + + res = test_client.post('/', + data=json.dumps({'AccountId': 'someaccount', + 'IdentityPoolId': 'us-west-2:12345', + 'Logins': {'someurl': '12345'}}), + headers={ + "X-Amz-Target": "com.amazonaws.cognito.identity.model.AWSCognitoIdentityService.GetId"}, + ) + + print(res.data) + json_data = json.loads(res.data.decode("utf-8")) + assert ':' in json_data['IdentityId'] diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index f72a44762..6c0ad131b 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -1,601 +1,601 @@ -from __future__ import unicode_literals - -import boto3 -import json -import os -import uuid - -from jose import jws - -from moto import mock_cognitoidp -import sure # noqa - - -@mock_cognitoidp -def test_create_user_pool(): - conn = boto3.client("cognito-idp", "us-west-2") - - name = str(uuid.uuid4()) - value = str(uuid.uuid4()) - result = conn.create_user_pool( - PoolName=name, - LambdaConfig={ - "PreSignUp": value - } - ) - - result["UserPool"]["Id"].should_not.be.none - result["UserPool"]["Id"].should.match(r'[\w-]+_[0-9a-zA-Z]+') - result["UserPool"]["Name"].should.equal(name) - result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) - - -@mock_cognitoidp -def test_list_user_pools(): - conn = boto3.client("cognito-idp", "us-west-2") - - name = str(uuid.uuid4()) - conn.create_user_pool(PoolName=name) - result = conn.list_user_pools(MaxResults=10) - result["UserPools"].should.have.length_of(1) - result["UserPools"][0]["Name"].should.equal(name) - - -@mock_cognitoidp -def test_describe_user_pool(): - conn = boto3.client("cognito-idp", "us-west-2") - - name = str(uuid.uuid4()) - value = str(uuid.uuid4()) - user_pool_details = conn.create_user_pool( - PoolName=name, - LambdaConfig={ - "PreSignUp": value - } - ) - - result = conn.describe_user_pool(UserPoolId=user_pool_details["UserPool"]["Id"]) - result["UserPool"]["Name"].should.equal(name) - result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) - - -@mock_cognitoidp -def test_delete_user_pool(): - conn = boto3.client("cognito-idp", "us-west-2") - - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.list_user_pools(MaxResults=10)["UserPools"].should.have.length_of(1) - conn.delete_user_pool(UserPoolId=user_pool_id) - conn.list_user_pools(MaxResults=10)["UserPools"].should.have.length_of(0) - - -@mock_cognitoidp -def test_create_user_pool_domain(): - conn = boto3.client("cognito-idp", "us-west-2") - - domain = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - result = conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) - result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - - -@mock_cognitoidp -def test_describe_user_pool_domain(): - conn = boto3.client("cognito-idp", "us-west-2") - - domain = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) - result = conn.describe_user_pool_domain(Domain=domain) - 
result["DomainDescription"]["Domain"].should.equal(domain) - result["DomainDescription"]["UserPoolId"].should.equal(user_pool_id) - result["DomainDescription"]["AWSAccountId"].should_not.be.none - - -@mock_cognitoidp -def test_delete_user_pool_domain(): - conn = boto3.client("cognito-idp", "us-west-2") - - domain = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) - result = conn.delete_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) - result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - result = conn.describe_user_pool_domain(Domain=domain) - # This is a surprising behavior of the real service: describing a missing domain comes - # back with status 200 and a DomainDescription of {} - result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) - result["DomainDescription"].keys().should.have.length_of(0) - - -@mock_cognitoidp -def test_create_user_pool_client(): - conn = boto3.client("cognito-idp", "us-west-2") - - client_name = str(uuid.uuid4()) - value = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - result = conn.create_user_pool_client( - UserPoolId=user_pool_id, - ClientName=client_name, - CallbackURLs=[value], - ) - - result["UserPoolClient"]["UserPoolId"].should.equal(user_pool_id) - result["UserPoolClient"]["ClientId"].should_not.be.none - result["UserPoolClient"]["ClientName"].should.equal(client_name) - result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) - result["UserPoolClient"]["CallbackURLs"][0].should.equal(value) - - -@mock_cognitoidp -def test_list_user_pool_clients(): - conn = boto3.client("cognito-idp", "us-west-2") - - client_name = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.create_user_pool_client(UserPoolId=user_pool_id, ClientName=client_name) - result = conn.list_user_pool_clients(UserPoolId=user_pool_id, MaxResults=10) - result["UserPoolClients"].should.have.length_of(1) - result["UserPoolClients"][0]["ClientName"].should.equal(client_name) - - -@mock_cognitoidp -def test_describe_user_pool_client(): - conn = boto3.client("cognito-idp", "us-west-2") - - client_name = str(uuid.uuid4()) - value = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - client_details = conn.create_user_pool_client( - UserPoolId=user_pool_id, - ClientName=client_name, - CallbackURLs=[value], - ) - - result = conn.describe_user_pool_client( - UserPoolId=user_pool_id, - ClientId=client_details["UserPoolClient"]["ClientId"], - ) - - result["UserPoolClient"]["ClientName"].should.equal(client_name) - result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) - result["UserPoolClient"]["CallbackURLs"][0].should.equal(value) - - -@mock_cognitoidp -def test_update_user_pool_client(): - conn = boto3.client("cognito-idp", "us-west-2") - - old_client_name = str(uuid.uuid4()) - new_client_name = str(uuid.uuid4()) - old_value = str(uuid.uuid4()) - new_value = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - client_details = conn.create_user_pool_client( - UserPoolId=user_pool_id, - ClientName=old_client_name, - CallbackURLs=[old_value], - ) - - result = conn.update_user_pool_client( - UserPoolId=user_pool_id, - ClientId=client_details["UserPoolClient"]["ClientId"], - ClientName=new_client_name, - 
CallbackURLs=[new_value], - ) - - result["UserPoolClient"]["ClientName"].should.equal(new_client_name) - result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) - result["UserPoolClient"]["CallbackURLs"][0].should.equal(new_value) - - -@mock_cognitoidp -def test_delete_user_pool_client(): - conn = boto3.client("cognito-idp", "us-west-2") - - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - client_details = conn.create_user_pool_client( - UserPoolId=user_pool_id, - ClientName=str(uuid.uuid4()), - ) - - conn.delete_user_pool_client( - UserPoolId=user_pool_id, - ClientId=client_details["UserPoolClient"]["ClientId"], - ) - - caught = False - try: - conn.describe_user_pool_client( - UserPoolId=user_pool_id, - ClientId=client_details["UserPoolClient"]["ClientId"], - ) - except conn.exceptions.ResourceNotFoundException: - caught = True - - caught.should.be.true - - -@mock_cognitoidp -def test_create_identity_provider(): - conn = boto3.client("cognito-idp", "us-west-2") - - provider_name = str(uuid.uuid4()) - provider_type = "Facebook" - value = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - result = conn.create_identity_provider( - UserPoolId=user_pool_id, - ProviderName=provider_name, - ProviderType=provider_type, - ProviderDetails={ - "thing": value - }, - ) - - result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id) - result["IdentityProvider"]["ProviderName"].should.equal(provider_name) - result["IdentityProvider"]["ProviderType"].should.equal(provider_type) - result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(value) - - -@mock_cognitoidp -def test_list_identity_providers(): - conn = boto3.client("cognito-idp", "us-west-2") - - provider_name = str(uuid.uuid4()) - provider_type = "Facebook" - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.create_identity_provider( - UserPoolId=user_pool_id, - ProviderName=provider_name, - ProviderType=provider_type, - ProviderDetails={}, - ) - - result = conn.list_identity_providers( - UserPoolId=user_pool_id, - MaxResults=10, - ) - - result["Providers"].should.have.length_of(1) - result["Providers"][0]["ProviderName"].should.equal(provider_name) - result["Providers"][0]["ProviderType"].should.equal(provider_type) - - -@mock_cognitoidp -def test_describe_identity_providers(): - conn = boto3.client("cognito-idp", "us-west-2") - - provider_name = str(uuid.uuid4()) - provider_type = "Facebook" - value = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.create_identity_provider( - UserPoolId=user_pool_id, - ProviderName=provider_name, - ProviderType=provider_type, - ProviderDetails={ - "thing": value - }, - ) - - result = conn.describe_identity_provider( - UserPoolId=user_pool_id, - ProviderName=provider_name, - ) - - result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id) - result["IdentityProvider"]["ProviderName"].should.equal(provider_name) - result["IdentityProvider"]["ProviderType"].should.equal(provider_type) - result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(value) - - -@mock_cognitoidp -def test_delete_identity_providers(): - conn = boto3.client("cognito-idp", "us-west-2") - - provider_name = str(uuid.uuid4()) - provider_type = "Facebook" - value = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.create_identity_provider( - 
UserPoolId=user_pool_id, - ProviderName=provider_name, - ProviderType=provider_type, - ProviderDetails={ - "thing": value - }, - ) - - conn.delete_identity_provider(UserPoolId=user_pool_id, ProviderName=provider_name) - - caught = False - try: - conn.describe_identity_provider( - UserPoolId=user_pool_id, - ProviderName=provider_name, - ) - except conn.exceptions.ResourceNotFoundException: - caught = True - - caught.should.be.true - - -@mock_cognitoidp -def test_admin_create_user(): - conn = boto3.client("cognito-idp", "us-west-2") - - username = str(uuid.uuid4()) - value = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - result = conn.admin_create_user( - UserPoolId=user_pool_id, - Username=username, - UserAttributes=[ - {"Name": "thing", "Value": value} - ], - ) - - result["User"]["Username"].should.equal(username) - result["User"]["UserStatus"].should.equal("FORCE_CHANGE_PASSWORD") - result["User"]["Attributes"].should.have.length_of(1) - result["User"]["Attributes"][0]["Name"].should.equal("thing") - result["User"]["Attributes"][0]["Value"].should.equal(value) - result["User"]["Enabled"].should.equal(True) - - -@mock_cognitoidp -def test_admin_get_user(): - conn = boto3.client("cognito-idp", "us-west-2") - - username = str(uuid.uuid4()) - value = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.admin_create_user( - UserPoolId=user_pool_id, - Username=username, - UserAttributes=[ - {"Name": "thing", "Value": value} - ], - ) - - result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) - result["Username"].should.equal(username) - result["UserAttributes"].should.have.length_of(1) - result["UserAttributes"][0]["Name"].should.equal("thing") - result["UserAttributes"][0]["Value"].should.equal(value) - - -@mock_cognitoidp -def test_admin_get_missing_user(): - conn = boto3.client("cognito-idp", "us-west-2") - - username = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - - caught = False - try: - conn.admin_get_user(UserPoolId=user_pool_id, Username=username) - except conn.exceptions.UserNotFoundException: - caught = True - - caught.should.be.true - - -@mock_cognitoidp -def test_list_users(): - conn = boto3.client("cognito-idp", "us-west-2") - - username = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.admin_create_user(UserPoolId=user_pool_id, Username=username) - result = conn.list_users(UserPoolId=user_pool_id) - result["Users"].should.have.length_of(1) - result["Users"][0]["Username"].should.equal(username) - - -@mock_cognitoidp -def test_admin_disable_user(): - conn = boto3.client("cognito-idp", "us-west-2") - - username = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.admin_create_user(UserPoolId=user_pool_id, Username=username) - - result = conn.admin_disable_user(UserPoolId=user_pool_id, Username=username) - list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected - - conn.admin_get_user(UserPoolId=user_pool_id, Username=username) \ - ["Enabled"].should.equal(False) - - -@mock_cognitoidp -def test_admin_enable_user(): - conn = boto3.client("cognito-idp", "us-west-2") - - username = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.admin_create_user(UserPoolId=user_pool_id, Username=username) - 
conn.admin_disable_user(UserPoolId=user_pool_id, Username=username) - - result = conn.admin_enable_user(UserPoolId=user_pool_id, Username=username) - list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected - - conn.admin_get_user(UserPoolId=user_pool_id, Username=username) \ - ["Enabled"].should.equal(True) - - -@mock_cognitoidp -def test_admin_delete_user(): - conn = boto3.client("cognito-idp", "us-west-2") - - username = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - conn.admin_create_user(UserPoolId=user_pool_id, Username=username) - conn.admin_delete_user(UserPoolId=user_pool_id, Username=username) - - caught = False - try: - conn.admin_get_user(UserPoolId=user_pool_id, Username=username) - except conn.exceptions.UserNotFoundException: - caught = True - - caught.should.be.true - - -def authentication_flow(conn): - username = str(uuid.uuid4()) - temporary_password = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - user_attribute_name = str(uuid.uuid4()) - user_attribute_value = str(uuid.uuid4()) - client_id = conn.create_user_pool_client( - UserPoolId=user_pool_id, - ClientName=str(uuid.uuid4()), - ReadAttributes=[user_attribute_name] - )["UserPoolClient"]["ClientId"] - - conn.admin_create_user( - UserPoolId=user_pool_id, - Username=username, - TemporaryPassword=temporary_password, - UserAttributes=[{ - 'Name': user_attribute_name, - 'Value': user_attribute_value - }] - ) - - result = conn.admin_initiate_auth( - UserPoolId=user_pool_id, - ClientId=client_id, - AuthFlow="ADMIN_NO_SRP_AUTH", - AuthParameters={ - "USERNAME": username, - "PASSWORD": temporary_password - }, - ) - - # A newly created user is forced to set a new password - result["ChallengeName"].should.equal("NEW_PASSWORD_REQUIRED") - result["Session"].should_not.be.none - - # This sets a new password and logs the user in (creates tokens) - new_password = str(uuid.uuid4()) - result = conn.respond_to_auth_challenge( - Session=result["Session"], - ClientId=client_id, - ChallengeName="NEW_PASSWORD_REQUIRED", - ChallengeResponses={ - "USERNAME": username, - "NEW_PASSWORD": new_password - } - ) - - result["AuthenticationResult"]["IdToken"].should_not.be.none - result["AuthenticationResult"]["AccessToken"].should_not.be.none - - return { - "user_pool_id": user_pool_id, - "client_id": client_id, - "id_token": result["AuthenticationResult"]["IdToken"], - "access_token": result["AuthenticationResult"]["AccessToken"], - "username": username, - "password": new_password, - "additional_fields": { - user_attribute_name: user_attribute_value - } - } - - -@mock_cognitoidp -def test_authentication_flow(): - conn = boto3.client("cognito-idp", "us-west-2") - - authentication_flow(conn) - - -@mock_cognitoidp -def test_token_legitimacy(): - conn = boto3.client("cognito-idp", "us-west-2") - - path = "../../moto/cognitoidp/resources/jwks-public.json" - with open(os.path.join(os.path.dirname(__file__), path)) as f: - json_web_key = json.loads(f.read())["keys"][0] - - outputs = authentication_flow(conn) - id_token = outputs["id_token"] - access_token = outputs["access_token"] - client_id = outputs["client_id"] - issuer = "https://cognito-idp.us-west-2.amazonaws.com/{}".format(outputs["user_pool_id"]) - id_claims = json.loads(jws.verify(id_token, json_web_key, "RS256")) - id_claims["iss"].should.equal(issuer) - id_claims["aud"].should.equal(client_id) - access_claims = json.loads(jws.verify(access_token, json_web_key, 
"RS256")) - access_claims["iss"].should.equal(issuer) - access_claims["aud"].should.equal(client_id) - for k, v in outputs["additional_fields"].items(): - access_claims[k].should.equal(v) - - -@mock_cognitoidp -def test_change_password(): - conn = boto3.client("cognito-idp", "us-west-2") - - outputs = authentication_flow(conn) - - # Take this opportunity to test change_password, which requires an access token. - newer_password = str(uuid.uuid4()) - conn.change_password( - AccessToken=outputs["access_token"], - PreviousPassword=outputs["password"], - ProposedPassword=newer_password, - ) - - # Log in again, which should succeed without a challenge because the user is no - # longer in the force-new-password state. - result = conn.admin_initiate_auth( - UserPoolId=outputs["user_pool_id"], - ClientId=outputs["client_id"], - AuthFlow="ADMIN_NO_SRP_AUTH", - AuthParameters={ - "USERNAME": outputs["username"], - "PASSWORD": newer_password, - }, - ) - - result["AuthenticationResult"].should_not.be.none - - -@mock_cognitoidp -def test_forgot_password(): - conn = boto3.client("cognito-idp", "us-west-2") - - result = conn.forgot_password(ClientId=str(uuid.uuid4()), Username=str(uuid.uuid4())) - result["CodeDeliveryDetails"].should_not.be.none - - -@mock_cognitoidp -def test_confirm_forgot_password(): - conn = boto3.client("cognito-idp", "us-west-2") - - username = str(uuid.uuid4()) - user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] - client_id = conn.create_user_pool_client( - UserPoolId=user_pool_id, - ClientName=str(uuid.uuid4()), - )["UserPoolClient"]["ClientId"] - - conn.admin_create_user( - UserPoolId=user_pool_id, - Username=username, - TemporaryPassword=str(uuid.uuid4()), - ) - - conn.confirm_forgot_password( - ClientId=client_id, - Username=username, - ConfirmationCode=str(uuid.uuid4()), - Password=str(uuid.uuid4()), - ) +from __future__ import unicode_literals + +import boto3 +import json +import os +import uuid + +from jose import jws + +from moto import mock_cognitoidp +import sure # noqa + + +@mock_cognitoidp +def test_create_user_pool(): + conn = boto3.client("cognito-idp", "us-west-2") + + name = str(uuid.uuid4()) + value = str(uuid.uuid4()) + result = conn.create_user_pool( + PoolName=name, + LambdaConfig={ + "PreSignUp": value + } + ) + + result["UserPool"]["Id"].should_not.be.none + result["UserPool"]["Id"].should.match(r'[\w-]+_[0-9a-zA-Z]+') + result["UserPool"]["Name"].should.equal(name) + result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) + + +@mock_cognitoidp +def test_list_user_pools(): + conn = boto3.client("cognito-idp", "us-west-2") + + name = str(uuid.uuid4()) + conn.create_user_pool(PoolName=name) + result = conn.list_user_pools(MaxResults=10) + result["UserPools"].should.have.length_of(1) + result["UserPools"][0]["Name"].should.equal(name) + + +@mock_cognitoidp +def test_describe_user_pool(): + conn = boto3.client("cognito-idp", "us-west-2") + + name = str(uuid.uuid4()) + value = str(uuid.uuid4()) + user_pool_details = conn.create_user_pool( + PoolName=name, + LambdaConfig={ + "PreSignUp": value + } + ) + + result = conn.describe_user_pool(UserPoolId=user_pool_details["UserPool"]["Id"]) + result["UserPool"]["Name"].should.equal(name) + result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) + + +@mock_cognitoidp +def test_delete_user_pool(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + 
conn.list_user_pools(MaxResults=10)["UserPools"].should.have.length_of(1) + conn.delete_user_pool(UserPoolId=user_pool_id) + conn.list_user_pools(MaxResults=10)["UserPools"].should.have.length_of(0) + + +@mock_cognitoidp +def test_create_user_pool_domain(): + conn = boto3.client("cognito-idp", "us-west-2") + + domain = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + result = conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) + result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + + +@mock_cognitoidp +def test_describe_user_pool_domain(): + conn = boto3.client("cognito-idp", "us-west-2") + + domain = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) + result = conn.describe_user_pool_domain(Domain=domain) + result["DomainDescription"]["Domain"].should.equal(domain) + result["DomainDescription"]["UserPoolId"].should.equal(user_pool_id) + result["DomainDescription"]["AWSAccountId"].should_not.be.none + + +@mock_cognitoidp +def test_delete_user_pool_domain(): + conn = boto3.client("cognito-idp", "us-west-2") + + domain = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) + result = conn.delete_user_pool_domain(UserPoolId=user_pool_id, Domain=domain) + result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + result = conn.describe_user_pool_domain(Domain=domain) + # This is a surprising behavior of the real service: describing a missing domain comes + # back with status 200 and a DomainDescription of {} + result["ResponseMetadata"]["HTTPStatusCode"].should.equal(200) + result["DomainDescription"].keys().should.have.length_of(0) + + +@mock_cognitoidp +def test_create_user_pool_client(): + conn = boto3.client("cognito-idp", "us-west-2") + + client_name = str(uuid.uuid4()) + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + result = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=client_name, + CallbackURLs=[value], + ) + + result["UserPoolClient"]["UserPoolId"].should.equal(user_pool_id) + result["UserPoolClient"]["ClientId"].should_not.be.none + result["UserPoolClient"]["ClientName"].should.equal(client_name) + result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) + result["UserPoolClient"]["CallbackURLs"][0].should.equal(value) + + +@mock_cognitoidp +def test_list_user_pool_clients(): + conn = boto3.client("cognito-idp", "us-west-2") + + client_name = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_user_pool_client(UserPoolId=user_pool_id, ClientName=client_name) + result = conn.list_user_pool_clients(UserPoolId=user_pool_id, MaxResults=10) + result["UserPoolClients"].should.have.length_of(1) + result["UserPoolClients"][0]["ClientName"].should.equal(client_name) + + +@mock_cognitoidp +def test_describe_user_pool_client(): + conn = boto3.client("cognito-idp", "us-west-2") + + client_name = str(uuid.uuid4()) + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_details = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=client_name, + CallbackURLs=[value], + ) + + result = conn.describe_user_pool_client( + 
UserPoolId=user_pool_id, + ClientId=client_details["UserPoolClient"]["ClientId"], + ) + + result["UserPoolClient"]["ClientName"].should.equal(client_name) + result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) + result["UserPoolClient"]["CallbackURLs"][0].should.equal(value) + + +@mock_cognitoidp +def test_update_user_pool_client(): + conn = boto3.client("cognito-idp", "us-west-2") + + old_client_name = str(uuid.uuid4()) + new_client_name = str(uuid.uuid4()) + old_value = str(uuid.uuid4()) + new_value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_details = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=old_client_name, + CallbackURLs=[old_value], + ) + + result = conn.update_user_pool_client( + UserPoolId=user_pool_id, + ClientId=client_details["UserPoolClient"]["ClientId"], + ClientName=new_client_name, + CallbackURLs=[new_value], + ) + + result["UserPoolClient"]["ClientName"].should.equal(new_client_name) + result["UserPoolClient"]["CallbackURLs"].should.have.length_of(1) + result["UserPoolClient"]["CallbackURLs"][0].should.equal(new_value) + + +@mock_cognitoidp +def test_delete_user_pool_client(): + conn = boto3.client("cognito-idp", "us-west-2") + + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_details = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=str(uuid.uuid4()), + ) + + conn.delete_user_pool_client( + UserPoolId=user_pool_id, + ClientId=client_details["UserPoolClient"]["ClientId"], + ) + + caught = False + try: + conn.describe_user_pool_client( + UserPoolId=user_pool_id, + ClientId=client_details["UserPoolClient"]["ClientId"], + ) + except conn.exceptions.ResourceNotFoundException: + caught = True + + caught.should.be.true + + +@mock_cognitoidp +def test_create_identity_provider(): + conn = boto3.client("cognito-idp", "us-west-2") + + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + result = conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={ + "thing": value + }, + ) + + result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id) + result["IdentityProvider"]["ProviderName"].should.equal(provider_name) + result["IdentityProvider"]["ProviderType"].should.equal(provider_type) + result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(value) + + +@mock_cognitoidp +def test_list_identity_providers(): + conn = boto3.client("cognito-idp", "us-west-2") + + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={}, + ) + + result = conn.list_identity_providers( + UserPoolId=user_pool_id, + MaxResults=10, + ) + + result["Providers"].should.have.length_of(1) + result["Providers"][0]["ProviderName"].should.equal(provider_name) + result["Providers"][0]["ProviderType"].should.equal(provider_type) + + +@mock_cognitoidp +def test_describe_identity_providers(): + conn = boto3.client("cognito-idp", "us-west-2") + + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + value = str(uuid.uuid4()) + user_pool_id = 
conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={ + "thing": value + }, + ) + + result = conn.describe_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ) + + result["IdentityProvider"]["UserPoolId"].should.equal(user_pool_id) + result["IdentityProvider"]["ProviderName"].should.equal(provider_name) + result["IdentityProvider"]["ProviderType"].should.equal(provider_type) + result["IdentityProvider"]["ProviderDetails"]["thing"].should.equal(value) + + +@mock_cognitoidp +def test_delete_identity_providers(): + conn = boto3.client("cognito-idp", "us-west-2") + + provider_name = str(uuid.uuid4()) + provider_type = "Facebook" + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.create_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ProviderType=provider_type, + ProviderDetails={ + "thing": value + }, + ) + + conn.delete_identity_provider(UserPoolId=user_pool_id, ProviderName=provider_name) + + caught = False + try: + conn.describe_identity_provider( + UserPoolId=user_pool_id, + ProviderName=provider_name, + ) + except conn.exceptions.ResourceNotFoundException: + caught = True + + caught.should.be.true + + +@mock_cognitoidp +def test_admin_create_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + result = conn.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + UserAttributes=[ + {"Name": "thing", "Value": value} + ], + ) + + result["User"]["Username"].should.equal(username) + result["User"]["UserStatus"].should.equal("FORCE_CHANGE_PASSWORD") + result["User"]["Attributes"].should.have.length_of(1) + result["User"]["Attributes"][0]["Name"].should.equal("thing") + result["User"]["Attributes"][0]["Value"].should.equal(value) + result["User"]["Enabled"].should.equal(True) + + +@mock_cognitoidp +def test_admin_get_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + UserAttributes=[ + {"Name": "thing", "Value": value} + ], + ) + + result = conn.admin_get_user(UserPoolId=user_pool_id, Username=username) + result["Username"].should.equal(username) + result["UserAttributes"].should.have.length_of(1) + result["UserAttributes"][0]["Name"].should.equal("thing") + result["UserAttributes"][0]["Value"].should.equal(value) + + +@mock_cognitoidp +def test_admin_get_missing_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + caught = False + try: + conn.admin_get_user(UserPoolId=user_pool_id, Username=username) + except conn.exceptions.UserNotFoundException: + caught = True + + caught.should.be.true + + +@mock_cognitoidp +def test_list_users(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + result = 
conn.list_users(UserPoolId=user_pool_id) + result["Users"].should.have.length_of(1) + result["Users"][0]["Username"].should.equal(username) + + +@mock_cognitoidp +def test_admin_disable_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + + result = conn.admin_disable_user(UserPoolId=user_pool_id, Username=username) + list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected + + conn.admin_get_user(UserPoolId=user_pool_id, Username=username) \ + ["Enabled"].should.equal(False) + + +@mock_cognitoidp +def test_admin_enable_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + conn.admin_disable_user(UserPoolId=user_pool_id, Username=username) + + result = conn.admin_enable_user(UserPoolId=user_pool_id, Username=username) + list(result.keys()).should.equal(["ResponseMetadata"]) # No response expected + + conn.admin_get_user(UserPoolId=user_pool_id, Username=username) \ + ["Enabled"].should.equal(True) + + +@mock_cognitoidp +def test_admin_delete_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.admin_create_user(UserPoolId=user_pool_id, Username=username) + conn.admin_delete_user(UserPoolId=user_pool_id, Username=username) + + caught = False + try: + conn.admin_get_user(UserPoolId=user_pool_id, Username=username) + except conn.exceptions.UserNotFoundException: + caught = True + + caught.should.be.true + + +def authentication_flow(conn): + username = str(uuid.uuid4()) + temporary_password = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + user_attribute_name = str(uuid.uuid4()) + user_attribute_value = str(uuid.uuid4()) + client_id = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=str(uuid.uuid4()), + ReadAttributes=[user_attribute_name] + )["UserPoolClient"]["ClientId"] + + conn.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + TemporaryPassword=temporary_password, + UserAttributes=[{ + 'Name': user_attribute_name, + 'Value': user_attribute_value + }] + ) + + result = conn.admin_initiate_auth( + UserPoolId=user_pool_id, + ClientId=client_id, + AuthFlow="ADMIN_NO_SRP_AUTH", + AuthParameters={ + "USERNAME": username, + "PASSWORD": temporary_password + }, + ) + + # A newly created user is forced to set a new password + result["ChallengeName"].should.equal("NEW_PASSWORD_REQUIRED") + result["Session"].should_not.be.none + + # This sets a new password and logs the user in (creates tokens) + new_password = str(uuid.uuid4()) + result = conn.respond_to_auth_challenge( + Session=result["Session"], + ClientId=client_id, + ChallengeName="NEW_PASSWORD_REQUIRED", + ChallengeResponses={ + "USERNAME": username, + "NEW_PASSWORD": new_password + } + ) + + result["AuthenticationResult"]["IdToken"].should_not.be.none + result["AuthenticationResult"]["AccessToken"].should_not.be.none + + return { + "user_pool_id": user_pool_id, + "client_id": client_id, + "id_token": result["AuthenticationResult"]["IdToken"], + "access_token": result["AuthenticationResult"]["AccessToken"], + "username": 
username, + "password": new_password, + "additional_fields": { + user_attribute_name: user_attribute_value + } + } + + +@mock_cognitoidp +def test_authentication_flow(): + conn = boto3.client("cognito-idp", "us-west-2") + + authentication_flow(conn) + + +@mock_cognitoidp +def test_token_legitimacy(): + conn = boto3.client("cognito-idp", "us-west-2") + + path = "../../moto/cognitoidp/resources/jwks-public.json" + with open(os.path.join(os.path.dirname(__file__), path)) as f: + json_web_key = json.loads(f.read())["keys"][0] + + outputs = authentication_flow(conn) + id_token = outputs["id_token"] + access_token = outputs["access_token"] + client_id = outputs["client_id"] + issuer = "https://cognito-idp.us-west-2.amazonaws.com/{}".format(outputs["user_pool_id"]) + id_claims = json.loads(jws.verify(id_token, json_web_key, "RS256")) + id_claims["iss"].should.equal(issuer) + id_claims["aud"].should.equal(client_id) + access_claims = json.loads(jws.verify(access_token, json_web_key, "RS256")) + access_claims["iss"].should.equal(issuer) + access_claims["aud"].should.equal(client_id) + for k, v in outputs["additional_fields"].items(): + access_claims[k].should.equal(v) + + +@mock_cognitoidp +def test_change_password(): + conn = boto3.client("cognito-idp", "us-west-2") + + outputs = authentication_flow(conn) + + # Take this opportunity to test change_password, which requires an access token. + newer_password = str(uuid.uuid4()) + conn.change_password( + AccessToken=outputs["access_token"], + PreviousPassword=outputs["password"], + ProposedPassword=newer_password, + ) + + # Log in again, which should succeed without a challenge because the user is no + # longer in the force-new-password state. + result = conn.admin_initiate_auth( + UserPoolId=outputs["user_pool_id"], + ClientId=outputs["client_id"], + AuthFlow="ADMIN_NO_SRP_AUTH", + AuthParameters={ + "USERNAME": outputs["username"], + "PASSWORD": newer_password, + }, + ) + + result["AuthenticationResult"].should_not.be.none + + +@mock_cognitoidp +def test_forgot_password(): + conn = boto3.client("cognito-idp", "us-west-2") + + result = conn.forgot_password(ClientId=str(uuid.uuid4()), Username=str(uuid.uuid4())) + result["CodeDeliveryDetails"].should_not.be.none + + +@mock_cognitoidp +def test_confirm_forgot_password(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + client_id = conn.create_user_pool_client( + UserPoolId=user_pool_id, + ClientName=str(uuid.uuid4()), + )["UserPoolClient"]["ClientId"] + + conn.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + TemporaryPassword=str(uuid.uuid4()), + ) + + conn.confirm_forgot_password( + ClientId=client_id, + Username=username, + ConfirmationCode=str(uuid.uuid4()), + Password=str(uuid.uuid4()), + ) diff --git a/tests/test_core/test_decorator_calls.py b/tests/test_core/test_decorator_calls.py index 5d2f6a4ef..b7e5f7448 100644 --- a/tests/test_core/test_decorator_calls.py +++ b/tests/test_core/test_decorator_calls.py @@ -1,98 +1,98 @@ -from __future__ import unicode_literals -import boto -from boto.exception import EC2ResponseError -import sure # noqa -import unittest - -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises - -from moto import mock_ec2_deprecated, mock_s3_deprecated - -''' -Test the different ways that the decorator can be used -''' - - -@mock_ec2_deprecated -def test_basic_connect(): - boto.connect_ec2() - - 
-@mock_ec2_deprecated -def test_basic_decorator(): - conn = boto.connect_ec2('the_key', 'the_secret') - list(conn.get_all_instances()).should.equal([]) - - -def test_context_manager(): - conn = boto.connect_ec2('the_key', 'the_secret') - with assert_raises(EC2ResponseError): - conn.get_all_instances() - - with mock_ec2_deprecated(): - conn = boto.connect_ec2('the_key', 'the_secret') - list(conn.get_all_instances()).should.equal([]) - - with assert_raises(EC2ResponseError): - conn = boto.connect_ec2('the_key', 'the_secret') - conn.get_all_instances() - - -def test_decorator_start_and_stop(): - conn = boto.connect_ec2('the_key', 'the_secret') - with assert_raises(EC2ResponseError): - conn.get_all_instances() - - mock = mock_ec2_deprecated() - mock.start() - conn = boto.connect_ec2('the_key', 'the_secret') - list(conn.get_all_instances()).should.equal([]) - mock.stop() - - with assert_raises(EC2ResponseError): - conn.get_all_instances() - - -@mock_ec2_deprecated -def test_decorater_wrapped_gets_set(): - """ - Moto decorator's __wrapped__ should get set to the tests function - """ - test_decorater_wrapped_gets_set.__wrapped__.__name__.should.equal( - 'test_decorater_wrapped_gets_set') - - -@mock_ec2_deprecated -class Tester(object): - - def test_the_class(self): - conn = boto.connect_ec2() - list(conn.get_all_instances()).should.have.length_of(0) - - def test_still_the_same(self): - conn = boto.connect_ec2() - list(conn.get_all_instances()).should.have.length_of(0) - - -@mock_s3_deprecated -class TesterWithSetup(unittest.TestCase): - - def setUp(self): - self.conn = boto.connect_s3() - self.conn.create_bucket('mybucket') - - def test_still_the_same(self): - bucket = self.conn.get_bucket('mybucket') - bucket.name.should.equal("mybucket") - - -@mock_s3_deprecated -class TesterWithStaticmethod(object): - - @staticmethod - def static(*args): - assert not args or not isinstance(args[0], TesterWithStaticmethod) - - def test_no_instance_sent_to_staticmethod(self): - self.static() +from __future__ import unicode_literals +import boto +from boto.exception import EC2ResponseError +import sure # noqa +import unittest + +import tests.backport_assert_raises # noqa +from nose.tools import assert_raises + +from moto import mock_ec2_deprecated, mock_s3_deprecated + +''' +Test the different ways that the decorator can be used +''' + + +@mock_ec2_deprecated +def test_basic_connect(): + boto.connect_ec2() + + +@mock_ec2_deprecated +def test_basic_decorator(): + conn = boto.connect_ec2('the_key', 'the_secret') + list(conn.get_all_instances()).should.equal([]) + + +def test_context_manager(): + conn = boto.connect_ec2('the_key', 'the_secret') + with assert_raises(EC2ResponseError): + conn.get_all_instances() + + with mock_ec2_deprecated(): + conn = boto.connect_ec2('the_key', 'the_secret') + list(conn.get_all_instances()).should.equal([]) + + with assert_raises(EC2ResponseError): + conn = boto.connect_ec2('the_key', 'the_secret') + conn.get_all_instances() + + +def test_decorator_start_and_stop(): + conn = boto.connect_ec2('the_key', 'the_secret') + with assert_raises(EC2ResponseError): + conn.get_all_instances() + + mock = mock_ec2_deprecated() + mock.start() + conn = boto.connect_ec2('the_key', 'the_secret') + list(conn.get_all_instances()).should.equal([]) + mock.stop() + + with assert_raises(EC2ResponseError): + conn.get_all_instances() + + +@mock_ec2_deprecated +def test_decorater_wrapped_gets_set(): + """ + Moto decorator's __wrapped__ should get set to the tests function + """ + 
test_decorater_wrapped_gets_set.__wrapped__.__name__.should.equal( + 'test_decorater_wrapped_gets_set') + + +@mock_ec2_deprecated +class Tester(object): + + def test_the_class(self): + conn = boto.connect_ec2() + list(conn.get_all_instances()).should.have.length_of(0) + + def test_still_the_same(self): + conn = boto.connect_ec2() + list(conn.get_all_instances()).should.have.length_of(0) + + +@mock_s3_deprecated +class TesterWithSetup(unittest.TestCase): + + def setUp(self): + self.conn = boto.connect_s3() + self.conn.create_bucket('mybucket') + + def test_still_the_same(self): + bucket = self.conn.get_bucket('mybucket') + bucket.name.should.equal("mybucket") + + +@mock_s3_deprecated +class TesterWithStaticmethod(object): + + @staticmethod + def static(*args): + assert not args or not isinstance(args[0], TesterWithStaticmethod) + + def test_no_instance_sent_to_staticmethod(self): + self.static() diff --git a/tests/test_core/test_instance_metadata.py b/tests/test_core/test_instance_metadata.py index f8bf24814..b66f9637e 100644 --- a/tests/test_core/test_instance_metadata.py +++ b/tests/test_core/test_instance_metadata.py @@ -1,46 +1,46 @@ -from __future__ import unicode_literals -import sure # noqa -from nose.tools import assert_raises -import requests - -from moto import mock_ec2, settings - -if settings.TEST_SERVER_MODE: - BASE_URL = 'http://localhost:5000' -else: - BASE_URL = 'http://169.254.169.254' - - -@mock_ec2 -def test_latest_meta_data(): - res = requests.get("{0}/latest/meta-data/".format(BASE_URL)) - res.content.should.equal(b"iam") - - -@mock_ec2 -def test_meta_data_iam(): - res = requests.get("{0}/latest/meta-data/iam".format(BASE_URL)) - json_response = res.json() - default_role = json_response['security-credentials']['default-role'] - default_role.should.contain('AccessKeyId') - default_role.should.contain('SecretAccessKey') - default_role.should.contain('Token') - default_role.should.contain('Expiration') - - -@mock_ec2 -def test_meta_data_security_credentials(): - res = requests.get( - "{0}/latest/meta-data/iam/security-credentials/".format(BASE_URL)) - res.content.should.equal(b"default-role") - - -@mock_ec2 -def test_meta_data_default_role(): - res = requests.get( - "{0}/latest/meta-data/iam/security-credentials/default-role".format(BASE_URL)) - json_response = res.json() - json_response.should.contain('AccessKeyId') - json_response.should.contain('SecretAccessKey') - json_response.should.contain('Token') - json_response.should.contain('Expiration') +from __future__ import unicode_literals +import sure # noqa +from nose.tools import assert_raises +import requests + +from moto import mock_ec2, settings + +if settings.TEST_SERVER_MODE: + BASE_URL = 'http://localhost:5000' +else: + BASE_URL = 'http://169.254.169.254' + + +@mock_ec2 +def test_latest_meta_data(): + res = requests.get("{0}/latest/meta-data/".format(BASE_URL)) + res.content.should.equal(b"iam") + + +@mock_ec2 +def test_meta_data_iam(): + res = requests.get("{0}/latest/meta-data/iam".format(BASE_URL)) + json_response = res.json() + default_role = json_response['security-credentials']['default-role'] + default_role.should.contain('AccessKeyId') + default_role.should.contain('SecretAccessKey') + default_role.should.contain('Token') + default_role.should.contain('Expiration') + + +@mock_ec2 +def test_meta_data_security_credentials(): + res = requests.get( + "{0}/latest/meta-data/iam/security-credentials/".format(BASE_URL)) + res.content.should.equal(b"default-role") + + +@mock_ec2 +def test_meta_data_default_role(): 
+ res = requests.get( + "{0}/latest/meta-data/iam/security-credentials/default-role".format(BASE_URL)) + json_response = res.json() + json_response.should.contain('AccessKeyId') + json_response.should.contain('SecretAccessKey') + json_response.should.contain('Token') + json_response.should.contain('Expiration') diff --git a/tests/test_core/test_moto_api.py b/tests/test_core/test_moto_api.py index cb0ca8939..47dbe5a4a 100644 --- a/tests/test_core/test_moto_api.py +++ b/tests/test_core/test_moto_api.py @@ -1,33 +1,33 @@ -from __future__ import unicode_literals -import sure # noqa -from nose.tools import assert_raises -import requests - -import boto3 -from moto import mock_sqs, settings - -base_url = "http://localhost:5000" if settings.TEST_SERVER_MODE else "http://motoapi.amazonaws.com" - - -@mock_sqs -def test_reset_api(): - conn = boto3.client("sqs", region_name='us-west-1') - conn.create_queue(QueueName="queue1") - conn.list_queues()['QueueUrls'].should.have.length_of(1) - - res = requests.post("{base_url}/moto-api/reset".format(base_url=base_url)) - res.content.should.equal(b'{"status": "ok"}') - - conn.list_queues().shouldnt.contain('QueueUrls') # No more queues - - -@mock_sqs -def test_data_api(): - conn = boto3.client("sqs", region_name='us-west-1') - conn.create_queue(QueueName="queue1") - - res = requests.post("{base_url}/moto-api/data.json".format(base_url=base_url)) - queues = res.json()['sqs']['Queue'] - len(queues).should.equal(1) - queue = queues[0] - queue['name'].should.equal("queue1") +from __future__ import unicode_literals +import sure # noqa +from nose.tools import assert_raises +import requests + +import boto3 +from moto import mock_sqs, settings + +base_url = "http://localhost:5000" if settings.TEST_SERVER_MODE else "http://motoapi.amazonaws.com" + + +@mock_sqs +def test_reset_api(): + conn = boto3.client("sqs", region_name='us-west-1') + conn.create_queue(QueueName="queue1") + conn.list_queues()['QueueUrls'].should.have.length_of(1) + + res = requests.post("{base_url}/moto-api/reset".format(base_url=base_url)) + res.content.should.equal(b'{"status": "ok"}') + + conn.list_queues().shouldnt.contain('QueueUrls') # No more queues + + +@mock_sqs +def test_data_api(): + conn = boto3.client("sqs", region_name='us-west-1') + conn.create_queue(QueueName="queue1") + + res = requests.post("{base_url}/moto-api/data.json".format(base_url=base_url)) + queues = res.json()['sqs']['Queue'] + len(queues).should.equal(1) + queue = queues[0] + queue['name'].should.equal("queue1") diff --git a/tests/test_core/test_nested.py b/tests/test_core/test_nested.py index 7c0b8f687..ec10a69b9 100644 --- a/tests/test_core/test_nested.py +++ b/tests/test_core/test_nested.py @@ -1,29 +1,29 @@ -from __future__ import unicode_literals -import unittest - -from boto.sqs.connection import SQSConnection -from boto.sqs.message import Message -from boto.ec2 import EC2Connection - -from moto import mock_sqs_deprecated, mock_ec2_deprecated - - -class TestNestedDecorators(unittest.TestCase): - - @mock_sqs_deprecated - def setup_sqs_queue(self): - conn = SQSConnection() - q = conn.create_queue('some-queue') - - m = Message() - m.set_body('This is my first message.') - q.write(m) - - self.assertEqual(q.count(), 1) - - @mock_ec2_deprecated - def test_nested(self): - self.setup_sqs_queue() - - conn = EC2Connection() - conn.run_instances('ami-123456') +from __future__ import unicode_literals +import unittest + +from boto.sqs.connection import SQSConnection +from boto.sqs.message import Message +from boto.ec2 import 
EC2Connection + +from moto import mock_sqs_deprecated, mock_ec2_deprecated + + +class TestNestedDecorators(unittest.TestCase): + + @mock_sqs_deprecated + def setup_sqs_queue(self): + conn = SQSConnection() + q = conn.create_queue('some-queue') + + m = Message() + m.set_body('This is my first message.') + q.write(m) + + self.assertEqual(q.count(), 1) + + @mock_ec2_deprecated + def test_nested(self): + self.setup_sqs_queue() + + conn = EC2Connection() + conn.run_instances('ami-123456') diff --git a/tests/test_core/test_responses.py b/tests/test_core/test_responses.py index c3cc27aef..f3f369ff3 100644 --- a/tests/test_core/test_responses.py +++ b/tests/test_core/test_responses.py @@ -1,81 +1,81 @@ -from __future__ import unicode_literals - -import sure # noqa - -from moto.core.responses import AWSServiceSpec -from moto.core.responses import flatten_json_request_body - - -def test_flatten_json_request_body(): - spec = AWSServiceSpec( - 'data/emr/2009-03-31/service-2.json').input_spec('RunJobFlow') - - body = { - 'Name': 'cluster', - 'Instances': { - 'Ec2KeyName': 'ec2key', - 'InstanceGroups': [ - {'InstanceRole': 'MASTER', - 'InstanceType': 'm1.small'}, - {'InstanceRole': 'CORE', - 'InstanceType': 'm1.medium'}, - ], - 'Placement': {'AvailabilityZone': 'us-east-1'}, - }, - 'Steps': [ - {'HadoopJarStep': { - 'Properties': [ - {'Key': 'k1', 'Value': 'v1'}, - {'Key': 'k2', 'Value': 'v2'} - ], - 'Args': ['arg1', 'arg2']}}, - ], - 'Configurations': [ - {'Classification': 'class', - 'Properties': {'propkey1': 'propkey1', - 'propkey2': 'propkey2'}}, - {'Classification': 'anotherclass', - 'Properties': {'propkey3': 'propkey3'}}, - ] - } - - flat = flatten_json_request_body('', body, spec) - flat['Name'].should.equal(body['Name']) - flat['Instances.Ec2KeyName'].should.equal(body['Instances']['Ec2KeyName']) - for idx in range(2): - flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceRole'].should.equal( - body['Instances']['InstanceGroups'][idx]['InstanceRole']) - flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceType'].should.equal( - body['Instances']['InstanceGroups'][idx]['InstanceType']) - flat['Instances.Placement.AvailabilityZone'].should.equal( - body['Instances']['Placement']['AvailabilityZone']) - - for idx in range(1): - prefix = 'Steps.member.' + str(idx + 1) + '.HadoopJarStep' - step = body['Steps'][idx]['HadoopJarStep'] - i = 0 - while prefix + '.Properties.member.' + str(i + 1) + '.Key' in flat: - flat[prefix + '.Properties.member.' + - str(i + 1) + '.Key'].should.equal(step['Properties'][i]['Key']) - flat[prefix + '.Properties.member.' + - str(i + 1) + '.Value'].should.equal(step['Properties'][i]['Value']) - i += 1 - i = 0 - while prefix + '.Args.member.' + str(i + 1) in flat: - flat[prefix + '.Args.member.' + - str(i + 1)].should.equal(step['Args'][i]) - i += 1 - - for idx in range(2): - flat['Configurations.member.' 
+ str(idx + 1) + '.Classification'].should.equal( - body['Configurations'][idx]['Classification']) - - props = {} - i = 1 - keyfmt = 'Configurations.member.{0}.Properties.entry.{1}' - key = keyfmt.format(idx + 1, i) - while key + '.key' in flat: - props[flat[key + '.key']] = flat[key + '.value'] - i += 1 - key = keyfmt.format(idx + 1, i) - props.should.equal(body['Configurations'][idx]['Properties']) +from __future__ import unicode_literals + +import sure # noqa + +from moto.core.responses import AWSServiceSpec +from moto.core.responses import flatten_json_request_body + + +def test_flatten_json_request_body(): + spec = AWSServiceSpec( + 'data/emr/2009-03-31/service-2.json').input_spec('RunJobFlow') + + body = { + 'Name': 'cluster', + 'Instances': { + 'Ec2KeyName': 'ec2key', + 'InstanceGroups': [ + {'InstanceRole': 'MASTER', + 'InstanceType': 'm1.small'}, + {'InstanceRole': 'CORE', + 'InstanceType': 'm1.medium'}, + ], + 'Placement': {'AvailabilityZone': 'us-east-1'}, + }, + 'Steps': [ + {'HadoopJarStep': { + 'Properties': [ + {'Key': 'k1', 'Value': 'v1'}, + {'Key': 'k2', 'Value': 'v2'} + ], + 'Args': ['arg1', 'arg2']}}, + ], + 'Configurations': [ + {'Classification': 'class', + 'Properties': {'propkey1': 'propkey1', + 'propkey2': 'propkey2'}}, + {'Classification': 'anotherclass', + 'Properties': {'propkey3': 'propkey3'}}, + ] + } + + flat = flatten_json_request_body('', body, spec) + flat['Name'].should.equal(body['Name']) + flat['Instances.Ec2KeyName'].should.equal(body['Instances']['Ec2KeyName']) + for idx in range(2): + flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceRole'].should.equal( + body['Instances']['InstanceGroups'][idx]['InstanceRole']) + flat['Instances.InstanceGroups.member.' + str(idx + 1) + '.InstanceType'].should.equal( + body['Instances']['InstanceGroups'][idx]['InstanceType']) + flat['Instances.Placement.AvailabilityZone'].should.equal( + body['Instances']['Placement']['AvailabilityZone']) + + for idx in range(1): + prefix = 'Steps.member.' + str(idx + 1) + '.HadoopJarStep' + step = body['Steps'][idx]['HadoopJarStep'] + i = 0 + while prefix + '.Properties.member.' + str(i + 1) + '.Key' in flat: + flat[prefix + '.Properties.member.' + + str(i + 1) + '.Key'].should.equal(step['Properties'][i]['Key']) + flat[prefix + '.Properties.member.' + + str(i + 1) + '.Value'].should.equal(step['Properties'][i]['Value']) + i += 1 + i = 0 + while prefix + '.Args.member.' + str(i + 1) in flat: + flat[prefix + '.Args.member.' + + str(i + 1)].should.equal(step['Args'][i]) + i += 1 + + for idx in range(2): + flat['Configurations.member.' 
+ str(idx + 1) + '.Classification'].should.equal( + body['Configurations'][idx]['Classification']) + + props = {} + i = 1 + keyfmt = 'Configurations.member.{0}.Properties.entry.{1}' + key = keyfmt.format(idx + 1, i) + while key + '.key' in flat: + props[flat[key + '.key']] = flat[key + '.value'] + i += 1 + key = keyfmt.format(idx + 1, i) + props.should.equal(body['Configurations'][idx]['Properties']) diff --git a/tests/test_core/test_server.py b/tests/test_core/test_server.py index b7290e351..d1261a49a 100644 --- a/tests/test_core/test_server.py +++ b/tests/test_core/test_server.py @@ -1,53 +1,53 @@ -from __future__ import unicode_literals -from mock import patch -import sure # noqa - -from moto.server import main, create_backend_app, DomainDispatcherApplication - - -def test_wrong_arguments(): - try: - main(["name", "test1", "test2", "test3"]) - assert False, ("main() when called with the incorrect number of args" - " should raise a system exit") - except SystemExit: - pass - - -@patch('moto.server.run_simple') -def test_right_arguments(run_simple): - main(["s3"]) - func_call = run_simple.call_args[0] - func_call[0].should.equal("127.0.0.1") - func_call[1].should.equal(5000) - - -@patch('moto.server.run_simple') -def test_port_argument(run_simple): - main(["s3", "--port", "8080"]) - func_call = run_simple.call_args[0] - func_call[0].should.equal("127.0.0.1") - func_call[1].should.equal(8080) - - -def test_domain_dispatched(): - dispatcher = DomainDispatcherApplication(create_backend_app) - backend_app = dispatcher.get_application( - {"HTTP_HOST": "email.us-east1.amazonaws.com"}) - keys = list(backend_app.view_functions.keys()) - keys[0].should.equal('EmailResponse.dispatch') - - -def test_domain_without_matches(): - dispatcher = DomainDispatcherApplication(create_backend_app) - dispatcher.get_application.when.called_with( - {"HTTP_HOST": "not-matching-anything.com"}).should.throw(RuntimeError) - - -def test_domain_dispatched_with_service(): - # If we pass a particular service, always return that. 
-    dispatcher = DomainDispatcherApplication(create_backend_app, service="s3")
-    backend_app = dispatcher.get_application(
-        {"HTTP_HOST": "s3.us-east1.amazonaws.com"})
-    keys = set(backend_app.view_functions.keys())
-    keys.should.contain('ResponseObject.key_response')
+from __future__ import unicode_literals
+from mock import patch
+import sure  # noqa
+
+from moto.server import main, create_backend_app, DomainDispatcherApplication
+
+
+def test_wrong_arguments():
+    try:
+        main(["name", "test1", "test2", "test3"])
+        assert False, ("main() when called with the incorrect number of args"
+                       " should raise a system exit")
+    except SystemExit:
+        pass
+
+
+@patch('moto.server.run_simple')
+def test_right_arguments(run_simple):
+    main(["s3"])
+    func_call = run_simple.call_args[0]
+    func_call[0].should.equal("127.0.0.1")
+    func_call[1].should.equal(5000)
+
+
+@patch('moto.server.run_simple')
+def test_port_argument(run_simple):
+    main(["s3", "--port", "8080"])
+    func_call = run_simple.call_args[0]
+    func_call[0].should.equal("127.0.0.1")
+    func_call[1].should.equal(8080)
+
+
+def test_domain_dispatched():
+    dispatcher = DomainDispatcherApplication(create_backend_app)
+    backend_app = dispatcher.get_application(
+        {"HTTP_HOST": "email.us-east1.amazonaws.com"})
+    keys = list(backend_app.view_functions.keys())
+    keys[0].should.equal('EmailResponse.dispatch')
+
+
+def test_domain_without_matches():
+    dispatcher = DomainDispatcherApplication(create_backend_app)
+    dispatcher.get_application.when.called_with(
+        {"HTTP_HOST": "not-matching-anything.com"}).should.throw(RuntimeError)
+
+
+def test_domain_dispatched_with_service():
+    # If we pass a particular service, always return that.
+    dispatcher = DomainDispatcherApplication(create_backend_app, service="s3")
+    backend_app = dispatcher.get_application(
+        {"HTTP_HOST": "s3.us-east1.amazonaws.com"})
+    keys = set(backend_app.view_functions.keys())
+    keys.should.contain('ResponseObject.key_response')
diff --git a/tests/test_core/test_url_mapping.py b/tests/test_core/test_url_mapping.py
index 8f7921a5a..b58e991c4 100644
--- a/tests/test_core/test_url_mapping.py
+++ b/tests/test_core/test_url_mapping.py
@@ -1,22 +1,22 @@
-from __future__ import unicode_literals
-import sure  # noqa
-
-from moto.core.utils import convert_regex_to_flask_path
-
-
-def test_flask_path_converting_simple():
-    convert_regex_to_flask_path("/").should.equal("/")
-    convert_regex_to_flask_path("/$").should.equal("/")
-
-    convert_regex_to_flask_path("/foo").should.equal("/foo")
-
-    convert_regex_to_flask_path("/foo/bar/").should.equal("/foo/bar/")
-
-
-def test_flask_path_converting_regex():
-    convert_regex_to_flask_path(
-        "/(?P<key_name>[a-zA-Z0-9\-_]+)").should.equal('/<regex("[a-zA-Z0-9\-_]+"):key_name>')
-
-    convert_regex_to_flask_path("(?P<account_id>\d+)/(?P<queue_name>.*)$").should.equal(
-        '<regex("\d+"):account_id>/<regex(".*"):queue_name>'
-    )
+from __future__ import unicode_literals
+import sure  # noqa
+
+from moto.core.utils import convert_regex_to_flask_path
+
+
+def test_flask_path_converting_simple():
+    convert_regex_to_flask_path("/").should.equal("/")
+    convert_regex_to_flask_path("/$").should.equal("/")
+
+    convert_regex_to_flask_path("/foo").should.equal("/foo")
+
+    convert_regex_to_flask_path("/foo/bar/").should.equal("/foo/bar/")
+
+
+def test_flask_path_converting_regex():
+    convert_regex_to_flask_path(
+        "/(?P<key_name>[a-zA-Z0-9\-_]+)").should.equal('/<regex("[a-zA-Z0-9\-_]+"):key_name>')
+
+    convert_regex_to_flask_path("(?P<account_id>\d+)/(?P<queue_name>.*)$").should.equal(
+        '<regex("\d+"):account_id>/<regex(".*"):queue_name>'
+    )
diff --git a/tests/test_core/test_utils.py b/tests/test_core/test_utils.py
index 8dbf21716..22449a910 100644
--- a/tests/test_core/test_utils.py
+++
b/tests/test_core/test_utils.py @@ -1,30 +1,30 @@ -from __future__ import unicode_literals - -import sure # noqa -from freezegun import freeze_time - -from moto.core.utils import camelcase_to_underscores, underscores_to_camelcase, unix_time - - -def test_camelcase_to_underscores(): - cases = { - "theNewAttribute": "the_new_attribute", - "attri bute With Space": "attribute_with_space", - "FirstLetterCapital": "first_letter_capital", - "ListMFADevices": "list_mfa_devices", - } - for arg, expected in cases.items(): - camelcase_to_underscores(arg).should.equal(expected) - - -def test_underscores_to_camelcase(): - cases = { - "the_new_attribute": "theNewAttribute", - } - for arg, expected in cases.items(): - underscores_to_camelcase(arg).should.equal(expected) - - -@freeze_time("2015-01-01 12:00:00") -def test_unix_time(): - unix_time().should.equal(1420113600.0) +from __future__ import unicode_literals + +import sure # noqa +from freezegun import freeze_time + +from moto.core.utils import camelcase_to_underscores, underscores_to_camelcase, unix_time + + +def test_camelcase_to_underscores(): + cases = { + "theNewAttribute": "the_new_attribute", + "attri bute With Space": "attribute_with_space", + "FirstLetterCapital": "first_letter_capital", + "ListMFADevices": "list_mfa_devices", + } + for arg, expected in cases.items(): + camelcase_to_underscores(arg).should.equal(expected) + + +def test_underscores_to_camelcase(): + cases = { + "the_new_attribute": "theNewAttribute", + } + for arg, expected in cases.items(): + underscores_to_camelcase(arg).should.equal(expected) + + +@freeze_time("2015-01-01 12:00:00") +def test_unix_time(): + unix_time().should.equal(1420113600.0) diff --git a/tests/test_datapipeline/test_datapipeline.py b/tests/test_datapipeline/test_datapipeline.py index ce190c7e4..7cf76f5d7 100644 --- a/tests/test_datapipeline/test_datapipeline.py +++ b/tests/test_datapipeline/test_datapipeline.py @@ -1,204 +1,204 @@ -from __future__ import unicode_literals - -import boto.datapipeline -import sure # noqa - -from moto import mock_datapipeline_deprecated -from moto.datapipeline.utils import remove_capitalization_of_dict_keys - - -def get_value_from_fields(key, fields): - for field in fields: - if field['key'] == key: - return field['stringValue'] - - -@mock_datapipeline_deprecated -def test_create_pipeline(): - conn = boto.datapipeline.connect_to_region("us-west-2") - - res = conn.create_pipeline("mypipeline", "some-unique-id") - - pipeline_id = res["pipelineId"] - pipeline_descriptions = conn.describe_pipelines( - [pipeline_id])["pipelineDescriptionList"] - pipeline_descriptions.should.have.length_of(1) - - pipeline_description = pipeline_descriptions[0] - pipeline_description['name'].should.equal("mypipeline") - pipeline_description["pipelineId"].should.equal(pipeline_id) - fields = pipeline_description['fields'] - - get_value_from_fields('@pipelineState', fields).should.equal("PENDING") - get_value_from_fields('uniqueId', fields).should.equal("some-unique-id") - - -PIPELINE_OBJECTS = [ - { - "id": "Default", - "name": "Default", - "fields": [{ - "key": "workerGroup", - "stringValue": "workerGroup" - }] - }, - { - "id": "Schedule", - "name": "Schedule", - "fields": [{ - "key": "startDateTime", - "stringValue": "2012-12-12T00:00:00" - }, { - "key": "type", - "stringValue": "Schedule" - }, { - "key": "period", - "stringValue": "1 hour" - }, { - "key": "endDateTime", - "stringValue": "2012-12-21T18:00:00" - }] - }, - { - "id": "SayHello", - "name": "SayHello", - "fields": [{ - "key": 
"type", - "stringValue": "ShellCommandActivity" - }, { - "key": "command", - "stringValue": "echo hello" - }, { - "key": "parent", - "refValue": "Default" - }, { - "key": "schedule", - "refValue": "Schedule" - }] - } -] - - -@mock_datapipeline_deprecated -def test_creating_pipeline_definition(): - conn = boto.datapipeline.connect_to_region("us-west-2") - res = conn.create_pipeline("mypipeline", "some-unique-id") - pipeline_id = res["pipelineId"] - - conn.put_pipeline_definition(PIPELINE_OBJECTS, pipeline_id) - - pipeline_definition = conn.get_pipeline_definition(pipeline_id) - pipeline_definition['pipelineObjects'].should.have.length_of(3) - default_object = pipeline_definition['pipelineObjects'][0] - default_object['name'].should.equal("Default") - default_object['id'].should.equal("Default") - default_object['fields'].should.equal([{ - "key": "workerGroup", - "stringValue": "workerGroup" - }]) - - -@mock_datapipeline_deprecated -def test_describing_pipeline_objects(): - conn = boto.datapipeline.connect_to_region("us-west-2") - res = conn.create_pipeline("mypipeline", "some-unique-id") - pipeline_id = res["pipelineId"] - - conn.put_pipeline_definition(PIPELINE_OBJECTS, pipeline_id) - - objects = conn.describe_objects(["Schedule", "Default"], pipeline_id)[ - 'pipelineObjects'] - - objects.should.have.length_of(2) - default_object = [x for x in objects if x['id'] == 'Default'][0] - default_object['name'].should.equal("Default") - default_object['fields'].should.equal([{ - "key": "workerGroup", - "stringValue": "workerGroup" - }]) - - -@mock_datapipeline_deprecated -def test_activate_pipeline(): - conn = boto.datapipeline.connect_to_region("us-west-2") - - res = conn.create_pipeline("mypipeline", "some-unique-id") - - pipeline_id = res["pipelineId"] - conn.activate_pipeline(pipeline_id) - - pipeline_descriptions = conn.describe_pipelines( - [pipeline_id])["pipelineDescriptionList"] - pipeline_descriptions.should.have.length_of(1) - pipeline_description = pipeline_descriptions[0] - fields = pipeline_description['fields'] - - get_value_from_fields('@pipelineState', fields).should.equal("SCHEDULED") - - -@mock_datapipeline_deprecated -def test_delete_pipeline(): - conn = boto.datapipeline.connect_to_region("us-west-2") - res = conn.create_pipeline("mypipeline", "some-unique-id") - pipeline_id = res["pipelineId"] - - conn.delete_pipeline(pipeline_id) - - response = conn.list_pipelines() - - response["pipelineIdList"].should.have.length_of(0) - - -@mock_datapipeline_deprecated -def test_listing_pipelines(): - conn = boto.datapipeline.connect_to_region("us-west-2") - res1 = conn.create_pipeline("mypipeline1", "some-unique-id1") - res2 = conn.create_pipeline("mypipeline2", "some-unique-id2") - - response = conn.list_pipelines() - - response["hasMoreResults"].should.be(False) - response["marker"].should.be.none - response["pipelineIdList"].should.have.length_of(2) - response["pipelineIdList"].should.contain({ - "id": res1["pipelineId"], - "name": "mypipeline1", - }) - response["pipelineIdList"].should.contain({ - "id": res2["pipelineId"], - "name": "mypipeline2" - }) - - -@mock_datapipeline_deprecated -def test_listing_paginated_pipelines(): - conn = boto.datapipeline.connect_to_region("us-west-2") - for i in range(100): - conn.create_pipeline("mypipeline%d" % i, "some-unique-id%d" % i) - - response = conn.list_pipelines() - - response["hasMoreResults"].should.be(True) - response["marker"].should.equal(response["pipelineIdList"][-1]['id']) - response["pipelineIdList"].should.have.length_of(50) - - 
-# testing a helper function -def test_remove_capitalization_of_dict_keys(): - result = remove_capitalization_of_dict_keys( - { - "Id": "IdValue", - "Fields": [{ - "Key": "KeyValue", - "StringValue": "StringValueValue" - }] - } - ) - - result.should.equal({ - "id": "IdValue", - "fields": [{ - "key": "KeyValue", - "stringValue": "StringValueValue" - }], - }) +from __future__ import unicode_literals + +import boto.datapipeline +import sure # noqa + +from moto import mock_datapipeline_deprecated +from moto.datapipeline.utils import remove_capitalization_of_dict_keys + + +def get_value_from_fields(key, fields): + for field in fields: + if field['key'] == key: + return field['stringValue'] + + +@mock_datapipeline_deprecated +def test_create_pipeline(): + conn = boto.datapipeline.connect_to_region("us-west-2") + + res = conn.create_pipeline("mypipeline", "some-unique-id") + + pipeline_id = res["pipelineId"] + pipeline_descriptions = conn.describe_pipelines( + [pipeline_id])["pipelineDescriptionList"] + pipeline_descriptions.should.have.length_of(1) + + pipeline_description = pipeline_descriptions[0] + pipeline_description['name'].should.equal("mypipeline") + pipeline_description["pipelineId"].should.equal(pipeline_id) + fields = pipeline_description['fields'] + + get_value_from_fields('@pipelineState', fields).should.equal("PENDING") + get_value_from_fields('uniqueId', fields).should.equal("some-unique-id") + + +PIPELINE_OBJECTS = [ + { + "id": "Default", + "name": "Default", + "fields": [{ + "key": "workerGroup", + "stringValue": "workerGroup" + }] + }, + { + "id": "Schedule", + "name": "Schedule", + "fields": [{ + "key": "startDateTime", + "stringValue": "2012-12-12T00:00:00" + }, { + "key": "type", + "stringValue": "Schedule" + }, { + "key": "period", + "stringValue": "1 hour" + }, { + "key": "endDateTime", + "stringValue": "2012-12-21T18:00:00" + }] + }, + { + "id": "SayHello", + "name": "SayHello", + "fields": [{ + "key": "type", + "stringValue": "ShellCommandActivity" + }, { + "key": "command", + "stringValue": "echo hello" + }, { + "key": "parent", + "refValue": "Default" + }, { + "key": "schedule", + "refValue": "Schedule" + }] + } +] + + +@mock_datapipeline_deprecated +def test_creating_pipeline_definition(): + conn = boto.datapipeline.connect_to_region("us-west-2") + res = conn.create_pipeline("mypipeline", "some-unique-id") + pipeline_id = res["pipelineId"] + + conn.put_pipeline_definition(PIPELINE_OBJECTS, pipeline_id) + + pipeline_definition = conn.get_pipeline_definition(pipeline_id) + pipeline_definition['pipelineObjects'].should.have.length_of(3) + default_object = pipeline_definition['pipelineObjects'][0] + default_object['name'].should.equal("Default") + default_object['id'].should.equal("Default") + default_object['fields'].should.equal([{ + "key": "workerGroup", + "stringValue": "workerGroup" + }]) + + +@mock_datapipeline_deprecated +def test_describing_pipeline_objects(): + conn = boto.datapipeline.connect_to_region("us-west-2") + res = conn.create_pipeline("mypipeline", "some-unique-id") + pipeline_id = res["pipelineId"] + + conn.put_pipeline_definition(PIPELINE_OBJECTS, pipeline_id) + + objects = conn.describe_objects(["Schedule", "Default"], pipeline_id)[ + 'pipelineObjects'] + + objects.should.have.length_of(2) + default_object = [x for x in objects if x['id'] == 'Default'][0] + default_object['name'].should.equal("Default") + default_object['fields'].should.equal([{ + "key": "workerGroup", + "stringValue": "workerGroup" + }]) + + +@mock_datapipeline_deprecated +def 
test_activate_pipeline(): + conn = boto.datapipeline.connect_to_region("us-west-2") + + res = conn.create_pipeline("mypipeline", "some-unique-id") + + pipeline_id = res["pipelineId"] + conn.activate_pipeline(pipeline_id) + + pipeline_descriptions = conn.describe_pipelines( + [pipeline_id])["pipelineDescriptionList"] + pipeline_descriptions.should.have.length_of(1) + pipeline_description = pipeline_descriptions[0] + fields = pipeline_description['fields'] + + get_value_from_fields('@pipelineState', fields).should.equal("SCHEDULED") + + +@mock_datapipeline_deprecated +def test_delete_pipeline(): + conn = boto.datapipeline.connect_to_region("us-west-2") + res = conn.create_pipeline("mypipeline", "some-unique-id") + pipeline_id = res["pipelineId"] + + conn.delete_pipeline(pipeline_id) + + response = conn.list_pipelines() + + response["pipelineIdList"].should.have.length_of(0) + + +@mock_datapipeline_deprecated +def test_listing_pipelines(): + conn = boto.datapipeline.connect_to_region("us-west-2") + res1 = conn.create_pipeline("mypipeline1", "some-unique-id1") + res2 = conn.create_pipeline("mypipeline2", "some-unique-id2") + + response = conn.list_pipelines() + + response["hasMoreResults"].should.be(False) + response["marker"].should.be.none + response["pipelineIdList"].should.have.length_of(2) + response["pipelineIdList"].should.contain({ + "id": res1["pipelineId"], + "name": "mypipeline1", + }) + response["pipelineIdList"].should.contain({ + "id": res2["pipelineId"], + "name": "mypipeline2" + }) + + +@mock_datapipeline_deprecated +def test_listing_paginated_pipelines(): + conn = boto.datapipeline.connect_to_region("us-west-2") + for i in range(100): + conn.create_pipeline("mypipeline%d" % i, "some-unique-id%d" % i) + + response = conn.list_pipelines() + + response["hasMoreResults"].should.be(True) + response["marker"].should.equal(response["pipelineIdList"][-1]['id']) + response["pipelineIdList"].should.have.length_of(50) + + +# testing a helper function +def test_remove_capitalization_of_dict_keys(): + result = remove_capitalization_of_dict_keys( + { + "Id": "IdValue", + "Fields": [{ + "Key": "KeyValue", + "StringValue": "StringValueValue" + }] + } + ) + + result.should.equal({ + "id": "IdValue", + "fields": [{ + "key": "KeyValue", + "stringValue": "StringValueValue" + }], + }) diff --git a/tests/test_datapipeline/test_server.py b/tests/test_datapipeline/test_server.py index 03c77b034..7cb2657da 100644 --- a/tests/test_datapipeline/test_server.py +++ b/tests/test_datapipeline/test_server.py @@ -1,28 +1,28 @@ -from __future__ import unicode_literals - -import json -import sure # noqa - -import moto.server as server -from moto import mock_datapipeline - -''' -Test the different server responses -''' - - -@mock_datapipeline -def test_list_streams(): - backend = server.create_backend_app("datapipeline") - test_client = backend.test_client() - - res = test_client.post('/', - data={"pipelineIds": ["ASdf"]}, - headers={ - "X-Amz-Target": "DataPipeline.DescribePipelines"}, - ) - - json_data = json.loads(res.data.decode("utf-8")) - json_data.should.equal({ - 'pipelineDescriptionList': [] - }) +from __future__ import unicode_literals + +import json +import sure # noqa + +import moto.server as server +from moto import mock_datapipeline + +''' +Test the different server responses +''' + + +@mock_datapipeline +def test_list_streams(): + backend = server.create_backend_app("datapipeline") + test_client = backend.test_client() + + res = test_client.post('/', + data={"pipelineIds": ["ASdf"]}, + headers={ + 
"X-Amz-Target": "DataPipeline.DescribePipelines"}, + ) + + json_data = json.loads(res.data.decode("utf-8")) + json_data.should.equal({ + 'pipelineDescriptionList': [] + }) diff --git a/tests/test_dynamodb/test_dynamodb.py b/tests/test_dynamodb/test_dynamodb.py index d48519755..2c675756f 100644 --- a/tests/test_dynamodb/test_dynamodb.py +++ b/tests/test_dynamodb/test_dynamodb.py @@ -1,54 +1,54 @@ -from __future__ import unicode_literals -import six -import boto -import boto.dynamodb -import sure # noqa -import requests -import tests.backport_assert_raises -from nose.tools import assert_raises - -from moto import mock_dynamodb, mock_dynamodb_deprecated -from moto.dynamodb import dynamodb_backend - -from boto.exception import DynamoDBResponseError - - -@mock_dynamodb_deprecated -def test_list_tables(): - name = 'TestTable' - dynamodb_backend.create_table( - name, hash_key_attr="name", hash_key_type="S") - conn = boto.connect_dynamodb('the_key', 'the_secret') - assert conn.list_tables() == ['TestTable'] - - -@mock_dynamodb_deprecated -def test_list_tables_layer_1(): - dynamodb_backend.create_table( - "test_1", hash_key_attr="name", hash_key_type="S") - dynamodb_backend.create_table( - "test_2", hash_key_attr="name", hash_key_type="S") - conn = boto.connect_dynamodb('the_key', 'the_secret') - res = conn.layer1.list_tables(limit=1) - expected = {"TableNames": ["test_1"], "LastEvaluatedTableName": "test_1"} - res.should.equal(expected) - - res = conn.layer1.list_tables(limit=1, start_table="test_1") - expected = {"TableNames": ["test_2"]} - res.should.equal(expected) - - -@mock_dynamodb_deprecated -def test_describe_missing_table(): - conn = boto.connect_dynamodb('the_key', 'the_secret') - with assert_raises(DynamoDBResponseError): - conn.describe_table('messages') - - -@mock_dynamodb_deprecated -def test_dynamodb_with_connect_to_region(): - # this will work if connected with boto.connect_dynamodb() - dynamodb = boto.dynamodb.connect_to_region('us-west-2') - - schema = dynamodb.create_schema('column1', str(), 'column2', int()) - dynamodb.create_table('table1', schema, 200, 200) +from __future__ import unicode_literals +import six +import boto +import boto.dynamodb +import sure # noqa +import requests +import tests.backport_assert_raises +from nose.tools import assert_raises + +from moto import mock_dynamodb, mock_dynamodb_deprecated +from moto.dynamodb import dynamodb_backend + +from boto.exception import DynamoDBResponseError + + +@mock_dynamodb_deprecated +def test_list_tables(): + name = 'TestTable' + dynamodb_backend.create_table( + name, hash_key_attr="name", hash_key_type="S") + conn = boto.connect_dynamodb('the_key', 'the_secret') + assert conn.list_tables() == ['TestTable'] + + +@mock_dynamodb_deprecated +def test_list_tables_layer_1(): + dynamodb_backend.create_table( + "test_1", hash_key_attr="name", hash_key_type="S") + dynamodb_backend.create_table( + "test_2", hash_key_attr="name", hash_key_type="S") + conn = boto.connect_dynamodb('the_key', 'the_secret') + res = conn.layer1.list_tables(limit=1) + expected = {"TableNames": ["test_1"], "LastEvaluatedTableName": "test_1"} + res.should.equal(expected) + + res = conn.layer1.list_tables(limit=1, start_table="test_1") + expected = {"TableNames": ["test_2"]} + res.should.equal(expected) + + +@mock_dynamodb_deprecated +def test_describe_missing_table(): + conn = boto.connect_dynamodb('the_key', 'the_secret') + with assert_raises(DynamoDBResponseError): + conn.describe_table('messages') + + +@mock_dynamodb_deprecated +def 
test_dynamodb_with_connect_to_region(): + # this will work if connected with boto.connect_dynamodb() + dynamodb = boto.dynamodb.connect_to_region('us-west-2') + + schema = dynamodb.create_schema('column1', str(), 'column2', int()) + dynamodb.create_table('table1', schema, 200, 200) diff --git a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py index 2a482b31e..ee6738934 100644 --- a/tests/test_dynamodb/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb/test_dynamodb_table_with_range_key.py @@ -1,526 +1,526 @@ -from __future__ import unicode_literals - -import boto -import sure # noqa -from freezegun import freeze_time - -from moto import mock_dynamodb_deprecated - -from boto.dynamodb import condition -from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError, DynamoDBValidationError -from boto.exception import DynamoDBResponseError - - -def create_table(conn): - message_table_schema = conn.create_schema( - hash_key_name='forum_name', - hash_key_proto_value=str, - range_key_name='subject', - range_key_proto_value=str - ) - - table = conn.create_table( - name='messages', - schema=message_table_schema, - read_units=10, - write_units=10 - ) - return table - - -@freeze_time("2012-01-14") -@mock_dynamodb_deprecated -def test_create_table(): - conn = boto.connect_dynamodb() - create_table(conn) - - expected = { - 'Table': { - 'CreationDateTime': 1326499200.0, - 'ItemCount': 0, - 'KeySchema': { - 'HashKeyElement': { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - 'RangeKeyElement': { - 'AttributeName': 'subject', - 'AttributeType': 'S' - } - }, - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 10, - 'WriteCapacityUnits': 10 - }, - 'TableName': 'messages', - 'TableSizeBytes': 0, - 'TableStatus': 'ACTIVE' - } - } - conn.describe_table('messages').should.equal(expected) - - -@mock_dynamodb_deprecated -def test_delete_table(): - conn = boto.connect_dynamodb() - create_table(conn) - conn.list_tables().should.have.length_of(1) - - conn.layer1.delete_table('messages') - conn.list_tables().should.have.length_of(0) - - conn.layer1.delete_table.when.called_with( - 'messages').should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_update_table_throughput(): - conn = boto.connect_dynamodb() - table = create_table(conn) - table.read_units.should.equal(10) - table.write_units.should.equal(10) - - table.update_throughput(5, 6) - table.refresh() - - table.read_units.should.equal(5) - table.write_units.should.equal(6) - - -@mock_dynamodb_deprecated -def test_item_add_and_describe_and_update(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='LOLCat Forum', - range_key='Check this out!', - attrs=item_data, - ) - item.put() - - table.has_item("LOLCat Forum", "Check this out!").should.equal(True) - - returned_item = table.get_item( - hash_key='LOLCat Forum', - range_key='Check this out!', - attributes_to_get=['Body', 'SentBy'] - ) - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'subject': 'Check this out!', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - }) - - item['SentBy'] = 'User B' - item.put() - - returned_item = table.get_item( - hash_key='LOLCat Forum', - range_key='Check this out!', - attributes_to_get=['Body', 'SentBy'] - ) - dict(returned_item).should.equal({ - 'forum_name': 
'LOLCat Forum', - 'subject': 'Check this out!', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - }) - - -@mock_dynamodb_deprecated -def test_item_put_without_table(): - conn = boto.connect_dynamodb() - - conn.layer1.put_item.when.called_with( - table_name='undeclared-table', - item=dict( - hash_key='LOLCat Forum', - range_key='Check this out!', - ), - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_get_missing_item(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - table.get_item.when.called_with( - hash_key='tester', - range_key='other', - ).should.throw(DynamoDBKeyNotFoundError) - table.has_item("foobar", "more").should.equal(False) - - -@mock_dynamodb_deprecated -def test_get_item_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.get_item.when.called_with( - table_name='undeclared-table', - key={ - 'HashKeyElement': {'S': 'tester'}, - 'RangeKeyElement': {'S': 'test-range'}, - }, - ).should.throw(DynamoDBKeyNotFoundError) - - -@mock_dynamodb_deprecated -def test_get_item_without_range_key(): - conn = boto.connect_dynamodb() - message_table_schema = conn.create_schema( - hash_key_name="test_hash", - hash_key_proto_value=int, - range_key_name="test_range", - range_key_proto_value=int, - ) - table = conn.create_table( - name='messages', - schema=message_table_schema, - read_units=10, - write_units=10 - ) - - hash_key = 3241526475 - range_key = 1234567890987 - new_item = table.new_item(hash_key=hash_key, range_key=range_key) - new_item.put() - - table.get_item.when.called_with( - hash_key=hash_key).should.throw(DynamoDBValidationError) - - -@mock_dynamodb_deprecated -def test_delete_item(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='LOLCat Forum', - range_key='Check this out!', - attrs=item_data, - ) - item.put() - - table.refresh() - table.item_count.should.equal(1) - - response = item.delete() - response.should.equal({u'Attributes': [], u'ConsumedCapacityUnits': 0.5}) - table.refresh() - table.item_count.should.equal(0) - - item.delete.when.called_with().should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_delete_item_with_attribute_response(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='LOLCat Forum', - range_key='Check this out!', - attrs=item_data, - ) - item.put() - - table.refresh() - table.item_count.should.equal(1) - - response = item.delete(return_values='ALL_OLD') - response.should.equal({ - 'Attributes': { - 'Body': 'http://url_to_lolcat.gif', - 'forum_name': 'LOLCat Forum', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'SentBy': 'User A', - 'subject': 'Check this out!' 
- }, - 'ConsumedCapacityUnits': 0.5 - }) - table.refresh() - table.item_count.should.equal(0) - - item.delete.when.called_with().should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_delete_item_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.delete_item.when.called_with( - table_name='undeclared-table', - key={ - 'HashKeyElement': {'S': 'tester'}, - 'RangeKeyElement': {'S': 'test-range'}, - }, - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_query(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='the-key', - range_key='456', - attrs=item_data, - ) - item.put() - - item = table.new_item( - hash_key='the-key', - range_key='123', - attrs=item_data, - ) - item.put() - - item = table.new_item( - hash_key='the-key', - range_key='789', - attrs=item_data, - ) - item.put() - - results = table.query(hash_key='the-key', - range_key_condition=condition.GT('1')) - results.response['Items'].should.have.length_of(3) - - results = table.query(hash_key='the-key', - range_key_condition=condition.GT('234')) - results.response['Items'].should.have.length_of(2) - - results = table.query(hash_key='the-key', - range_key_condition=condition.GT('9999')) - results.response['Items'].should.have.length_of(0) - - results = table.query(hash_key='the-key', - range_key_condition=condition.CONTAINS('12')) - results.response['Items'].should.have.length_of(1) - - results = table.query(hash_key='the-key', - range_key_condition=condition.BEGINS_WITH('7')) - results.response['Items'].should.have.length_of(1) - - results = table.query(hash_key='the-key', - range_key_condition=condition.BETWEEN('567', '890')) - results.response['Items'].should.have.length_of(1) - - -@mock_dynamodb_deprecated -def test_query_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.query.when.called_with( - table_name='undeclared-table', - hash_key_value={'S': 'the-key'}, - range_key_conditions={ - "AttributeValueList": [{ - "S": "User B" - }], - "ComparisonOperator": "EQ", - }, - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_scan(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='the-key', - range_key='456', - attrs=item_data, - ) - item.put() - - item = table.new_item( - hash_key='the-key', - range_key='123', - attrs=item_data, - ) - item.put() - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - } - item = table.new_item( - hash_key='the-key', - range_key='789', - attrs=item_data, - ) - item.put() - - results = table.scan() - results.response['Items'].should.have.length_of(3) - - results = table.scan(scan_filter={'SentBy': condition.EQ('User B')}) - results.response['Items'].should.have.length_of(1) - - results = table.scan(scan_filter={'Body': condition.BEGINS_WITH('http')}) - results.response['Items'].should.have.length_of(3) - - results = table.scan(scan_filter={'Ids': condition.CONTAINS(2)}) - results.response['Items'].should.have.length_of(1) - - results = table.scan(scan_filter={'Ids': condition.NOT_NULL()}) - 
results.response['Items'].should.have.length_of(1) - - results = table.scan(scan_filter={'Ids': condition.NULL()}) - results.response['Items'].should.have.length_of(2) - - results = table.scan(scan_filter={'PK': condition.BETWEEN(8, 9)}) - results.response['Items'].should.have.length_of(0) - - results = table.scan(scan_filter={'PK': condition.BETWEEN(5, 8)}) - results.response['Items'].should.have.length_of(1) - - -@mock_dynamodb_deprecated -def test_scan_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.scan.when.called_with( - table_name='undeclared-table', - scan_filter={ - "SentBy": { - "AttributeValueList": [{ - "S": "User B"} - ], - "ComparisonOperator": "EQ" - } - }, - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_scan_after_has_item(): - conn = boto.connect_dynamodb() - table = create_table(conn) - list(table.scan()).should.equal([]) - - table.has_item(hash_key='the-key', range_key='123') - - list(table.scan()).should.equal([]) - - -@mock_dynamodb_deprecated -def test_write_batch(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - batch_list = conn.new_batch_write_list() - - items = [] - items.append(table.new_item( - hash_key='the-key', - range_key='123', - attrs={ - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }, - )) - - items.append(table.new_item( - hash_key='the-key', - range_key='789', - attrs={ - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - }, - )) - - batch_list.add_batch(table, puts=items) - conn.batch_write_item(batch_list) - - table.refresh() - table.item_count.should.equal(2) - - batch_list = conn.new_batch_write_list() - batch_list.add_batch(table, deletes=[('the-key', '789')]) - conn.batch_write_item(batch_list) - - table.refresh() - table.item_count.should.equal(1) - - -@mock_dynamodb_deprecated -def test_batch_read(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='the-key', - range_key='456', - attrs=item_data, - ) - item.put() - - item = table.new_item( - hash_key='the-key', - range_key='123', - attrs=item_data, - ) - item.put() - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - } - item = table.new_item( - hash_key='another-key', - range_key='789', - attrs=item_data, - ) - item.put() - - items = table.batch_get_item([('the-key', '123'), ('another-key', '789')]) - # Iterate through so that batch_item gets called - count = len([x for x in items]) - count.should.equal(2) +from __future__ import unicode_literals + +import boto +import sure # noqa +from freezegun import freeze_time + +from moto import mock_dynamodb_deprecated + +from boto.dynamodb import condition +from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError, DynamoDBValidationError +from boto.exception import DynamoDBResponseError + + +def create_table(conn): + message_table_schema = conn.create_schema( + hash_key_name='forum_name', + hash_key_proto_value=str, + range_key_name='subject', + range_key_proto_value=str + ) + + table = conn.create_table( + name='messages', + schema=message_table_schema, + read_units=10, + write_units=10 + ) + return table + + +@freeze_time("2012-01-14") 
+@mock_dynamodb_deprecated +def test_create_table(): + conn = boto.connect_dynamodb() + create_table(conn) + + expected = { + 'Table': { + 'CreationDateTime': 1326499200.0, + 'ItemCount': 0, + 'KeySchema': { + 'HashKeyElement': { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + 'RangeKeyElement': { + 'AttributeName': 'subject', + 'AttributeType': 'S' + } + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 10, + 'WriteCapacityUnits': 10 + }, + 'TableName': 'messages', + 'TableSizeBytes': 0, + 'TableStatus': 'ACTIVE' + } + } + conn.describe_table('messages').should.equal(expected) + + +@mock_dynamodb_deprecated +def test_delete_table(): + conn = boto.connect_dynamodb() + create_table(conn) + conn.list_tables().should.have.length_of(1) + + conn.layer1.delete_table('messages') + conn.list_tables().should.have.length_of(0) + + conn.layer1.delete_table.when.called_with( + 'messages').should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_update_table_throughput(): + conn = boto.connect_dynamodb() + table = create_table(conn) + table.read_units.should.equal(10) + table.write_units.should.equal(10) + + table.update_throughput(5, 6) + table.refresh() + + table.read_units.should.equal(5) + table.write_units.should.equal(6) + + +@mock_dynamodb_deprecated +def test_item_add_and_describe_and_update(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='LOLCat Forum', + range_key='Check this out!', + attrs=item_data, + ) + item.put() + + table.has_item("LOLCat Forum", "Check this out!").should.equal(True) + + returned_item = table.get_item( + hash_key='LOLCat Forum', + range_key='Check this out!', + attributes_to_get=['Body', 'SentBy'] + ) + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'subject': 'Check this out!', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + }) + + item['SentBy'] = 'User B' + item.put() + + returned_item = table.get_item( + hash_key='LOLCat Forum', + range_key='Check this out!', + attributes_to_get=['Body', 'SentBy'] + ) + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'subject': 'Check this out!', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + }) + + +@mock_dynamodb_deprecated +def test_item_put_without_table(): + conn = boto.connect_dynamodb() + + conn.layer1.put_item.when.called_with( + table_name='undeclared-table', + item=dict( + hash_key='LOLCat Forum', + range_key='Check this out!', + ), + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_get_missing_item(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + table.get_item.when.called_with( + hash_key='tester', + range_key='other', + ).should.throw(DynamoDBKeyNotFoundError) + table.has_item("foobar", "more").should.equal(False) + + +@mock_dynamodb_deprecated +def test_get_item_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.get_item.when.called_with( + table_name='undeclared-table', + key={ + 'HashKeyElement': {'S': 'tester'}, + 'RangeKeyElement': {'S': 'test-range'}, + }, + ).should.throw(DynamoDBKeyNotFoundError) + + +@mock_dynamodb_deprecated +def test_get_item_without_range_key(): + conn = boto.connect_dynamodb() + message_table_schema = conn.create_schema( + hash_key_name="test_hash", + hash_key_proto_value=int, + range_key_name="test_range", + 
range_key_proto_value=int, + ) + table = conn.create_table( + name='messages', + schema=message_table_schema, + read_units=10, + write_units=10 + ) + + hash_key = 3241526475 + range_key = 1234567890987 + new_item = table.new_item(hash_key=hash_key, range_key=range_key) + new_item.put() + + table.get_item.when.called_with( + hash_key=hash_key).should.throw(DynamoDBValidationError) + + +@mock_dynamodb_deprecated +def test_delete_item(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='LOLCat Forum', + range_key='Check this out!', + attrs=item_data, + ) + item.put() + + table.refresh() + table.item_count.should.equal(1) + + response = item.delete() + response.should.equal({u'Attributes': [], u'ConsumedCapacityUnits': 0.5}) + table.refresh() + table.item_count.should.equal(0) + + item.delete.when.called_with().should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_delete_item_with_attribute_response(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='LOLCat Forum', + range_key='Check this out!', + attrs=item_data, + ) + item.put() + + table.refresh() + table.item_count.should.equal(1) + + response = item.delete(return_values='ALL_OLD') + response.should.equal({ + 'Attributes': { + 'Body': 'http://url_to_lolcat.gif', + 'forum_name': 'LOLCat Forum', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'SentBy': 'User A', + 'subject': 'Check this out!' + }, + 'ConsumedCapacityUnits': 0.5 + }) + table.refresh() + table.item_count.should.equal(0) + + item.delete.when.called_with().should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_delete_item_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.delete_item.when.called_with( + table_name='undeclared-table', + key={ + 'HashKeyElement': {'S': 'tester'}, + 'RangeKeyElement': {'S': 'test-range'}, + }, + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_query(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='the-key', + range_key='456', + attrs=item_data, + ) + item.put() + + item = table.new_item( + hash_key='the-key', + range_key='123', + attrs=item_data, + ) + item.put() + + item = table.new_item( + hash_key='the-key', + range_key='789', + attrs=item_data, + ) + item.put() + + results = table.query(hash_key='the-key', + range_key_condition=condition.GT('1')) + results.response['Items'].should.have.length_of(3) + + results = table.query(hash_key='the-key', + range_key_condition=condition.GT('234')) + results.response['Items'].should.have.length_of(2) + + results = table.query(hash_key='the-key', + range_key_condition=condition.GT('9999')) + results.response['Items'].should.have.length_of(0) + + results = table.query(hash_key='the-key', + range_key_condition=condition.CONTAINS('12')) + results.response['Items'].should.have.length_of(1) + + results = table.query(hash_key='the-key', + range_key_condition=condition.BEGINS_WITH('7')) + results.response['Items'].should.have.length_of(1) + + results = table.query(hash_key='the-key', + 
range_key_condition=condition.BETWEEN('567', '890')) + results.response['Items'].should.have.length_of(1) + + +@mock_dynamodb_deprecated +def test_query_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.query.when.called_with( + table_name='undeclared-table', + hash_key_value={'S': 'the-key'}, + range_key_conditions={ + "AttributeValueList": [{ + "S": "User B" + }], + "ComparisonOperator": "EQ", + }, + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_scan(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='the-key', + range_key='456', + attrs=item_data, + ) + item.put() + + item = table.new_item( + hash_key='the-key', + range_key='123', + attrs=item_data, + ) + item.put() + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + } + item = table.new_item( + hash_key='the-key', + range_key='789', + attrs=item_data, + ) + item.put() + + results = table.scan() + results.response['Items'].should.have.length_of(3) + + results = table.scan(scan_filter={'SentBy': condition.EQ('User B')}) + results.response['Items'].should.have.length_of(1) + + results = table.scan(scan_filter={'Body': condition.BEGINS_WITH('http')}) + results.response['Items'].should.have.length_of(3) + + results = table.scan(scan_filter={'Ids': condition.CONTAINS(2)}) + results.response['Items'].should.have.length_of(1) + + results = table.scan(scan_filter={'Ids': condition.NOT_NULL()}) + results.response['Items'].should.have.length_of(1) + + results = table.scan(scan_filter={'Ids': condition.NULL()}) + results.response['Items'].should.have.length_of(2) + + results = table.scan(scan_filter={'PK': condition.BETWEEN(8, 9)}) + results.response['Items'].should.have.length_of(0) + + results = table.scan(scan_filter={'PK': condition.BETWEEN(5, 8)}) + results.response['Items'].should.have.length_of(1) + + +@mock_dynamodb_deprecated +def test_scan_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.scan.when.called_with( + table_name='undeclared-table', + scan_filter={ + "SentBy": { + "AttributeValueList": [{ + "S": "User B"} + ], + "ComparisonOperator": "EQ" + } + }, + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_scan_after_has_item(): + conn = boto.connect_dynamodb() + table = create_table(conn) + list(table.scan()).should.equal([]) + + table.has_item(hash_key='the-key', range_key='123') + + list(table.scan()).should.equal([]) + + +@mock_dynamodb_deprecated +def test_write_batch(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + batch_list = conn.new_batch_write_list() + + items = [] + items.append(table.new_item( + hash_key='the-key', + range_key='123', + attrs={ + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }, + )) + + items.append(table.new_item( + hash_key='the-key', + range_key='789', + attrs={ + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + }, + )) + + batch_list.add_batch(table, puts=items) + conn.batch_write_item(batch_list) + + table.refresh() + table.item_count.should.equal(2) + + batch_list = conn.new_batch_write_list() + batch_list.add_batch(table, deletes=[('the-key', '789')]) 
+ conn.batch_write_item(batch_list) + + table.refresh() + table.item_count.should.equal(1) + + +@mock_dynamodb_deprecated +def test_batch_read(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='the-key', + range_key='456', + attrs=item_data, + ) + item.put() + + item = table.new_item( + hash_key='the-key', + range_key='123', + attrs=item_data, + ) + item.put() + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + } + item = table.new_item( + hash_key='another-key', + range_key='789', + attrs=item_data, + ) + item.put() + + items = table.batch_get_item([('the-key', '123'), ('another-key', '789')]) + # Iterate through so that batch_item gets called + count = len([x for x in items]) + count.should.equal(2) diff --git a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py index ebd0c2051..c31b1994d 100644 --- a/tests/test_dynamodb/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb/test_dynamodb_table_without_range_key.py @@ -1,430 +1,430 @@ -from __future__ import unicode_literals - -import boto -import sure # noqa -from freezegun import freeze_time - -from moto import mock_dynamodb_deprecated - -from boto.dynamodb import condition -from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError -from boto.exception import DynamoDBResponseError - - -def create_table(conn): - message_table_schema = conn.create_schema( - hash_key_name='forum_name', - hash_key_proto_value=str, - ) - - table = conn.create_table( - name='messages', - schema=message_table_schema, - read_units=10, - write_units=10 - ) - return table - - -@freeze_time("2012-01-14") -@mock_dynamodb_deprecated -def test_create_table(): - conn = boto.connect_dynamodb() - create_table(conn) - - expected = { - 'Table': { - 'CreationDateTime': 1326499200.0, - 'ItemCount': 0, - 'KeySchema': { - 'HashKeyElement': { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - }, - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 10, - 'WriteCapacityUnits': 10 - }, - 'TableName': 'messages', - 'TableSizeBytes': 0, - 'TableStatus': 'ACTIVE', - } - } - conn.describe_table('messages').should.equal(expected) - - -@mock_dynamodb_deprecated -def test_delete_table(): - conn = boto.connect_dynamodb() - create_table(conn) - conn.list_tables().should.have.length_of(1) - - conn.layer1.delete_table('messages') - conn.list_tables().should.have.length_of(0) - - conn.layer1.delete_table.when.called_with( - 'messages').should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_update_table_throughput(): - conn = boto.connect_dynamodb() - table = create_table(conn) - table.read_units.should.equal(10) - table.write_units.should.equal(10) - - table.update_throughput(5, 6) - table.refresh() - - table.read_units.should.equal(5) - table.write_units.should.equal(6) - - -@mock_dynamodb_deprecated -def test_item_add_and_describe_and_update(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='LOLCat Forum', - attrs=item_data, - ) - item.put() - - returned_item = table.get_item( - hash_key='LOLCat Forum', - 
attributes_to_get=['Body', 'SentBy'] - ) - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - }) - - item['SentBy'] = 'User B' - item.put() - - returned_item = table.get_item( - hash_key='LOLCat Forum', - attributes_to_get=['Body', 'SentBy'] - ) - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - }) - - -@mock_dynamodb_deprecated -def test_item_put_without_table(): - conn = boto.connect_dynamodb() - - conn.layer1.put_item.when.called_with( - table_name='undeclared-table', - item=dict( - hash_key='LOLCat Forum', - ), - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_get_missing_item(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - table.get_item.when.called_with( - hash_key='tester', - ).should.throw(DynamoDBKeyNotFoundError) - - -@mock_dynamodb_deprecated -def test_get_item_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.get_item.when.called_with( - table_name='undeclared-table', - key={ - 'HashKeyElement': {'S': 'tester'}, - }, - ).should.throw(DynamoDBKeyNotFoundError) - - -@mock_dynamodb_deprecated -def test_delete_item(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='LOLCat Forum', - attrs=item_data, - ) - item.put() - - table.refresh() - table.item_count.should.equal(1) - - response = item.delete() - response.should.equal({u'Attributes': [], u'ConsumedCapacityUnits': 0.5}) - table.refresh() - table.item_count.should.equal(0) - - item.delete.when.called_with().should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_delete_item_with_attribute_response(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='LOLCat Forum', - attrs=item_data, - ) - item.put() - - table.refresh() - table.item_count.should.equal(1) - - response = item.delete(return_values='ALL_OLD') - response.should.equal({ - u'Attributes': { - u'Body': u'http://url_to_lolcat.gif', - u'forum_name': u'LOLCat Forum', - u'ReceivedTime': u'12/9/2011 11:36:03 PM', - u'SentBy': u'User A', - }, - u'ConsumedCapacityUnits': 0.5 - }) - table.refresh() - table.item_count.should.equal(0) - - item.delete.when.called_with().should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_delete_item_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.delete_item.when.called_with( - table_name='undeclared-table', - key={ - 'HashKeyElement': {'S': 'tester'}, - }, - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_query(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='the-key', - attrs=item_data, - ) - item.put() - - results = table.query(hash_key='the-key') - results.response['Items'].should.have.length_of(1) - - -@mock_dynamodb_deprecated -def test_query_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.query.when.called_with( - table_name='undeclared-table', - hash_key_value={'S': 
'the-key'}, - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_scan(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='the-key', - attrs=item_data, - ) - item.put() - - item = table.new_item( - hash_key='the-key2', - attrs=item_data, - ) - item.put() - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - } - item = table.new_item( - hash_key='the-key3', - attrs=item_data, - ) - item.put() - - results = table.scan() - results.response['Items'].should.have.length_of(3) - - results = table.scan(scan_filter={'SentBy': condition.EQ('User B')}) - results.response['Items'].should.have.length_of(1) - - results = table.scan(scan_filter={'Body': condition.BEGINS_WITH('http')}) - results.response['Items'].should.have.length_of(3) - - results = table.scan(scan_filter={'Ids': condition.CONTAINS(2)}) - results.response['Items'].should.have.length_of(1) - - results = table.scan(scan_filter={'Ids': condition.NOT_NULL()}) - results.response['Items'].should.have.length_of(1) - - results = table.scan(scan_filter={'Ids': condition.NULL()}) - results.response['Items'].should.have.length_of(2) - - results = table.scan(scan_filter={'PK': condition.BETWEEN(8, 9)}) - results.response['Items'].should.have.length_of(0) - - results = table.scan(scan_filter={'PK': condition.BETWEEN(5, 8)}) - results.response['Items'].should.have.length_of(1) - - -@mock_dynamodb_deprecated -def test_scan_with_undeclared_table(): - conn = boto.connect_dynamodb() - - conn.layer1.scan.when.called_with( - table_name='undeclared-table', - scan_filter={ - "SentBy": { - "AttributeValueList": [{ - "S": "User B"} - ], - "ComparisonOperator": "EQ" - } - }, - ).should.throw(DynamoDBResponseError) - - -@mock_dynamodb_deprecated -def test_scan_after_has_item(): - conn = boto.connect_dynamodb() - table = create_table(conn) - list(table.scan()).should.equal([]) - - table.has_item('the-key') - - list(table.scan()).should.equal([]) - - -@mock_dynamodb_deprecated -def test_write_batch(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - batch_list = conn.new_batch_write_list() - - items = [] - items.append(table.new_item( - hash_key='the-key', - attrs={ - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }, - )) - - items.append(table.new_item( - hash_key='the-key2', - attrs={ - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - }, - )) - - batch_list.add_batch(table, puts=items) - conn.batch_write_item(batch_list) - - table.refresh() - table.item_count.should.equal(2) - - batch_list = conn.new_batch_write_list() - batch_list.add_batch(table, deletes=[('the-key')]) - conn.batch_write_item(batch_list) - - table.refresh() - table.item_count.should.equal(1) - - -@mock_dynamodb_deprecated -def test_batch_read(): - conn = boto.connect_dynamodb() - table = create_table(conn) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = table.new_item( - hash_key='the-key1', - attrs=item_data, - ) - item.put() - - item = table.new_item( - hash_key='the-key2', - attrs=item_data, - ) - item.put() - - item_data = { - 'Body': 
'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - } - item = table.new_item( - hash_key='another-key', - attrs=item_data, - ) - item.put() - - items = table.batch_get_item([('the-key1'), ('another-key')]) - # Iterate through so that batch_item gets called - count = len([x for x in items]) - count.should.have.equal(2) +from __future__ import unicode_literals + +import boto +import sure # noqa +from freezegun import freeze_time + +from moto import mock_dynamodb_deprecated + +from boto.dynamodb import condition +from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError +from boto.exception import DynamoDBResponseError + + +def create_table(conn): + message_table_schema = conn.create_schema( + hash_key_name='forum_name', + hash_key_proto_value=str, + ) + + table = conn.create_table( + name='messages', + schema=message_table_schema, + read_units=10, + write_units=10 + ) + return table + + +@freeze_time("2012-01-14") +@mock_dynamodb_deprecated +def test_create_table(): + conn = boto.connect_dynamodb() + create_table(conn) + + expected = { + 'Table': { + 'CreationDateTime': 1326499200.0, + 'ItemCount': 0, + 'KeySchema': { + 'HashKeyElement': { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 10, + 'WriteCapacityUnits': 10 + }, + 'TableName': 'messages', + 'TableSizeBytes': 0, + 'TableStatus': 'ACTIVE', + } + } + conn.describe_table('messages').should.equal(expected) + + +@mock_dynamodb_deprecated +def test_delete_table(): + conn = boto.connect_dynamodb() + create_table(conn) + conn.list_tables().should.have.length_of(1) + + conn.layer1.delete_table('messages') + conn.list_tables().should.have.length_of(0) + + conn.layer1.delete_table.when.called_with( + 'messages').should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_update_table_throughput(): + conn = boto.connect_dynamodb() + table = create_table(conn) + table.read_units.should.equal(10) + table.write_units.should.equal(10) + + table.update_throughput(5, 6) + table.refresh() + + table.read_units.should.equal(5) + table.write_units.should.equal(6) + + +@mock_dynamodb_deprecated +def test_item_add_and_describe_and_update(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='LOLCat Forum', + attrs=item_data, + ) + item.put() + + returned_item = table.get_item( + hash_key='LOLCat Forum', + attributes_to_get=['Body', 'SentBy'] + ) + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + }) + + item['SentBy'] = 'User B' + item.put() + + returned_item = table.get_item( + hash_key='LOLCat Forum', + attributes_to_get=['Body', 'SentBy'] + ) + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + }) + + +@mock_dynamodb_deprecated +def test_item_put_without_table(): + conn = boto.connect_dynamodb() + + conn.layer1.put_item.when.called_with( + table_name='undeclared-table', + item=dict( + hash_key='LOLCat Forum', + ), + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_get_missing_item(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + table.get_item.when.called_with( + hash_key='tester', + 
).should.throw(DynamoDBKeyNotFoundError) + + +@mock_dynamodb_deprecated +def test_get_item_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.get_item.when.called_with( + table_name='undeclared-table', + key={ + 'HashKeyElement': {'S': 'tester'}, + }, + ).should.throw(DynamoDBKeyNotFoundError) + + +@mock_dynamodb_deprecated +def test_delete_item(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='LOLCat Forum', + attrs=item_data, + ) + item.put() + + table.refresh() + table.item_count.should.equal(1) + + response = item.delete() + response.should.equal({u'Attributes': [], u'ConsumedCapacityUnits': 0.5}) + table.refresh() + table.item_count.should.equal(0) + + item.delete.when.called_with().should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_delete_item_with_attribute_response(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='LOLCat Forum', + attrs=item_data, + ) + item.put() + + table.refresh() + table.item_count.should.equal(1) + + response = item.delete(return_values='ALL_OLD') + response.should.equal({ + u'Attributes': { + u'Body': u'http://url_to_lolcat.gif', + u'forum_name': u'LOLCat Forum', + u'ReceivedTime': u'12/9/2011 11:36:03 PM', + u'SentBy': u'User A', + }, + u'ConsumedCapacityUnits': 0.5 + }) + table.refresh() + table.item_count.should.equal(0) + + item.delete.when.called_with().should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_delete_item_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.delete_item.when.called_with( + table_name='undeclared-table', + key={ + 'HashKeyElement': {'S': 'tester'}, + }, + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_query(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='the-key', + attrs=item_data, + ) + item.put() + + results = table.query(hash_key='the-key') + results.response['Items'].should.have.length_of(1) + + +@mock_dynamodb_deprecated +def test_query_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.query.when.called_with( + table_name='undeclared-table', + hash_key_value={'S': 'the-key'}, + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_scan(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='the-key', + attrs=item_data, + ) + item.put() + + item = table.new_item( + hash_key='the-key2', + attrs=item_data, + ) + item.put() + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + } + item = table.new_item( + hash_key='the-key3', + attrs=item_data, + ) + item.put() + + results = table.scan() + results.response['Items'].should.have.length_of(3) + + results = table.scan(scan_filter={'SentBy': condition.EQ('User B')}) + 
results.response['Items'].should.have.length_of(1) + + results = table.scan(scan_filter={'Body': condition.BEGINS_WITH('http')}) + results.response['Items'].should.have.length_of(3) + + results = table.scan(scan_filter={'Ids': condition.CONTAINS(2)}) + results.response['Items'].should.have.length_of(1) + + results = table.scan(scan_filter={'Ids': condition.NOT_NULL()}) + results.response['Items'].should.have.length_of(1) + + results = table.scan(scan_filter={'Ids': condition.NULL()}) + results.response['Items'].should.have.length_of(2) + + results = table.scan(scan_filter={'PK': condition.BETWEEN(8, 9)}) + results.response['Items'].should.have.length_of(0) + + results = table.scan(scan_filter={'PK': condition.BETWEEN(5, 8)}) + results.response['Items'].should.have.length_of(1) + + +@mock_dynamodb_deprecated +def test_scan_with_undeclared_table(): + conn = boto.connect_dynamodb() + + conn.layer1.scan.when.called_with( + table_name='undeclared-table', + scan_filter={ + "SentBy": { + "AttributeValueList": [{ + "S": "User B"} + ], + "ComparisonOperator": "EQ" + } + }, + ).should.throw(DynamoDBResponseError) + + +@mock_dynamodb_deprecated +def test_scan_after_has_item(): + conn = boto.connect_dynamodb() + table = create_table(conn) + list(table.scan()).should.equal([]) + + table.has_item('the-key') + + list(table.scan()).should.equal([]) + + +@mock_dynamodb_deprecated +def test_write_batch(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + batch_list = conn.new_batch_write_list() + + items = [] + items.append(table.new_item( + hash_key='the-key', + attrs={ + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }, + )) + + items.append(table.new_item( + hash_key='the-key2', + attrs={ + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + }, + )) + + batch_list.add_batch(table, puts=items) + conn.batch_write_item(batch_list) + + table.refresh() + table.item_count.should.equal(2) + + batch_list = conn.new_batch_write_list() + batch_list.add_batch(table, deletes=[('the-key')]) + conn.batch_write_item(batch_list) + + table.refresh() + table.item_count.should.equal(1) + + +@mock_dynamodb_deprecated +def test_batch_read(): + conn = boto.connect_dynamodb() + table = create_table(conn) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = table.new_item( + hash_key='the-key1', + attrs=item_data, + ) + item.put() + + item = table.new_item( + hash_key='the-key2', + attrs=item_data, + ) + item.put() + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + } + item = table.new_item( + hash_key='another-key', + attrs=item_data, + ) + item.put() + + items = table.batch_get_item([('the-key1'), ('another-key')]) + # Iterate through so that batch_item gets called + count = len([x for x in items]) + count.should.have.equal(2) diff --git a/tests/test_dynamodb/test_server.py b/tests/test_dynamodb/test_server.py index 66004bbe1..a9fb7607e 100644 --- a/tests/test_dynamodb/test_server.py +++ b/tests/test_dynamodb/test_server.py @@ -1,20 +1,20 @@ -from __future__ import unicode_literals -import sure # noqa - -import moto.server as server - -''' -Test the different server responses -''' - - -def test_table_list(): - backend = server.create_backend_app("dynamodb") - test_client = 
backend.test_client() - - res = test_client.get('/') - res.status_code.should.equal(404) - - headers = {'X-Amz-Target': 'TestTable.ListTables'} - res = test_client.get('/', headers=headers) - res.data.should.contain(b'TableNames') +from __future__ import unicode_literals +import sure # noqa + +import moto.server as server + +''' +Test the different server responses +''' + + +def test_table_list(): + backend = server.create_backend_app("dynamodb") + test_client = backend.test_client() + + res = test_client.get('/') + res.status_code.should.equal(404) + + headers = {'X-Amz-Target': 'TestTable.ListTables'} + res = test_client.get('/', headers=headers) + res.data.should.contain(b'TableNames') diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index afc919dd7..8cef24cda 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -1,1338 +1,1338 @@ -from __future__ import unicode_literals, print_function - -from decimal import Decimal - -import six -import boto -import boto3 -from boto3.dynamodb.conditions import Attr, Key -import sure # noqa -import requests -from moto import mock_dynamodb2, mock_dynamodb2_deprecated -from moto.dynamodb2 import dynamodb_backend2 -from boto.exception import JSONResponseError -from botocore.exceptions import ClientError -from tests.helpers import requires_boto_gte -import tests.backport_assert_raises - -import moto.dynamodb2.comparisons -import moto.dynamodb2.models - -from nose.tools import assert_raises -try: - import boto.dynamodb2 -except ImportError: - print("This boto version is not supported") - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_list_tables(): - name = 'TestTable' - # Should make tables properly with boto - dynamodb_backend2.create_table(name, schema=[ - {u'KeyType': u'HASH', u'AttributeName': u'forum_name'}, - {u'KeyType': u'RANGE', u'AttributeName': u'subject'} - ]) - conn = boto.dynamodb2.connect_to_region( - 'us-east-1', - aws_access_key_id="ak", - aws_secret_access_key="sk") - assert conn.list_tables()["TableNames"] == [name] - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_list_tables_layer_1(): - # Should make tables properly with boto - dynamodb_backend2.create_table("test_1", schema=[ - {u'KeyType': u'HASH', u'AttributeName': u'name'} - ]) - dynamodb_backend2.create_table("test_2", schema=[ - {u'KeyType': u'HASH', u'AttributeName': u'name'} - ]) - conn = boto.dynamodb2.connect_to_region( - 'us-east-1', - aws_access_key_id="ak", - aws_secret_access_key="sk") - - res = conn.list_tables(limit=1) - expected = {"TableNames": ["test_1"], "LastEvaluatedTableName": "test_1"} - res.should.equal(expected) - - res = conn.list_tables(limit=1, exclusive_start_table_name="test_1") - expected = {"TableNames": ["test_2"]} - res.should.equal(expected) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_describe_missing_table(): - conn = boto.dynamodb2.connect_to_region( - 'us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - with assert_raises(JSONResponseError): - conn.describe_table('messages') - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_list_table_tags(): - name = 'TestTable' - conn = boto3.client('dynamodb', - region_name='us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - conn.create_table(TableName=name, - KeySchema=[{'AttributeName':'id','KeyType':'HASH'}], - AttributeDefinitions=[{'AttributeName':'id','AttributeType':'S'}], - 
ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) - table_description = conn.describe_table(TableName=name) - arn = table_description['Table']['TableArn'] - - # Tag table - tags = [{'Key': 'TestTag', 'Value': 'TestValue'}, {'Key': 'TestTag2', 'Value': 'TestValue2'}] - conn.tag_resource(ResourceArn=arn, Tags=tags) - - # Check tags - resp = conn.list_tags_of_resource(ResourceArn=arn) - assert resp["Tags"] == tags - - # Remove 1 tag - conn.untag_resource(ResourceArn=arn, TagKeys=['TestTag']) - - # Check tags - resp = conn.list_tags_of_resource(ResourceArn=arn) - assert resp["Tags"] == [{'Key': 'TestTag2', 'Value': 'TestValue2'}] - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_list_table_tags_empty(): - name = 'TestTable' - conn = boto3.client('dynamodb', - region_name='us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - conn.create_table(TableName=name, - KeySchema=[{'AttributeName':'id','KeyType':'HASH'}], - AttributeDefinitions=[{'AttributeName':'id','AttributeType':'S'}], - ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) - table_description = conn.describe_table(TableName=name) - arn = table_description['Table']['TableArn'] - tags = [{'Key':'TestTag', 'Value': 'TestValue'}] - # conn.tag_resource(ResourceArn=arn, - # Tags=tags) - resp = conn.list_tags_of_resource(ResourceArn=arn) - assert resp["Tags"] == [] - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_list_table_tags_paginated(): - name = 'TestTable' - conn = boto3.client('dynamodb', - region_name='us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - conn.create_table(TableName=name, - KeySchema=[{'AttributeName':'id','KeyType':'HASH'}], - AttributeDefinitions=[{'AttributeName':'id','AttributeType':'S'}], - ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) - table_description = conn.describe_table(TableName=name) - arn = table_description['Table']['TableArn'] - for i in range(11): - tags = [{'Key':'TestTag%d' % i, 'Value': 'TestValue'}] - conn.tag_resource(ResourceArn=arn, - Tags=tags) - resp = conn.list_tags_of_resource(ResourceArn=arn) - assert len(resp["Tags"]) == 10 - assert 'NextToken' in resp.keys() - resp2 = conn.list_tags_of_resource(ResourceArn=arn, - NextToken=resp['NextToken']) - assert len(resp2["Tags"]) == 1 - assert 'NextToken' not in resp2.keys() - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_list_not_found_table_tags(): - conn = boto3.client('dynamodb', - region_name='us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - arn = 'DymmyArn' - try: - conn.list_tags_of_resource(ResourceArn=arn) - except ClientError as exception: - assert exception.response['Error']['Code'] == "ResourceNotFoundException" - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_item_add_empty_string_exception(): - name = 'TestTable' - conn = boto3.client('dynamodb', - region_name='us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - conn.create_table(TableName=name, - KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], - AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], - ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) - - with assert_raises(ClientError) as ex: - conn.put_item( - TableName=name, - Item={ - 'forum_name': { 'S': 'LOLCat Forum' }, - 'subject': { 'S': 'Check this out!' 
}, - 'Body': { 'S': 'http://url_to_lolcat.gif'}, - 'SentBy': { 'S': "" }, - 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, - } - ) - - ex.exception.response['Error']['Code'].should.equal('ValidationException') - ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) - ex.exception.response['Error']['Message'].should.equal( - 'One or more parameter values were invalid: An AttributeValue may not contain an empty string' - ) - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_update_item_with_empty_string_exception(): - name = 'TestTable' - conn = boto3.client('dynamodb', - region_name='us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - conn.create_table(TableName=name, - KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], - AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], - ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) - - conn.put_item( - TableName=name, - Item={ - 'forum_name': { 'S': 'LOLCat Forum' }, - 'subject': { 'S': 'Check this out!' }, - 'Body': { 'S': 'http://url_to_lolcat.gif'}, - 'SentBy': { 'S': "test" }, - 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, - } - ) - - with assert_raises(ClientError) as ex: - conn.update_item( - TableName=name, - Key={ - 'forum_name': { 'S': 'LOLCat Forum'}, - }, - UpdateExpression='set Body=:Body', - ExpressionAttributeValues={ - ':Body': {'S': ''} - }) - - ex.exception.response['Error']['Code'].should.equal('ValidationException') - ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) - ex.exception.response['Error']['Message'].should.equal( - 'One or more parameter values were invalid: An AttributeValue may not contain an empty string' - ) - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_query_invalid_table(): - conn = boto3.client('dynamodb', - region_name='us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - try: - conn.query(TableName='invalid_table', KeyConditionExpression='index1 = :partitionkeyval', ExpressionAttributeValues={':partitionkeyval': {'S':'test'}}) - except ClientError as exception: - assert exception.response['Error']['Code'] == "ResourceNotFoundException" - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_scan_returns_consumed_capacity(): - name = 'TestTable' - conn = boto3.client('dynamodb', - region_name='us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - - conn.create_table(TableName=name, - KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], - AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], - ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) - - conn.put_item( - TableName=name, - Item={ - 'forum_name': { 'S': 'LOLCat Forum' }, - 'subject': { 'S': 'Check this out!' 
}, - 'Body': { 'S': 'http://url_to_lolcat.gif'}, - 'SentBy': { 'S': "test" }, - 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, - } - ) - - response = conn.scan( - TableName=name, - ) - - assert 'ConsumedCapacity' in response - assert 'CapacityUnits' in response['ConsumedCapacity'] - assert response['ConsumedCapacity']['TableName'] == name - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_put_item_with_special_chars(): - name = 'TestTable' - conn = boto3.client('dynamodb', - region_name='us-west-2', - aws_access_key_id="ak", - aws_secret_access_key="sk") - - conn.create_table(TableName=name, - KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], - AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], - ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) - - conn.put_item( - TableName=name, - Item={ - 'forum_name': { 'S': 'LOLCat Forum' }, - 'subject': { 'S': 'Check this out!' }, - 'Body': { 'S': 'http://url_to_lolcat.gif'}, - 'SentBy': { 'S': "test" }, - 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, - '"': {"S": "foo"}, - } - ) - - -@requires_boto_gte("2.9") -@mock_dynamodb2 -def test_query_returns_consumed_capacity(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'body': 'some test message' - }) - - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key') - ) - - assert 'ConsumedCapacity' in results - assert 'CapacityUnits' in results['ConsumedCapacity'] - assert results['ConsumedCapacity']['CapacityUnits'] == 1 - - -@mock_dynamodb2 -def test_basic_projection_expressions(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
- table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'body': 'some test message' - }) - - table.put_item(Item={ - 'forum_name': 'not-the-key', - 'subject': '123', - 'body': 'some other test message' - }) - # Test a query returning all items - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key'), - ProjectionExpression='body, subject' - ) - - assert 'body' in results['Items'][0] - assert results['Items'][0]['body'] == 'some test message' - assert 'subject' in results['Items'][0] - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '1234', - 'body': 'yet another test message' - }) - - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key'), - ProjectionExpression='body' - ) - - assert 'body' in results['Items'][0] - assert 'subject' not in results['Items'][0] - assert results['Items'][0]['body'] == 'some test message' - assert 'body' in results['Items'][1] - assert 'subject' not in results['Items'][1] - assert results['Items'][1]['body'] == 'yet another test message' - - # The projection expression should not remove data from storage - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key'), - ) - assert 'subject' in results['Items'][0] - assert 'body' in results['Items'][1] - assert 'forum_name' in results['Items'][1] - - -@mock_dynamodb2 -def test_basic_projection_expressions_with_attr_expression_names(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'body': 'some test message', - 'attachment': 'something' - }) - - table.put_item(Item={ - 'forum_name': 'not-the-key', - 'subject': '123', - 'body': 'some other test message', - 'attachment': 'something' - }) - # Test a query returning all items - - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key'), - ProjectionExpression='#rl, #rt, subject', - ExpressionAttributeNames={ - '#rl': 'body', - '#rt': 'attachment' - }, - ) - - assert 'body' in results['Items'][0] - assert results['Items'][0]['body'] == 'some test message' - assert 'subject' in results['Items'][0] - assert results['Items'][0]['subject'] == '123' - assert 'attachment' in results['Items'][0] - assert results['Items'][0]['attachment'] == 'something' - - -@mock_dynamodb2 -def test_put_item_returns_consumed_capacity(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
- table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - response = table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'body': 'some test message', - }) - - assert 'ConsumedCapacity' in response - - -@mock_dynamodb2 -def test_update_item_returns_consumed_capacity(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'body': 'some test message', - }) - - response = table.update_item(Key={ - 'forum_name': 'the-key', - 'subject': '123' - }, - UpdateExpression='set body=:tb', - ExpressionAttributeValues={ - ':tb': 'a new message' - }) - - assert 'ConsumedCapacity' in response - assert 'CapacityUnits' in response['ConsumedCapacity'] - assert 'TableName' in response['ConsumedCapacity'] - - -@mock_dynamodb2 -def test_get_item_returns_consumed_capacity(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
- table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'body': 'some test message', - }) - - response = table.get_item(Key={ - 'forum_name': 'the-key', - 'subject': '123' - }) - - assert 'ConsumedCapacity' in response - assert 'CapacityUnits' in response['ConsumedCapacity'] - assert 'TableName' in response['ConsumedCapacity'] - - -def test_filter_expression(): - row1 = moto.dynamodb2.models.Item(None, None, None, None, {'Id': {'N': '8'}, 'Subs': {'N': '5'}, 'Desc': {'S': 'Some description'}, 'KV': {'SS': ['test1', 'test2']}}) - row2 = moto.dynamodb2.models.Item(None, None, None, None, {'Id': {'N': '8'}, 'Subs': {'N': '10'}, 'Desc': {'S': 'A description'}, 'KV': {'SS': ['test3', 'test4']}}) - - # NOT test 1 - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('NOT attribute_not_exists(Id)', {}, {}) - filter_expr.expr(row1).should.be(True) - - # NOT test 2 - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('NOT (Id = :v0)', {}, {':v0': {'N': 8}}) - filter_expr.expr(row1).should.be(False) # Id = 8 so should be false - - # AND test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id > :v0 AND Subs < :v1', {}, {':v0': {'N': 5}, ':v1': {'N': 7}}) - filter_expr.expr(row1).should.be(True) - filter_expr.expr(row2).should.be(False) - - # OR test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = :v0 OR Id=:v1', {}, {':v0': {'N': 5}, ':v1': {'N': 8}}) - filter_expr.expr(row1).should.be(True) - - # BETWEEN test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id BETWEEN :v0 AND :v1', {}, {':v0': {'N': 5}, ':v1': {'N': 10}}) - filter_expr.expr(row1).should.be(True) - - # PAREN test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = :v0 AND (Subs = :v0 OR Subs = :v1)', {}, {':v0': {'N': 8}, ':v1': {'N': 5}}) - filter_expr.expr(row1).should.be(True) - - # IN test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id IN :v0', {}, {':v0': {'NS': [7, 8, 9]}}) - filter_expr.expr(row1).should.be(True) - - # attribute function tests (with extra spaces) - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('attribute_exists(Id) AND attribute_not_exists (User)', {}, {}) - filter_expr.expr(row1).should.be(True) - - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('attribute_type(Id, N)', {}, {}) - filter_expr.expr(row1).should.be(True) - - # beginswith function test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('begins_with(Desc, Some)', {}, {}) - filter_expr.expr(row1).should.be(True) - filter_expr.expr(row2).should.be(False) - - # contains function test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('contains(KV, test1)', {}, {}) - filter_expr.expr(row1).should.be(True) - filter_expr.expr(row2).should.be(False) - - # size function test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('size(Desc) > size(KV)', {}, {}) - filter_expr.expr(row1).should.be(True) - - # Expression from @batkuip - filter_expr = 
moto.dynamodb2.comparisons.get_filter_expression(
-        '(#n0 < :v0 AND attribute_not_exists(#n1))',
-        {'#n0': 'Subs', '#n1': 'fanout_ts'},
-        {':v0': {'N': '7'}}
-    )
-    filter_expr.expr(row1).should.be(True)
-    # Expression to check contains() against a string value
-    filter_expr = moto.dynamodb2.comparisons.get_filter_expression(
-        'contains(#n0, :v0)',
-        {'#n0': 'Desc'},
-        {':v0': {'S': 'Some'}}
-    )
-    filter_expr.expr(row1).should.be(True)
-    filter_expr.expr(row2).should.be(False)
-
-
-@mock_dynamodb2
-def test_query_filter():
-    client = boto3.client('dynamodb', region_name='us-east-1')
-    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
-
-    # Create the DynamoDB table.
-    client.create_table(
-        TableName='test1',
-        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
-        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
-        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
-    )
-    client.put_item(
-        TableName='test1',
-        Item={
-            'client': {'S': 'client1'},
-            'app': {'S': 'app1'}
-        }
-    )
-    client.put_item(
-        TableName='test1',
-        Item={
-            'client': {'S': 'client1'},
-            'app': {'S': 'app2'}
-        }
-    )
-
-    table = dynamodb.Table('test1')
-    response = table.query(
-        KeyConditionExpression=Key('client').eq('client1')
-    )
-    assert response['Count'] == 2
-
-    response = table.query(
-        KeyConditionExpression=Key('client').eq('client1'),
-        FilterExpression=Attr('app').eq('app2')
-    )
-    assert response['Count'] == 1
-    assert response['Items'][0]['app'] == 'app2'
-    response = table.query(
-        KeyConditionExpression=Key('client').eq('client1'),
-        FilterExpression=Attr('app').contains('app')
-    )
-    assert response['Count'] == 2
-
-
-@mock_dynamodb2
-def test_scan_filter():
-    client = boto3.client('dynamodb', region_name='us-east-1')
-    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
-
-    # Create the DynamoDB table.
-    client.create_table(
-        TableName='test1',
-        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
-        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
-        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
-    )
-    client.put_item(
-        TableName='test1',
-        Item={
-            'client': {'S': 'client1'},
-            'app': {'S': 'app1'}
-        }
-    )
-
-    table = dynamodb.Table('test1')
-    response = table.scan(
-        FilterExpression=Attr('app').eq('app2')
-    )
-    assert response['Count'] == 0
-
-    response = table.scan(
-        FilterExpression=Attr('app').eq('app1')
-    )
-    assert response['Count'] == 1
-
-
-@mock_dynamodb2
-def test_scan_filter2():
-    client = boto3.client('dynamodb', region_name='us-east-1')
-
-    # Create the DynamoDB table.
-    client.create_table(
-        TableName='test1',
-        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}],
-        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
-        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
-    )
-    client.put_item(
-        TableName='test1',
-        Item={
-            'client': {'S': 'client1'},
-            'app': {'N': '1'}
-        }
-    )
-
-    response = client.scan(
-        TableName='test1',
-        Select='ALL_ATTRIBUTES',
-        FilterExpression='#tb >= :dt',
-        ExpressionAttributeNames={"#tb": "app"},
-        ExpressionAttributeValues={":dt": {"N": str(1)}}
-    )
-    assert response['Count'] == 1
-
-
-@mock_dynamodb2
-def test_scan_filter3():
-    client = boto3.client('dynamodb', region_name='us-east-1')
-    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
-
-    # Create the DynamoDB table.
-    client.create_table(
-        TableName='test1',
-        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}],
-        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
-        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
-    )
-    client.put_item(
-        TableName='test1',
-        Item={
-            'client': {'S': 'client1'},
-            'app': {'N': '1'},
-            'active': {'BOOL': True}
-        }
-    )
-
-    table = dynamodb.Table('test1')
-    response = table.scan(
-        FilterExpression=Attr('active').eq(True)
-    )
-    assert response['Count'] == 1
-
-
-@mock_dynamodb2
-def test_scan_filter4():
-    client = boto3.client('dynamodb', region_name='us-east-1')
-    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
-
-    # Create the DynamoDB table.
-    client.create_table(
-        TableName='test1',
-        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}],
-        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
-        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
-    )
-
-    table = dynamodb.Table('test1')
-    response = table.scan(
-        FilterExpression=Attr('epoch_ts').lt(7) & Attr('fanout_ts').not_exists()
-    )
-    # The table is empty, so the filtered scan should match nothing
-    assert response['Count'] == 0
-
-
-@mock_dynamodb2
-def test_bad_scan_filter():
-    client = boto3.client('dynamodb', region_name='us-east-1')
-    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
-
-    # Create the DynamoDB table.
-    client.create_table(
-        TableName='test1',
-        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
-        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
-        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
-    )
-    table = dynamodb.Table('test1')
-
-    # Bad expression
-    try:
-        table.scan(
-            FilterExpression='client test'
-        )
-    except ClientError as err:
-        err.response['Error']['Code'].should.equal('ValidationError')
-    else:
-        raise RuntimeError('Should have raised ValidationError')
-
-
-@mock_dynamodb2
-def test_duplicate_create():
-    client = boto3.client('dynamodb', region_name='us-east-1')
-
-    # Create the DynamoDB table.
- client.create_table( - TableName='test1', - AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], - KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], - ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} - ) - - try: - client.create_table( - TableName='test1', - AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], - KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], - ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} - ) - except ClientError as err: - err.response['Error']['Code'].should.equal('ResourceInUseException') - else: - raise RuntimeError('Should of raised ResourceInUseException') - - -@mock_dynamodb2 -def test_delete_table(): - client = boto3.client('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - client.create_table( - TableName='test1', - AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], - KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], - ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} - ) - - client.delete_table(TableName='test1') - - resp = client.list_tables() - len(resp['TableNames']).should.equal(0) - - try: - client.delete_table(TableName='test1') - except ClientError as err: - err.response['Error']['Code'].should.equal('ResourceNotFoundException') - else: - raise RuntimeError('Should of raised ResourceNotFoundException') - - -@mock_dynamodb2 -def test_delete_item(): - client = boto3.client('dynamodb', region_name='us-east-1') - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
- client.create_table( - TableName='test1', - AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], - KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], - ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} - ) - client.put_item( - TableName='test1', - Item={ - 'client': {'S': 'client1'}, - 'app': {'S': 'app1'} - } - ) - client.put_item( - TableName='test1', - Item={ - 'client': {'S': 'client1'}, - 'app': {'S': 'app2'} - } - ) - - table = dynamodb.Table('test1') - response = table.scan() - assert response['Count'] == 2 - - # Test deletion and returning old value - response = table.delete_item(Key={'client': 'client1', 'app': 'app1'}, ReturnValues='ALL_OLD') - response['Attributes'].should.contain('client') - response['Attributes'].should.contain('app') - - response = table.scan() - assert response['Count'] == 1 - - # Test deletion returning nothing - response = table.delete_item(Key={'client': 'client1', 'app': 'app2'}) - len(response['Attributes']).should.equal(0) - - response = table.scan() - assert response['Count'] == 0 - - -@mock_dynamodb2 -def test_describe_limits(): - client = boto3.client('dynamodb', region_name='eu-central-1') - resp = client.describe_limits() - - resp['AccountMaxReadCapacityUnits'].should.equal(20000) - resp['AccountMaxWriteCapacityUnits'].should.equal(20000) - resp['TableMaxWriteCapacityUnits'].should.equal(10000) - resp['TableMaxReadCapacityUnits'].should.equal(10000) - - -@mock_dynamodb2 -def test_set_ttl(): - client = boto3.client('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - client.create_table( - TableName='test1', - AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], - KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], - ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} - ) - - client.update_time_to_live( - TableName='test1', - TimeToLiveSpecification={ - 'Enabled': True, - 'AttributeName': 'expire' - } - ) - - resp = client.describe_time_to_live(TableName='test1') - resp['TimeToLiveDescription']['TimeToLiveStatus'].should.equal('ENABLED') - resp['TimeToLiveDescription']['AttributeName'].should.equal('expire') - - client.update_time_to_live( - TableName='test1', - TimeToLiveSpecification={ - 'Enabled': False, - 'AttributeName': 'expire' - } - ) - - resp = client.describe_time_to_live(TableName='test1') - resp['TimeToLiveDescription']['TimeToLiveStatus'].should.equal('DISABLED') - - -# https://github.com/spulec/moto/issues/1043 -@mock_dynamodb2 -def test_query_missing_expr_names(): - client = boto3.client('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
- client.create_table( - TableName='test1', - AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], - KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], - ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} - ) - client.put_item(TableName='test1', Item={'client': {'S': 'test1'}, 'app': {'S': 'test1'}}) - client.put_item(TableName='test1', Item={'client': {'S': 'test2'}, 'app': {'S': 'test2'}}) - - resp = client.query(TableName='test1', KeyConditionExpression='client=:client', - ExpressionAttributeValues={':client': {'S': 'test1'}}) - - resp['Count'].should.equal(1) - resp['Items'][0]['client']['S'].should.equal('test1') - - resp = client.query(TableName='test1', KeyConditionExpression=':name=test2', - ExpressionAttributeNames={':name': 'client'}) - - resp['Count'].should.equal(1) - resp['Items'][0]['client']['S'].should.equal('test2') - - -# https://github.com/spulec/moto/issues/1342 -@mock_dynamodb2 -def test_update_item_on_map(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - client = boto3.client('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'body': {'nested': {'data': 'test'}}, - }) - - resp = table.scan() - resp['Items'][0]['body'].should.equal({'nested': {'data': 'test'}}) - - # Nonexistent nested attributes are supported for existing top-level attributes. - table.update_item(Key={ - 'forum_name': 'the-key', - 'subject': '123' - }, - UpdateExpression='SET body.#nested.#data = :tb, body.nested.#nonexistentnested.#data = :tb2', - ExpressionAttributeNames={ - '#nested': 'nested', - '#nonexistentnested': 'nonexistentnested', - '#data': 'data' - }, - ExpressionAttributeValues={ - ':tb': 'new_value', - ':tb2': 'other_value' - }) - - resp = table.scan() - resp['Items'][0]['body'].should.equal({ - 'nested': { - 'data': 'new_value', - 'nonexistentnested': {'data': 'other_value'} - } - }) - - # Test nested value for a nonexistent attribute. - with assert_raises(client.exceptions.ConditionalCheckFailedException): - table.update_item(Key={ - 'forum_name': 'the-key', - 'subject': '123' - }, - UpdateExpression='SET nonexistent.#nested = :tb', - ExpressionAttributeNames={ - '#nested': 'nested' - }, - ExpressionAttributeValues={ - ':tb': 'new_value' - }) - - - -# https://github.com/spulec/moto/issues/1358 -@mock_dynamodb2 -def test_update_if_not_exists(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
- dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123' - }) - - table.update_item(Key={ - 'forum_name': 'the-key', - 'subject': '123' - }, - # if_not_exists without space - UpdateExpression='SET created_at=if_not_exists(created_at,:created_at)', - ExpressionAttributeValues={ - ':created_at': 123 - } - ) - - resp = table.scan() - assert resp['Items'][0]['created_at'] == 123 - - table.update_item(Key={ - 'forum_name': 'the-key', - 'subject': '123' - }, - # if_not_exists with space - UpdateExpression='SET created_at = if_not_exists (created_at, :created_at)', - ExpressionAttributeValues={ - ':created_at': 456 - } - ) - - resp = table.scan() - # Still the original value - assert resp['Items'][0]['created_at'] == 123 - - -@mock_dynamodb2 -def test_query_global_secondary_index_when_created_via_update_table_resource(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'user_id', - 'KeyType': 'HASH' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'user_id', - 'AttributeType': 'N', - }, - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - }, - ) - table = dynamodb.Table('users') - table.update( - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - ], - GlobalSecondaryIndexUpdates=[ - {'Create': - { - 'IndexName': 'forum_name_index', - 'KeySchema': [ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH', - }, - ], - 'Projection': { - 'ProjectionType': 'ALL', - }, - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - }, - } - } - ] - ) - - next_user_id = 1 - for my_forum_name in ['cats', 'dogs']: - for my_subject in ['my pet is the cutest', 'wow look at what my pet did', "don't you love my pet?"]: - table.put_item(Item={'user_id': next_user_id, 'forum_name': my_forum_name, 'subject': my_subject}) - next_user_id += 1 - - # get all the cat users - forum_only_query_response = table.query( - IndexName='forum_name_index', - Select='ALL_ATTRIBUTES', - KeyConditionExpression=Key('forum_name').eq('cats'), - ) - forum_only_items = forum_only_query_response['Items'] - assert len(forum_only_items) == 3 - for item in forum_only_items: - assert item['forum_name'] == 'cats' - - # query all cat users with a particular subject - forum_and_subject_query_results = table.query( - IndexName='forum_name_index', - Select='ALL_ATTRIBUTES', - KeyConditionExpression=Key('forum_name').eq('cats'), - FilterExpression=Attr('subject').eq('my pet is the cutest'), - ) - forum_and_subject_items = forum_and_subject_query_results['Items'] - assert len(forum_and_subject_items) == 1 - assert forum_and_subject_items[0] == {'user_id': Decimal('1'), 'forum_name': 'cats', - 'subject': 'my pet is the cutest'} +from __future__ import unicode_literals, print_function + +from decimal import Decimal + +import 
six +import boto +import boto3 +from boto3.dynamodb.conditions import Attr, Key +import sure # noqa +import requests +from moto import mock_dynamodb2, mock_dynamodb2_deprecated +from moto.dynamodb2 import dynamodb_backend2 +from boto.exception import JSONResponseError +from botocore.exceptions import ClientError +from tests.helpers import requires_boto_gte +import tests.backport_assert_raises + +import moto.dynamodb2.comparisons +import moto.dynamodb2.models + +from nose.tools import assert_raises +try: + import boto.dynamodb2 +except ImportError: + print("This boto version is not supported") + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_list_tables(): + name = 'TestTable' + # Should make tables properly with boto + dynamodb_backend2.create_table(name, schema=[ + {u'KeyType': u'HASH', u'AttributeName': u'forum_name'}, + {u'KeyType': u'RANGE', u'AttributeName': u'subject'} + ]) + conn = boto.dynamodb2.connect_to_region( + 'us-east-1', + aws_access_key_id="ak", + aws_secret_access_key="sk") + assert conn.list_tables()["TableNames"] == [name] + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_list_tables_layer_1(): + # Should make tables properly with boto + dynamodb_backend2.create_table("test_1", schema=[ + {u'KeyType': u'HASH', u'AttributeName': u'name'} + ]) + dynamodb_backend2.create_table("test_2", schema=[ + {u'KeyType': u'HASH', u'AttributeName': u'name'} + ]) + conn = boto.dynamodb2.connect_to_region( + 'us-east-1', + aws_access_key_id="ak", + aws_secret_access_key="sk") + + res = conn.list_tables(limit=1) + expected = {"TableNames": ["test_1"], "LastEvaluatedTableName": "test_1"} + res.should.equal(expected) + + res = conn.list_tables(limit=1, exclusive_start_table_name="test_1") + expected = {"TableNames": ["test_2"]} + res.should.equal(expected) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_describe_missing_table(): + conn = boto.dynamodb2.connect_to_region( + 'us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + with assert_raises(JSONResponseError): + conn.describe_table('messages') + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_list_table_tags(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'id','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'id','AttributeType':'S'}], + ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + table_description = conn.describe_table(TableName=name) + arn = table_description['Table']['TableArn'] + + # Tag table + tags = [{'Key': 'TestTag', 'Value': 'TestValue'}, {'Key': 'TestTag2', 'Value': 'TestValue2'}] + conn.tag_resource(ResourceArn=arn, Tags=tags) + + # Check tags + resp = conn.list_tags_of_resource(ResourceArn=arn) + assert resp["Tags"] == tags + + # Remove 1 tag + conn.untag_resource(ResourceArn=arn, TagKeys=['TestTag']) + + # Check tags + resp = conn.list_tags_of_resource(ResourceArn=arn) + assert resp["Tags"] == [{'Key': 'TestTag2', 'Value': 'TestValue2'}] + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_list_table_tags_empty(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'id','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'id','AttributeType':'S'}], + 
ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + table_description = conn.describe_table(TableName=name) + arn = table_description['Table']['TableArn'] + tags = [{'Key':'TestTag', 'Value': 'TestValue'}] + # conn.tag_resource(ResourceArn=arn, + # Tags=tags) + resp = conn.list_tags_of_resource(ResourceArn=arn) + assert resp["Tags"] == [] + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_list_table_tags_paginated(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'id','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'id','AttributeType':'S'}], + ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + table_description = conn.describe_table(TableName=name) + arn = table_description['Table']['TableArn'] + for i in range(11): + tags = [{'Key':'TestTag%d' % i, 'Value': 'TestValue'}] + conn.tag_resource(ResourceArn=arn, + Tags=tags) + resp = conn.list_tags_of_resource(ResourceArn=arn) + assert len(resp["Tags"]) == 10 + assert 'NextToken' in resp.keys() + resp2 = conn.list_tags_of_resource(ResourceArn=arn, + NextToken=resp['NextToken']) + assert len(resp2["Tags"]) == 1 + assert 'NextToken' not in resp2.keys() + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_list_not_found_table_tags(): + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + arn = 'DymmyArn' + try: + conn.list_tags_of_resource(ResourceArn=arn) + except ClientError as exception: + assert exception.response['Error']['Code'] == "ResourceNotFoundException" + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_item_add_empty_string_exception(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], + ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + + with assert_raises(ClientError) as ex: + conn.put_item( + TableName=name, + Item={ + 'forum_name': { 'S': 'LOLCat Forum' }, + 'subject': { 'S': 'Check this out!' }, + 'Body': { 'S': 'http://url_to_lolcat.gif'}, + 'SentBy': { 'S': "" }, + 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, + } + ) + + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'One or more parameter values were invalid: An AttributeValue may not contain an empty string' + ) + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_update_item_with_empty_string_exception(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], + ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + + conn.put_item( + TableName=name, + Item={ + 'forum_name': { 'S': 'LOLCat Forum' }, + 'subject': { 'S': 'Check this out!' 
}, + 'Body': { 'S': 'http://url_to_lolcat.gif'}, + 'SentBy': { 'S': "test" }, + 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, + } + ) + + with assert_raises(ClientError) as ex: + conn.update_item( + TableName=name, + Key={ + 'forum_name': { 'S': 'LOLCat Forum'}, + }, + UpdateExpression='set Body=:Body', + ExpressionAttributeValues={ + ':Body': {'S': ''} + }) + + ex.exception.response['Error']['Code'].should.equal('ValidationException') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'One or more parameter values were invalid: An AttributeValue may not contain an empty string' + ) + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_query_invalid_table(): + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + try: + conn.query(TableName='invalid_table', KeyConditionExpression='index1 = :partitionkeyval', ExpressionAttributeValues={':partitionkeyval': {'S':'test'}}) + except ClientError as exception: + assert exception.response['Error']['Code'] == "ResourceNotFoundException" + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_scan_returns_consumed_capacity(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], + ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + + conn.put_item( + TableName=name, + Item={ + 'forum_name': { 'S': 'LOLCat Forum' }, + 'subject': { 'S': 'Check this out!' }, + 'Body': { 'S': 'http://url_to_lolcat.gif'}, + 'SentBy': { 'S': "test" }, + 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, + } + ) + + response = conn.scan( + TableName=name, + ) + + assert 'ConsumedCapacity' in response + assert 'CapacityUnits' in response['ConsumedCapacity'] + assert response['ConsumedCapacity']['TableName'] == name + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_put_item_with_special_chars(): + name = 'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], + ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + + conn.put_item( + TableName=name, + Item={ + 'forum_name': { 'S': 'LOLCat Forum' }, + 'subject': { 'S': 'Check this out!' }, + 'Body': { 'S': 'http://url_to_lolcat.gif'}, + 'SentBy': { 'S': "test" }, + 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, + '"': {"S": "foo"}, + } + ) + + +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_query_returns_consumed_capacity(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
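+    # The table below keys on forum_name (hash) and subject (range); the
+    # query at the end of this test asserts that ConsumedCapacity is
+    # returned with CapacityUnits equal to 1.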
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message' + }) + + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') + ) + + assert 'ConsumedCapacity' in results + assert 'CapacityUnits' in results['ConsumedCapacity'] + assert results['ConsumedCapacity']['CapacityUnits'] == 1 + + +@mock_dynamodb2 +def test_basic_projection_expressions(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message' + }) + + table.put_item(Item={ + 'forum_name': 'not-the-key', + 'subject': '123', + 'body': 'some other test message' + }) + # Test a query returning all items + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key'), + ProjectionExpression='body, subject' + ) + + assert 'body' in results['Items'][0] + assert results['Items'][0]['body'] == 'some test message' + assert 'subject' in results['Items'][0] + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '1234', + 'body': 'yet another test message' + }) + + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key'), + ProjectionExpression='body' + ) + + assert 'body' in results['Items'][0] + assert 'subject' not in results['Items'][0] + assert results['Items'][0]['body'] == 'some test message' + assert 'body' in results['Items'][1] + assert 'subject' not in results['Items'][1] + assert results['Items'][1]['body'] == 'yet another test message' + + # The projection expression should not remove data from storage + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key'), + ) + assert 'subject' in results['Items'][0] + assert 'body' in results['Items'][1] + assert 'forum_name' in results['Items'][1] + + +@mock_dynamodb2 +def test_basic_projection_expressions_with_attr_expression_names(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
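+    # Same forum_name/subject schema; this test aliases attribute names via
+    # ExpressionAttributeNames (#rl -> body, #rt -> attachment) and checks
+    # that the projected attributes come back under their real names.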
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message', + 'attachment': 'something' + }) + + table.put_item(Item={ + 'forum_name': 'not-the-key', + 'subject': '123', + 'body': 'some other test message', + 'attachment': 'something' + }) + # Test a query returning all items + + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key'), + ProjectionExpression='#rl, #rt, subject', + ExpressionAttributeNames={ + '#rl': 'body', + '#rt': 'attachment' + }, + ) + + assert 'body' in results['Items'][0] + assert results['Items'][0]['body'] == 'some test message' + assert 'subject' in results['Items'][0] + assert results['Items'][0]['subject'] == '123' + assert 'attachment' in results['Items'][0] + assert results['Items'][0]['attachment'] == 'something' + + +@mock_dynamodb2 +def test_put_item_returns_consumed_capacity(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + response = table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message', + }) + + assert 'ConsumedCapacity' in response + + +@mock_dynamodb2 +def test_update_item_returns_consumed_capacity(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': 'some test message', + }) + + response = table.update_item(Key={ + 'forum_name': 'the-key', + 'subject': '123' + }, + UpdateExpression='set body=:tb', + ExpressionAttributeValues={ + ':tb': 'a new message' + }) + + assert 'ConsumedCapacity' in response + assert 'CapacityUnits' in response['ConsumedCapacity'] + assert 'TableName' in response['ConsumedCapacity'] + + +@mock_dynamodb2 +def test_get_item_returns_consumed_capacity(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
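+    # get_item should also report ConsumedCapacity: the assertions at the
+    # end of this test look for CapacityUnits and TableName in the response.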
+    table = dynamodb.create_table(
+        TableName='users',
+        KeySchema=[
+            {
+                'AttributeName': 'forum_name',
+                'KeyType': 'HASH'
+            },
+            {
+                'AttributeName': 'subject',
+                'KeyType': 'RANGE'
+            },
+        ],
+        AttributeDefinitions=[
+            {
+                'AttributeName': 'forum_name',
+                'AttributeType': 'S'
+            },
+            {
+                'AttributeName': 'subject',
+                'AttributeType': 'S'
+            },
+        ],
+        ProvisionedThroughput={
+            'ReadCapacityUnits': 5,
+            'WriteCapacityUnits': 5
+        }
+    )
+    table = dynamodb.Table('users')
+
+    table.put_item(Item={
+        'forum_name': 'the-key',
+        'subject': '123',
+        'body': 'some test message',
+    })
+
+    response = table.get_item(Key={
+        'forum_name': 'the-key',
+        'subject': '123'
+    })
+
+    assert 'ConsumedCapacity' in response
+    assert 'CapacityUnits' in response['ConsumedCapacity']
+    assert 'TableName' in response['ConsumedCapacity']
+
+
+def test_filter_expression():
+    row1 = moto.dynamodb2.models.Item(None, None, None, None, {'Id': {'N': '8'}, 'Subs': {'N': '5'}, 'Desc': {'S': 'Some description'}, 'KV': {'SS': ['test1', 'test2']}})
+    row2 = moto.dynamodb2.models.Item(None, None, None, None, {'Id': {'N': '8'}, 'Subs': {'N': '10'}, 'Desc': {'S': 'A description'}, 'KV': {'SS': ['test3', 'test4']}})
+
+    # NOT test 1
+    filter_expr = moto.dynamodb2.comparisons.get_filter_expression('NOT attribute_not_exists(Id)', {}, {})
+    filter_expr.expr(row1).should.be(True)
+
+    # NOT test 2
+    filter_expr = moto.dynamodb2.comparisons.get_filter_expression('NOT (Id = :v0)', {}, {':v0': {'N': 8}})
+    filter_expr.expr(row1).should.be(False)  # Id = 8 so should be false
+
+    # AND test
+    filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id > :v0 AND Subs < :v1', {}, {':v0': {'N': 5}, ':v1': {'N': 7}})
+    filter_expr.expr(row1).should.be(True)
+    filter_expr.expr(row2).should.be(False)
+
+    # OR test
+    filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = :v0 OR Id=:v1', {}, {':v0': {'N': 5}, ':v1': {'N': 8}})
+    filter_expr.expr(row1).should.be(True)
+
+    # BETWEEN test
+    filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id BETWEEN :v0 AND :v1', {}, {':v0': {'N': 5}, ':v1': {'N': 10}})
+    filter_expr.expr(row1).should.be(True)
+
+    # PAREN test
+    filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = :v0 AND (Subs = :v0 OR Subs = :v1)', {}, {':v0': {'N': 8}, ':v1': {'N': 5}})
+    filter_expr.expr(row1).should.be(True)
+
+    # IN test
+    filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id IN :v0', {}, {':v0': {'NS': [7, 8, 9]}})
+    filter_expr.expr(row1).should.be(True)
+
+    # attribute function tests (with extra spaces)
+    filter_expr = moto.dynamodb2.comparisons.get_filter_expression('attribute_exists(Id) AND attribute_not_exists (User)', {}, {})
+    filter_expr.expr(row1).should.be(True)
+
+    filter_expr = moto.dynamodb2.comparisons.get_filter_expression('attribute_type(Id, N)', {}, {})
+    filter_expr.expr(row1).should.be(True)
+
+    # begins_with function test
+    filter_expr = moto.dynamodb2.comparisons.get_filter_expression('begins_with(Desc, Some)', {}, {})
+    filter_expr.expr(row1).should.be(True)
+    filter_expr.expr(row2).should.be(False)
+
+    # contains function test
+    filter_expr = moto.dynamodb2.comparisons.get_filter_expression('contains(KV, test1)', {}, {})
+    filter_expr.expr(row1).should.be(True)
+    filter_expr.expr(row2).should.be(False)
+
+    # size function test
+    filter_expr = moto.dynamodb2.comparisons.get_filter_expression('size(Desc) > size(KV)', {}, {})
+    filter_expr.expr(row1).should.be(True)
+
+    # Expression from @batkuip
+    filter_expr = moto.dynamodb2.comparisons.get_filter_expression(
+        '(#n0 < :v0 AND attribute_not_exists(#n1))',
+        {'#n0': 'Subs', '#n1': 'fanout_ts'},
+        {':v0': {'N': '7'}}
+    )
+    filter_expr.expr(row1).should.be(True)
+
+    # Expression to check contains() against a string value
+    filter_expr = moto.dynamodb2.comparisons.get_filter_expression(
+        'contains(#n0, :v0)',
+        {'#n0': 'Desc'},
+        {':v0': {'S': 'Some'}}
+    )
+    filter_expr.expr(row1).should.be(True)
+    filter_expr.expr(row2).should.be(False)
+
+
+@mock_dynamodb2
+def test_query_filter():
+    client = boto3.client('dynamodb', region_name='us-east-1')
+    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
+    client.create_table(
+        TableName='test1',
+        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
+        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+    )
+    client.put_item(
+        TableName='test1',
+        Item={
+            'client': {'S': 'client1'},
+            'app': {'S': 'app1'}
+        }
+    )
+    client.put_item(
+        TableName='test1',
+        Item={
+            'client': {'S': 'client1'},
+            'app': {'S': 'app2'}
+        }
+    )
+
+    table = dynamodb.Table('test1')
+    response = table.query(
+        KeyConditionExpression=Key('client').eq('client1')
+    )
+    assert response['Count'] == 2
+
+    response = table.query(
+        KeyConditionExpression=Key('client').eq('client1'),
+        FilterExpression=Attr('app').eq('app2')
+    )
+    assert response['Count'] == 1
+    assert response['Items'][0]['app'] == 'app2'
+    response = table.query(
+        KeyConditionExpression=Key('client').eq('client1'),
+        FilterExpression=Attr('app').contains('app')
+    )
+    assert response['Count'] == 2
+
+
+@mock_dynamodb2
+def test_scan_filter():
+    client = boto3.client('dynamodb', region_name='us-east-1')
+    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
+    client.create_table(
+        TableName='test1',
+        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
+        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+    )
+    client.put_item(
+        TableName='test1',
+        Item={
+            'client': {'S': 'client1'},
+            'app': {'S': 'app1'}
+        }
+    )
+
+    table = dynamodb.Table('test1')
+    response = table.scan(
+        FilterExpression=Attr('app').eq('app2')
+    )
+    assert response['Count'] == 0
+
+    response = table.scan(
+        FilterExpression=Attr('app').eq('app1')
+    )
+    assert response['Count'] == 1
+
+
+@mock_dynamodb2
+def test_scan_filter2():
+    client = boto3.client('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
+    client.create_table(
+        TableName='test1',
+        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}],
+        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+    )
+    client.put_item(
+        TableName='test1',
+        Item={
+            'client': {'S': 'client1'},
+            'app': {'N': '1'}
+        }
+    )
+
+    response = client.scan(
+        TableName='test1',
+        Select='ALL_ATTRIBUTES',
+        FilterExpression='#tb >= :dt',
+        ExpressionAttributeNames={"#tb": "app"},
+        ExpressionAttributeValues={":dt": {"N": str(1)}}
+    )
+    assert response['Count'] == 1
+
+
+@mock_dynamodb2
+def test_scan_filter3():
+    client = boto3.client('dynamodb', region_name='us-east-1')
+    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
+    client.create_table(
+        TableName='test1',
+        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}],
+        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+    )
+    client.put_item(
+        TableName='test1',
+        Item={
+            'client': {'S': 'client1'},
+            'app': {'N': '1'},
+            'active': {'BOOL': True}
+        }
+    )
+
+    table = dynamodb.Table('test1')
+    response = table.scan(
+        FilterExpression=Attr('active').eq(True)
+    )
+    assert response['Count'] == 1
+
+
+@mock_dynamodb2
+def test_scan_filter4():
+    client = boto3.client('dynamodb', region_name='us-east-1')
+    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
+    client.create_table(
+        TableName='test1',
+        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}],
+        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+    )
+
+    table = dynamodb.Table('test1')
+    response = table.scan(
+        FilterExpression=Attr('epoch_ts').lt(7) & Attr('fanout_ts').not_exists()
+    )
+    # No items match on an empty table; the combined filter should parse without error
+    assert response['Count'] == 0
+
+
+@mock_dynamodb2
+def test_bad_scan_filter():
+    client = boto3.client('dynamodb', region_name='us-east-1')
+    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
+    client.create_table(
+        TableName='test1',
+        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
+        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+    )
+    table = dynamodb.Table('test1')
+
+    # Bad expression
+    try:
+        table.scan(
+            FilterExpression='client test'
+        )
+    except ClientError as err:
+        err.response['Error']['Code'].should.equal('ValidationError')
+    else:
+        raise RuntimeError('Should have raised ClientError')
+
+
+@mock_dynamodb2
+def test_duplicate_create():
+    client = boto3.client('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
+    client.create_table(
+        TableName='test1',
+        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
+        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+    )
+
+    try:
+        client.create_table(
+            TableName='test1',
+            AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
+            KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+            ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+        )
+    except ClientError as err:
+        err.response['Error']['Code'].should.equal('ResourceInUseException')
+    else:
+        raise RuntimeError('Should have raised ResourceInUseException')
+
+
+@mock_dynamodb2
+def test_delete_table():
+    client = boto3.client('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
+    client.create_table(
+        TableName='test1',
+        AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}],
+        KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}],
+        ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123}
+    )
+
+    client.delete_table(TableName='test1')
+
+    resp = client.list_tables()
+    len(resp['TableNames']).should.equal(0)
+
+    try:
+        client.delete_table(TableName='test1')
+    except ClientError as err:
+        err.response['Error']['Code'].should.equal('ResourceNotFoundException')
+    else:
+        raise RuntimeError('Should have raised ResourceNotFoundException')
+
+
+@mock_dynamodb2
+def test_delete_item():
+    client = boto3.client('dynamodb', region_name='us-east-1')
+    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')
+
+    # Create the DynamoDB table.
+ client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'} + } + ) + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app2'} + } + ) + + table = dynamodb.Table('test1') + response = table.scan() + assert response['Count'] == 2 + + # Test deletion and returning old value + response = table.delete_item(Key={'client': 'client1', 'app': 'app1'}, ReturnValues='ALL_OLD') + response['Attributes'].should.contain('client') + response['Attributes'].should.contain('app') + + response = table.scan() + assert response['Count'] == 1 + + # Test deletion returning nothing + response = table.delete_item(Key={'client': 'client1', 'app': 'app2'}) + len(response['Attributes']).should.equal(0) + + response = table.scan() + assert response['Count'] == 0 + + +@mock_dynamodb2 +def test_describe_limits(): + client = boto3.client('dynamodb', region_name='eu-central-1') + resp = client.describe_limits() + + resp['AccountMaxReadCapacityUnits'].should.equal(20000) + resp['AccountMaxWriteCapacityUnits'].should.equal(20000) + resp['TableMaxWriteCapacityUnits'].should.equal(10000) + resp['TableMaxReadCapacityUnits'].should.equal(10000) + + +@mock_dynamodb2 +def test_set_ttl(): + client = boto3.client('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + + client.update_time_to_live( + TableName='test1', + TimeToLiveSpecification={ + 'Enabled': True, + 'AttributeName': 'expire' + } + ) + + resp = client.describe_time_to_live(TableName='test1') + resp['TimeToLiveDescription']['TimeToLiveStatus'].should.equal('ENABLED') + resp['TimeToLiveDescription']['AttributeName'].should.equal('expire') + + client.update_time_to_live( + TableName='test1', + TimeToLiveSpecification={ + 'Enabled': False, + 'AttributeName': 'expire' + } + ) + + resp = client.describe_time_to_live(TableName='test1') + resp['TimeToLiveDescription']['TimeToLiveStatus'].should.equal('DISABLED') + + +# https://github.com/spulec/moto/issues/1043 +@mock_dynamodb2 +def test_query_missing_expr_names(): + client = boto3.client('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+ client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + client.put_item(TableName='test1', Item={'client': {'S': 'test1'}, 'app': {'S': 'test1'}}) + client.put_item(TableName='test1', Item={'client': {'S': 'test2'}, 'app': {'S': 'test2'}}) + + resp = client.query(TableName='test1', KeyConditionExpression='client=:client', + ExpressionAttributeValues={':client': {'S': 'test1'}}) + + resp['Count'].should.equal(1) + resp['Items'][0]['client']['S'].should.equal('test1') + + resp = client.query(TableName='test1', KeyConditionExpression=':name=test2', + ExpressionAttributeNames={':name': 'client'}) + + resp['Count'].should.equal(1) + resp['Items'][0]['client']['S'].should.equal('test2') + + +# https://github.com/spulec/moto/issues/1342 +@mock_dynamodb2 +def test_update_item_on_map(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + client = boto3.client('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': {'nested': {'data': 'test'}}, + }) + + resp = table.scan() + resp['Items'][0]['body'].should.equal({'nested': {'data': 'test'}}) + + # Nonexistent nested attributes are supported for existing top-level attributes. + table.update_item(Key={ + 'forum_name': 'the-key', + 'subject': '123' + }, + UpdateExpression='SET body.#nested.#data = :tb, body.nested.#nonexistentnested.#data = :tb2', + ExpressionAttributeNames={ + '#nested': 'nested', + '#nonexistentnested': 'nonexistentnested', + '#data': 'data' + }, + ExpressionAttributeValues={ + ':tb': 'new_value', + ':tb2': 'other_value' + }) + + resp = table.scan() + resp['Items'][0]['body'].should.equal({ + 'nested': { + 'data': 'new_value', + 'nonexistentnested': {'data': 'other_value'} + } + }) + + # Test nested value for a nonexistent attribute. + with assert_raises(client.exceptions.ConditionalCheckFailedException): + table.update_item(Key={ + 'forum_name': 'the-key', + 'subject': '123' + }, + UpdateExpression='SET nonexistent.#nested = :tb', + ExpressionAttributeNames={ + '#nested': 'nested' + }, + ExpressionAttributeValues={ + ':tb': 'new_value' + }) + + + +# https://github.com/spulec/moto/issues/1358 +@mock_dynamodb2 +def test_update_if_not_exists(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+ dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123' + }) + + table.update_item(Key={ + 'forum_name': 'the-key', + 'subject': '123' + }, + # if_not_exists without space + UpdateExpression='SET created_at=if_not_exists(created_at,:created_at)', + ExpressionAttributeValues={ + ':created_at': 123 + } + ) + + resp = table.scan() + assert resp['Items'][0]['created_at'] == 123 + + table.update_item(Key={ + 'forum_name': 'the-key', + 'subject': '123' + }, + # if_not_exists with space + UpdateExpression='SET created_at = if_not_exists (created_at, :created_at)', + ExpressionAttributeValues={ + ':created_at': 456 + } + ) + + resp = table.scan() + # Still the original value + assert resp['Items'][0]['created_at'] == 123 + + +@mock_dynamodb2 +def test_query_global_secondary_index_when_created_via_update_table_resource(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'user_id', + 'KeyType': 'HASH' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'user_id', + 'AttributeType': 'N', + }, + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + }, + ) + table = dynamodb.Table('users') + table.update( + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + ], + GlobalSecondaryIndexUpdates=[ + {'Create': + { + 'IndexName': 'forum_name_index', + 'KeySchema': [ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH', + }, + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + }, + } + } + ] + ) + + next_user_id = 1 + for my_forum_name in ['cats', 'dogs']: + for my_subject in ['my pet is the cutest', 'wow look at what my pet did', "don't you love my pet?"]: + table.put_item(Item={'user_id': next_user_id, 'forum_name': my_forum_name, 'subject': my_subject}) + next_user_id += 1 + + # get all the cat users + forum_only_query_response = table.query( + IndexName='forum_name_index', + Select='ALL_ATTRIBUTES', + KeyConditionExpression=Key('forum_name').eq('cats'), + ) + forum_only_items = forum_only_query_response['Items'] + assert len(forum_only_items) == 3 + for item in forum_only_items: + assert item['forum_name'] == 'cats' + + # query all cat users with a particular subject + forum_and_subject_query_results = table.query( + IndexName='forum_name_index', + Select='ALL_ATTRIBUTES', + KeyConditionExpression=Key('forum_name').eq('cats'), + FilterExpression=Attr('subject').eq('my pet is the cutest'), + ) + forum_and_subject_items = forum_and_subject_query_results['Items'] + assert len(forum_and_subject_items) == 1 + assert forum_and_subject_items[0] == {'user_id': Decimal('1'), 'forum_name': 'cats', + 'subject': 'my pet is the cutest'} diff --git a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py 
b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py index a9ab298b7..cc7fca11e 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_with_range_key.py @@ -1,1963 +1,1963 @@ -from __future__ import unicode_literals - -from decimal import Decimal - -import boto -import boto3 -from boto3.dynamodb.conditions import Key -from botocore.exceptions import ClientError -import sure # noqa -from freezegun import freeze_time -from moto import mock_dynamodb2, mock_dynamodb2_deprecated -from boto.exception import JSONResponseError -from tests.helpers import requires_boto_gte -try: - from boto.dynamodb2.fields import GlobalAllIndex, HashKey, RangeKey, AllIndex - from boto.dynamodb2.table import Item, Table - from boto.dynamodb2.types import STRING, NUMBER - from boto.dynamodb2.exceptions import ValidationException - from boto.dynamodb2.exceptions import ConditionalCheckFailedException -except ImportError: - pass - - -def create_table(): - table = Table.create('messages', schema=[ - HashKey('forum_name'), - RangeKey('subject'), - ], throughput={ - 'read': 10, - 'write': 10, - }) - return table - - -def create_table_with_local_indexes(): - table = Table.create( - 'messages', - schema=[ - HashKey('forum_name'), - RangeKey('subject'), - ], - throughput={ - 'read': 10, - 'write': 10, - }, - indexes=[ - AllIndex( - 'threads_index', - parts=[ - HashKey('forum_name', data_type=STRING), - RangeKey('threads', data_type=NUMBER), - ] - ) - ] - ) - return table - - -def iterate_results(res): - for i in res: - pass - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -@freeze_time("2012-01-14") -def test_create_table(): - table = create_table() - expected = { - 'Table': { - 'AttributeDefinitions': [ - {'AttributeName': 'forum_name', 'AttributeType': 'S'}, - {'AttributeName': 'subject', 'AttributeType': 'S'} - ], - 'ProvisionedThroughput': { - 'NumberOfDecreasesToday': 0, 'WriteCapacityUnits': 10, 'ReadCapacityUnits': 10 - }, - 'TableSizeBytes': 0, - 'TableName': 'messages', - 'TableStatus': 'ACTIVE', - 'TableArn': 'arn:aws:dynamodb:us-east-1:123456789011:table/messages', - 'KeySchema': [ - {'KeyType': 'HASH', 'AttributeName': 'forum_name'}, - {'KeyType': 'RANGE', 'AttributeName': 'subject'} - ], - 'LocalSecondaryIndexes': [], - 'ItemCount': 0, 'CreationDateTime': 1326499200.0, - 'GlobalSecondaryIndexes': [] - } - } - table.describe().should.equal(expected) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -@freeze_time("2012-01-14") -def test_create_table_with_local_index(): - table = create_table_with_local_indexes() - expected = { - 'Table': { - 'AttributeDefinitions': [ - {'AttributeName': 'forum_name', 'AttributeType': 'S'}, - {'AttributeName': 'subject', 'AttributeType': 'S'}, - {'AttributeName': 'threads', 'AttributeType': 'N'} - ], - 'ProvisionedThroughput': { - 'NumberOfDecreasesToday': 0, - 'WriteCapacityUnits': 10, - 'ReadCapacityUnits': 10, - }, - 'TableSizeBytes': 0, - 'TableName': 'messages', - 'TableStatus': 'ACTIVE', - 'TableArn': 'arn:aws:dynamodb:us-east-1:123456789011:table/messages', - 'KeySchema': [ - {'KeyType': 'HASH', 'AttributeName': 'forum_name'}, - {'KeyType': 'RANGE', 'AttributeName': 'subject'} - ], - 'LocalSecondaryIndexes': [ - { - 'IndexName': 'threads_index', - 'KeySchema': [ - {'AttributeName': 'forum_name', 'KeyType': 'HASH'}, - {'AttributeName': 'threads', 'KeyType': 'RANGE'} - ], - 'Projection': {'ProjectionType': 'ALL'} - } - ], - 'ItemCount': 0, - 'CreationDateTime': 1326499200.0, - 
'GlobalSecondaryIndexes': [] - } - } - table.describe().should.equal(expected) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_delete_table(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - table = create_table() - conn.list_tables()["TableNames"].should.have.length_of(1) - - table.delete() - conn.list_tables()["TableNames"].should.have.length_of(0) - conn.delete_table.when.called_with( - 'messages').should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_update_table_throughput(): - table = create_table() - table.throughput["read"].should.equal(10) - table.throughput["write"].should.equal(10) - table.update(throughput={ - 'read': 5, - 'write': 15, - }) - - table.throughput["read"].should.equal(5) - table.throughput["write"].should.equal(15) - - table.update(throughput={ - 'read': 5, - 'write': 6, - }) - - table.describe() - - table.throughput["read"].should.equal(5) - table.throughput["write"].should.equal(6) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_item_add_and_describe_and_update(): - table = create_table() - ok = table.put_item(data={ - 'forum_name': 'LOLCat Forum', - 'subject': 'Check this out!', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }) - ok.should.equal(True) - - table.get_item(forum_name="LOLCat Forum", - subject='Check this out!').should_not.be.none - - returned_item = table.get_item( - forum_name='LOLCat Forum', - subject='Check this out!' - ) - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'subject': 'Check this out!', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }) - - returned_item['SentBy'] = 'User B' - returned_item.save(overwrite=True) - - returned_item = table.get_item( - forum_name='LOLCat Forum', - subject='Check this out!' 
- ) - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'subject': 'Check this out!', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_item_partial_save(): - table = create_table() - - data = { - 'forum_name': 'LOLCat Forum', - 'subject': 'The LOLz', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - } - - table.put_item(data=data) - returned_item = table.get_item( - forum_name="LOLCat Forum", subject='The LOLz') - - returned_item['SentBy'] = 'User B' - returned_item.partial_save() - - returned_item = table.get_item( - forum_name='LOLCat Forum', - subject='The LOLz' - ) - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'subject': 'The LOLz', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - }) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_item_put_without_table(): - table = Table('undeclared-table') - item_data = { - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = Item(table, item_data) - item.save.when.called_with().should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_get_missing_item(): - table = create_table() - - table.get_item.when.called_with( - hash_key='tester', - range_key='other', - ).should.throw(ValidationException) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_get_item_with_undeclared_table(): - table = Table('undeclared-table') - table.get_item.when.called_with( - test_hash=3241526475).should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_get_item_without_range_key(): - table = Table.create('messages', schema=[ - HashKey('test_hash'), - RangeKey('test_range'), - ], throughput={ - 'read': 10, - 'write': 10, - }) - - hash_key = 3241526475 - range_key = 1234567890987 - table.put_item(data={'test_hash': hash_key, 'test_range': range_key}) - table.get_item.when.called_with( - test_hash=hash_key).should.throw(ValidationException) - - -@requires_boto_gte("2.30.0") -@mock_dynamodb2_deprecated -def test_delete_item(): - table = create_table() - item_data = { - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = Item(table, item_data) - item['subject'] = 'Check this out!' - item.save() - table.count().should.equal(1) - - response = item.delete() - response.should.equal(True) - - table.count().should.equal(0) - # Deletes are idempotent - item.delete().should.equal(True) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_delete_item_with_undeclared_table(): - table = Table("undeclared-table") - item_data = { - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = Item(table, item_data) - item.delete.when.called_with().should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query(): - table = create_table() - - item_data = { - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'subject': 'Check this out!' 
- } - item = Item(table, item_data) - item.save(overwrite=True) - - item['forum_name'] = 'the-key' - item['subject'] = '456' - item.save(overwrite=True) - - item['forum_name'] = 'the-key' - item['subject'] = '123' - item.save(overwrite=True) - - item['forum_name'] = 'the-key' - item['subject'] = '789' - item.save(overwrite=True) - - table.count().should.equal(4) - - results = table.query_2(forum_name__eq='the-key', - subject__gt='1', consistent=True) - expected = ["123", "456", "789"] - for index, item in enumerate(results): - item["subject"].should.equal(expected[index]) - - results = table.query_2(forum_name__eq="the-key", - subject__gt='1', reverse=True) - for index, item in enumerate(results): - item["subject"].should.equal(expected[len(expected) - 1 - index]) - - results = table.query_2(forum_name__eq='the-key', - subject__gt='1', consistent=True) - sum(1 for _ in results).should.equal(3) - - results = table.query_2(forum_name__eq='the-key', - subject__gt='234', consistent=True) - sum(1 for _ in results).should.equal(2) - - results = table.query_2(forum_name__eq='the-key', subject__gt='9999') - sum(1 for _ in results).should.equal(0) - - results = table.query_2(forum_name__eq='the-key', subject__beginswith='12') - sum(1 for _ in results).should.equal(1) - - results = table.query_2(forum_name__eq='the-key', subject__beginswith='7') - sum(1 for _ in results).should.equal(1) - - results = table.query_2(forum_name__eq='the-key', - subject__between=['567', '890']) - sum(1 for _ in results).should.equal(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_with_undeclared_table(): - table = Table('undeclared') - results = table.query( - forum_name__eq='Amazon DynamoDB', - subject__beginswith='DynamoDB', - limit=1 - ) - iterate_results.when.called_with(results).should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_scan(): - table = create_table() - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item_data['forum_name'] = 'the-key' - item_data['subject'] = '456' - - item = Item(table, item_data) - item.save() - - item['forum_name'] = 'the-key' - item['subject'] = '123' - item.save() - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:09 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - } - - item_data['forum_name'] = 'the-key' - item_data['subject'] = '789' - - item = Item(table, item_data) - item.save() - - results = table.scan() - sum(1 for _ in results).should.equal(3) - - results = table.scan(SentBy__eq='User B') - sum(1 for _ in results).should.equal(1) - - results = table.scan(Body__beginswith='http') - sum(1 for _ in results).should.equal(3) - - results = table.scan(Ids__null=False) - sum(1 for _ in results).should.equal(1) - - results = table.scan(Ids__null=True) - sum(1 for _ in results).should.equal(2) - - results = table.scan(PK__between=[8, 9]) - sum(1 for _ in results).should.equal(0) - - results = table.scan(PK__between=[5, 8]) - sum(1 for _ in results).should.equal(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_scan_with_undeclared_table(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - conn.scan.when.called_with( - table_name='undeclared-table', - scan_filter={ - "SentBy": { - "AttributeValueList": [{ - "S": "User B"} - ], - "ComparisonOperator": "EQ" - } - }, - ).should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") 
-@mock_dynamodb2_deprecated -def test_write_batch(): - table = create_table() - with table.batch_write() as batch: - batch.put_item(data={ - 'forum_name': 'the-key', - 'subject': '123', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }) - batch.put_item(data={ - 'forum_name': 'the-key', - 'subject': '789', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }) - - table.count().should.equal(2) - with table.batch_write() as batch: - batch.delete_item( - forum_name='the-key', - subject='789' - ) - - table.count().should.equal(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_batch_read(): - table = create_table() - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - - item_data['forum_name'] = 'the-key' - item_data['subject'] = '456' - - item = Item(table, item_data) - item.save() - - item = Item(table, item_data) - item_data['forum_name'] = 'the-key' - item_data['subject'] = '123' - item.save() - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - } - item = Item(table, item_data) - item_data['forum_name'] = 'another-key' - item_data['subject'] = '789' - item.save() - results = table.batch_get( - keys=[ - {'forum_name': 'the-key', 'subject': '123'}, - {'forum_name': 'another-key', 'subject': '789'}, - ] - ) - - # Iterate through so that batch_item gets called - count = len([x for x in results]) - count.should.equal(2) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_get_key_fields(): - table = create_table() - kf = table.get_key_fields() - kf.should.equal(['forum_name', 'subject']) - - -@mock_dynamodb2_deprecated -def test_create_with_global_indexes(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - - Table.create('messages', schema=[ - HashKey('subject'), - RangeKey('version'), - ], global_indexes=[ - GlobalAllIndex('topic-created_at-index', - parts=[ - HashKey('topic'), - RangeKey('created_at', data_type='N') - ], - throughput={ - 'read': 6, - 'write': 1 - } - ), - ]) - - table_description = conn.describe_table("messages") - table_description['Table']["GlobalSecondaryIndexes"].should.equal([ - { - "IndexName": "topic-created_at-index", - "KeySchema": [ - { - "AttributeName": "topic", - "KeyType": "HASH" - }, - { - "AttributeName": "created_at", - "KeyType": "RANGE" - }, - ], - "Projection": { - "ProjectionType": "ALL" - }, - "ProvisionedThroughput": { - "ReadCapacityUnits": 6, - "WriteCapacityUnits": 1, - } - } - ]) - - -@mock_dynamodb2_deprecated -def test_query_with_global_indexes(): - table = Table.create('messages', schema=[ - HashKey('subject'), - RangeKey('version'), - ], global_indexes=[ - GlobalAllIndex('topic-created_at-index', - parts=[ - HashKey('topic'), - RangeKey('created_at', data_type='N') - ], - throughput={ - 'read': 6, - 'write': 1 - } - ), - GlobalAllIndex('status-created_at-index', - parts=[ - HashKey('status'), - RangeKey('created_at', data_type='N') - ], - throughput={ - 'read': 2, - 'write': 1 - } - ) - ]) - - item_data = { - 'subject': 'Check this out!', - 'version': '1', - 'created_at': 0, - 'status': 'inactive' - } - item = Item(table, item_data) - item.save(overwrite=True) - - item['version'] = '2' - item.save(overwrite=True) - - results = table.query(status__eq='active') - list(results).should.have.length_of(0) - - 
-@mock_dynamodb2_deprecated -def test_query_with_local_indexes(): - table = create_table_with_local_indexes() - item_data = { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - 'status': 'inactive' - } - item = Item(table, item_data) - item.save(overwrite=True) - - item['version'] = '2' - item.save(overwrite=True) - results = table.query(forum_name__eq='Cool Forum', - index='threads_index', threads__eq=1) - list(results).should.have.length_of(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_filter_eq(): - table = create_table_with_local_indexes() - item_data = [ - { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Read this now!', - 'version': '1', - 'threads': 5, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Please read this... please', - 'version': '1', - 'threads': 0, - } - ] - for data in item_data: - item = Item(table, data) - item.save(overwrite=True) - results = table.query_2( - forum_name__eq='Cool Forum', index='threads_index', threads__eq=5 - ) - list(results).should.have.length_of(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_filter_lt(): - table = create_table_with_local_indexes() - item_data = [ - { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Read this now!', - 'version': '1', - 'threads': 5, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Please read this... please', - 'version': '1', - 'threads': 0, - } - ] - for data in item_data: - item = Item(table, data) - item.save(overwrite=True) - - results = table.query( - forum_name__eq='Cool Forum', index='threads_index', threads__lt=5 - ) - results = list(results) - results.should.have.length_of(2) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_filter_gt(): - table = create_table_with_local_indexes() - item_data = [ - { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Read this now!', - 'version': '1', - 'threads': 5, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Please read this... please', - 'version': '1', - 'threads': 0, - } - ] - for data in item_data: - item = Item(table, data) - item.save(overwrite=True) - - results = table.query( - forum_name__eq='Cool Forum', index='threads_index', threads__gt=1 - ) - list(results).should.have.length_of(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_filter_lte(): - table = create_table_with_local_indexes() - item_data = [ - { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Read this now!', - 'version': '1', - 'threads': 5, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Please read this... 
please', - 'version': '1', - 'threads': 0, - } - ] - for data in item_data: - item = Item(table, data) - item.save(overwrite=True) - - results = table.query( - forum_name__eq='Cool Forum', index='threads_index', threads__lte=5 - ) - list(results).should.have.length_of(3) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_filter_gte(): - table = create_table_with_local_indexes() - item_data = [ - { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Read this now!', - 'version': '1', - 'threads': 5, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Please read this... please', - 'version': '1', - 'threads': 0, - } - ] - for data in item_data: - item = Item(table, data) - item.save(overwrite=True) - - results = table.query( - forum_name__eq='Cool Forum', index='threads_index', threads__gte=1 - ) - list(results).should.have.length_of(2) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_non_hash_range_key(): - table = create_table_with_local_indexes() - item_data = [ - { - 'forum_name': 'Cool Forum', - 'subject': 'Check this out!', - 'version': '1', - 'threads': 1, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Read this now!', - 'version': '3', - 'threads': 5, - }, - { - 'forum_name': 'Cool Forum', - 'subject': 'Please read this... please', - 'version': '2', - 'threads': 0, - } - ] - for data in item_data: - item = Item(table, data) - item.save(overwrite=True) - - results = table.query( - forum_name__eq='Cool Forum', version__gt="2" - ) - results = list(results) - results.should.have.length_of(1) - - results = table.query( - forum_name__eq='Cool Forum', version__lt="3" - ) - results = list(results) - results.should.have.length_of(2) - - -@mock_dynamodb2_deprecated -def test_reverse_query(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - - table = Table.create('messages', schema=[ - HashKey('subject'), - RangeKey('created_at', data_type='N') - ]) - - for i in range(10): - table.put_item({ - 'subject': "Hi", - 'created_at': i - }) - - results = table.query_2(subject__eq="Hi", - created_at__lt=6, - limit=4, - reverse=True) - - expected = [Decimal(5), Decimal(4), Decimal(3), Decimal(2)] - [r['created_at'] for r in results].should.equal(expected) - - -@mock_dynamodb2_deprecated -def test_lookup(): - from decimal import Decimal - table = Table.create('messages', schema=[ - HashKey('test_hash'), - RangeKey('test_range'), - ], throughput={ - 'read': 10, - 'write': 10, - }) - - hash_key = 3241526475 - range_key = 1234567890987 - data = {'test_hash': hash_key, 'test_range': range_key} - table.put_item(data=data) - message = table.lookup(hash_key, range_key) - message.get('test_hash').should.equal(Decimal(hash_key)) - message.get('test_range').should.equal(Decimal(range_key)) - - -@mock_dynamodb2_deprecated -def test_failed_overwrite(): - table = Table.create('messages', schema=[ - HashKey('id'), - RangeKey('range'), - ], throughput={ - 'read': 7, - 'write': 3, - }) - - data1 = {'id': '123', 'range': 'abc', 'data': '678'} - table.put_item(data=data1) - - data2 = {'id': '123', 'range': 'abc', 'data': '345'} - table.put_item(data=data2, overwrite=True) - - data3 = {'id': '123', 'range': 'abc', 'data': '812'} - table.put_item.when.called_with(data=data3).should.throw( - ConditionalCheckFailedException) - - returned_item = table.lookup('123', 'abc') - dict(returned_item).should.equal(data2) - - data4 = {'id': '123', 'range': 'ghi', 'data': 812} - 
table.put_item(data=data4) - - returned_item = table.lookup('123', 'ghi') - dict(returned_item).should.equal(data4) - - -@mock_dynamodb2_deprecated -def test_conflicting_writes(): - table = Table.create('messages', schema=[ - HashKey('id'), - RangeKey('range'), - ]) - - item_data = {'id': '123', 'range': 'abc', 'data': '678'} - item1 = Item(table, item_data) - item2 = Item(table, item_data) - item1.save() - - item1['data'] = '579' - item2['data'] = '912' - - item1.save() - item2.save.when.called_with().should.throw(ConditionalCheckFailedException) - -""" -boto3 -""" - - -@mock_dynamodb2 -def test_boto3_conditions(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123' - }) - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '456' - }) - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '789' - }) - - # Test a query returning all items - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key') & Key("subject").gt('1'), - ScanIndexForward=True, - ) - expected = ["123", "456", "789"] - for index, item in enumerate(results['Items']): - item["subject"].should.equal(expected[index]) - - # Return all items again, but in reverse - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key') & Key("subject").gt('1'), - ScanIndexForward=False, - ) - for index, item in enumerate(reversed(results['Items'])): - item["subject"].should.equal(expected[index]) - - # Filter the subjects to only return some of the results - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key') & Key("subject").gt('234'), - ConsistentRead=True, - ) - results['Count'].should.equal(2) - - # Filter to return no results - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key') & Key("subject").gt('9999') - ) - results['Count'].should.equal(0) - - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key') & Key("subject").begins_with('12') - ) - results['Count'].should.equal(1) - - results = table.query( - KeyConditionExpression=Key("subject").begins_with( - '7') & Key('forum_name').eq('the-key') - ) - results['Count'].should.equal(1) - - results = table.query( - KeyConditionExpression=Key('forum_name').eq( - 'the-key') & Key("subject").between('567', '890') - ) - results['Count'].should.equal(1) - - -@mock_dynamodb2 -def test_boto3_put_item_with_conditions(): - import botocore - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
- table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table = dynamodb.Table('users') - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123' - }) - - table.put_item( - Item={ - 'forum_name': 'the-key-2', - 'subject': '1234', - }, - ConditionExpression='attribute_not_exists(forum_name) AND attribute_not_exists(subject)' - ) - - table.put_item.when.called_with( - Item={ - 'forum_name': 'the-key', - 'subject': '123' - }, - ConditionExpression='attribute_not_exists(forum_name) AND attribute_not_exists(subject)' - ).should.throw(botocore.exceptions.ClientError) - - table.put_item.when.called_with( - Item={ - 'forum_name': 'bogus-key', - 'subject': 'bogus', - 'test': '123' - }, - ConditionExpression='attribute_exists(forum_name) AND attribute_exists(subject)' - ).should.throw(botocore.exceptions.ClientError) - - -def _create_table_with_range_key(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - GlobalSecondaryIndexes=[{ - 'IndexName': 'TestGSI', - 'KeySchema': [ - { - 'AttributeName': 'username', - 'KeyType': 'HASH', - }, - { - 'AttributeName': 'created', - 'KeyType': 'RANGE', - } - ], - 'Projection': { - 'ProjectionType': 'ALL', - }, - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - }], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'username', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'created', - 'AttributeType': 'N' - } - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - return dynamodb.Table('users') - - -@mock_dynamodb2 -def test_update_item_range_key_set(): - table = _create_table_with_range_key() - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'username': 'johndoe', - 'created': Decimal('3'), - }) - - item_key = {'forum_name': 'the-key', 'subject': '123'} - table.update_item( - Key=item_key, - AttributeUpdates={ - 'username': { - 'Action': u'PUT', - 'Value': 'johndoe2' - }, - 'created': { - 'Action': u'PUT', - 'Value': Decimal('4'), - }, - 'mapfield': { - 'Action': u'PUT', - 'Value': {'key': 'value'}, - } - }, - ) - - returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) - for k, v in table.get_item(Key=item_key)['Item'].items()) - dict(returned_item).should.equal({ - 'username': "johndoe2", - 'forum_name': 'the-key', - 'subject': '123', - 'created': '4', - 'mapfield': {'key': 'value'}, - }) - - -@mock_dynamodb2 -def test_update_item_does_not_exist_is_created(): - table = _create_table_with_range_key() - - item_key = {'forum_name': 'the-key', 'subject': '123'} - result = table.update_item( - Key=item_key, - AttributeUpdates={ - 'username': { - 'Action': u'PUT', - 'Value': 'johndoe2' - }, - 'created': { - 'Action': u'PUT', - 'Value': Decimal('4'), - }, - 'mapfield': { - 'Action': u'PUT', - 'Value': 
{'key': 'value'}, - } - }, - ReturnValues='ALL_OLD', - ) - - assert not result.get('Attributes') - - returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) - for k, v in table.get_item(Key=item_key)['Item'].items()) - dict(returned_item).should.equal({ - 'username': "johndoe2", - 'forum_name': 'the-key', - 'subject': '123', - 'created': '4', - 'mapfield': {'key': 'value'}, - }) - - -@mock_dynamodb2 -def test_update_item_add_value(): - table = _create_table_with_range_key() - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'numeric_field': Decimal('-1'), - }) - - item_key = {'forum_name': 'the-key', 'subject': '123'} - table.update_item( - Key=item_key, - AttributeUpdates={ - 'numeric_field': { - 'Action': u'ADD', - 'Value': Decimal('2'), - }, - }, - ) - - returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) - for k, v in table.get_item(Key=item_key)['Item'].items()) - dict(returned_item).should.equal({ - 'numeric_field': '1', - 'forum_name': 'the-key', - 'subject': '123', - }) - - -@mock_dynamodb2 -def test_update_item_add_value_string_set(): - table = _create_table_with_range_key() - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'string_set': set(['str1', 'str2']), - }) - - item_key = {'forum_name': 'the-key', 'subject': '123'} - table.update_item( - Key=item_key, - AttributeUpdates={ - 'string_set': { - 'Action': u'ADD', - 'Value': set(['str3']), - }, - }, - ) - - returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) - for k, v in table.get_item(Key=item_key)['Item'].items()) - dict(returned_item).should.equal({ - 'string_set': set(['str1', 'str2', 'str3']), - 'forum_name': 'the-key', - 'subject': '123', - }) - - -@mock_dynamodb2 -def test_update_item_add_value_does_not_exist_is_created(): - table = _create_table_with_range_key() - - item_key = {'forum_name': 'the-key', 'subject': '123'} - table.update_item( - Key=item_key, - AttributeUpdates={ - 'numeric_field': { - 'Action': u'ADD', - 'Value': Decimal('2'), - }, - }, - ) - - returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) - for k, v in table.get_item(Key=item_key)['Item'].items()) - dict(returned_item).should.equal({ - 'numeric_field': '2', - 'forum_name': 'the-key', - 'subject': '123', - }) - - -@mock_dynamodb2 -def test_update_item_with_expression(): - table = _create_table_with_range_key() - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'field': '1' - }) - - item_key = {'forum_name': 'the-key', 'subject': '123'} - - table.update_item( - Key=item_key, - UpdateExpression='SET field=2', - ) - dict(table.get_item(Key=item_key)['Item']).should.equal({ - 'field': '2', - 'forum_name': 'the-key', - 'subject': '123', - }) - - table.update_item( - Key=item_key, - UpdateExpression='SET field = 3', - ) - dict(table.get_item(Key=item_key)['Item']).should.equal({ - 'field': '3', - 'forum_name': 'the-key', - 'subject': '123', - }) - -@mock_dynamodb2 -def test_update_item_add_with_expression(): - table = _create_table_with_range_key() - - item_key = {'forum_name': 'the-key', 'subject': '123'} - current_item = { - 'forum_name': 'the-key', - 'subject': '123', - 'str_set': {'item1', 'item2', 'item3'}, - 'num_set': {1, 2, 3}, - 'num_val': 6 - } - - # Put an entry in the DB to play with - table.put_item(Item=current_item) - - # Update item to add a string value to a string set - table.update_item( - Key=item_key, - UpdateExpression='ADD str_set :v', - ExpressionAttributeValues={ - ':v': {'item4'} - } - ) - 
current_item['str_set'] = current_item['str_set'].union({'item4'}) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Update item to add a num value to a num set - table.update_item( - Key=item_key, - UpdateExpression='ADD num_set :v', - ExpressionAttributeValues={ - ':v': {6} - } - ) - current_item['num_set'] = current_item['num_set'].union({6}) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Update item to add a value to a number value - table.update_item( - Key=item_key, - UpdateExpression='ADD num_val :v', - ExpressionAttributeValues={ - ':v': 20 - } - ) - current_item['num_val'] = current_item['num_val'] + 20 - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Attempt to add a number value to a string set, should raise Client Error - table.update_item.when.called_with( - Key=item_key, - UpdateExpression='ADD str_set :v', - ExpressionAttributeValues={ - ':v': 20 - } - ).should.have.raised(ClientError) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Attempt to add a number set to the string set, should raise a ClientError - table.update_item.when.called_with( - Key=item_key, - UpdateExpression='ADD str_set :v', - ExpressionAttributeValues={ - ':v': { 20 } - } - ).should.have.raised(ClientError) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Attempt to update with a bad expression - table.update_item.when.called_with( - Key=item_key, - UpdateExpression='ADD str_set bad_value' - ).should.have.raised(ClientError) - - # Attempt to add a string value instead of a string set - table.update_item.when.called_with( - Key=item_key, - UpdateExpression='ADD str_set :v', - ExpressionAttributeValues={ - ':v': 'new_string' - } - ).should.have.raised(ClientError) - - -@mock_dynamodb2 -def test_update_item_delete_with_expression(): - table = _create_table_with_range_key() - - item_key = {'forum_name': 'the-key', 'subject': '123'} - current_item = { - 'forum_name': 'the-key', - 'subject': '123', - 'str_set': {'item1', 'item2', 'item3'}, - 'num_set': {1, 2, 3}, - 'num_val': 6 - } - - # Put an entry in the DB to play with - table.put_item(Item=current_item) - - # Update item to delete a string value from a string set - table.update_item( - Key=item_key, - UpdateExpression='DELETE str_set :v', - ExpressionAttributeValues={ - ':v': {'item2'} - } - ) - current_item['str_set'] = current_item['str_set'].difference({'item2'}) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Update item to delete a num value from a num set - table.update_item( - Key=item_key, - UpdateExpression='DELETE num_set :v', - ExpressionAttributeValues={ - ':v': {2} - } - ) - current_item['num_set'] = current_item['num_set'].difference({2}) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Try to delete on a number, this should fail - table.update_item.when.called_with( - Key=item_key, - UpdateExpression='DELETE num_val :v', - ExpressionAttributeValues={ - ':v': 20 - } - ).should.have.raised(ClientError) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Try to delete a string set from a number set - table.update_item.when.called_with( - Key=item_key, - UpdateExpression='DELETE num_set :v', - ExpressionAttributeValues={ - ':v': {'del_str'} - } - ).should.have.raised(ClientError) - dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) - - # Attempt to update with a bad expression - 
table.update_item.when.called_with( - Key=item_key, - UpdateExpression='DELETE num_val badvalue' - ).should.have.raised(ClientError) - - -@mock_dynamodb2 -def test_boto3_query_gsi_range_comparison(): - table = _create_table_with_range_key() - - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '123', - 'username': 'johndoe', - 'created': 3, - }) - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '456', - 'username': 'johndoe', - 'created': 1, - }) - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '789', - 'username': 'johndoe', - 'created': 2, - }) - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '159', - 'username': 'janedoe', - 'created': 2, - }) - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '601', - 'username': 'janedoe', - 'created': 5, - }) - - # Test a query returning all johndoe items - results = table.query( - KeyConditionExpression=Key('username').eq( - 'johndoe') & Key("created").gt(0), - ScanIndexForward=True, - IndexName='TestGSI', - ) - expected = ["456", "789", "123"] - for index, item in enumerate(results['Items']): - item["subject"].should.equal(expected[index]) - - # Return all johndoe items again, but in reverse - results = table.query( - KeyConditionExpression=Key('username').eq( - 'johndoe') & Key("created").gt(0), - ScanIndexForward=False, - IndexName='TestGSI', - ) - for index, item in enumerate(reversed(results['Items'])): - item["subject"].should.equal(expected[index]) - - # Filter the creation to only return some of the results - # And reverse order of hash + range key - results = table.query( - KeyConditionExpression=Key("created").gt( - 1) & Key('username').eq('johndoe'), - ConsistentRead=True, - IndexName='TestGSI', - ) - results['Count'].should.equal(2) - - # Filter to return no results - results = table.query( - KeyConditionExpression=Key('username').eq( - 'janedoe') & Key("created").gt(9), - IndexName='TestGSI', - ) - results['Count'].should.equal(0) - - results = table.query( - KeyConditionExpression=Key('username').eq( - 'janedoe') & Key("created").eq(5), - IndexName='TestGSI', - ) - results['Count'].should.equal(1) - - # Test range key sorting - results = table.query( - KeyConditionExpression=Key('username').eq( - 'johndoe') & Key("created").gt(0), - IndexName='TestGSI', - ) - expected = [Decimal('1'), Decimal('2'), Decimal('3')] - for index, item in enumerate(results['Items']): - item["created"].should.equal(expected[index]) - - -@mock_dynamodb2 -def test_boto3_update_table_throughput(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
- table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 6 - } - ) - table = dynamodb.Table('users') - - table.provisioned_throughput['ReadCapacityUnits'].should.equal(5) - table.provisioned_throughput['WriteCapacityUnits'].should.equal(6) - - table.update(ProvisionedThroughput={ - 'ReadCapacityUnits': 10, - 'WriteCapacityUnits': 11, - }) - - table = dynamodb.Table('users') - - table.provisioned_throughput['ReadCapacityUnits'].should.equal(10) - table.provisioned_throughput['WriteCapacityUnits'].should.equal(11) - - -@mock_dynamodb2 -def test_boto3_update_table_gsi_throughput(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. - table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - GlobalSecondaryIndexes=[{ - 'IndexName': 'TestGSI', - 'KeySchema': [ - { - 'AttributeName': 'username', - 'KeyType': 'HASH', - }, - { - 'AttributeName': 'created', - 'KeyType': 'RANGE', - } - ], - 'Projection': { - 'ProjectionType': 'ALL', - }, - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 3, - 'WriteCapacityUnits': 4 - } - }], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 6 - } - ) - table = dynamodb.Table('users') - - gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput'] - gsi_throughput['ReadCapacityUnits'].should.equal(3) - gsi_throughput['WriteCapacityUnits'].should.equal(4) - - table.provisioned_throughput['ReadCapacityUnits'].should.equal(5) - table.provisioned_throughput['WriteCapacityUnits'].should.equal(6) - - table.update(GlobalSecondaryIndexUpdates=[{ - 'Update': { - 'IndexName': 'TestGSI', - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 10, - 'WriteCapacityUnits': 11, - } - }, - }]) - - table = dynamodb.Table('users') - - # Primary throughput has not changed - table.provisioned_throughput['ReadCapacityUnits'].should.equal(5) - table.provisioned_throughput['WriteCapacityUnits'].should.equal(6) - - gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput'] - gsi_throughput['ReadCapacityUnits'].should.equal(10) - gsi_throughput['WriteCapacityUnits'].should.equal(11) - - -@mock_dynamodb2 -def test_update_table_gsi_create(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
- table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 6 - } - ) - table = dynamodb.Table('users') - - table.global_secondary_indexes.should.have.length_of(0) - - table.update(GlobalSecondaryIndexUpdates=[{ - 'Create': { - 'IndexName': 'TestGSI', - 'KeySchema': [ - { - 'AttributeName': 'username', - 'KeyType': 'HASH', - }, - { - 'AttributeName': 'created', - 'KeyType': 'RANGE', - } - ], - 'Projection': { - 'ProjectionType': 'ALL', - }, - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 3, - 'WriteCapacityUnits': 4 - } - }, - }]) - - table = dynamodb.Table('users') - table.global_secondary_indexes.should.have.length_of(1) - - gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput'] - assert gsi_throughput['ReadCapacityUnits'].should.equal(3) - assert gsi_throughput['WriteCapacityUnits'].should.equal(4) - - # Check update works - table.update(GlobalSecondaryIndexUpdates=[{ - 'Update': { - 'IndexName': 'TestGSI', - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 10, - 'WriteCapacityUnits': 11, - } - }, - }]) - table = dynamodb.Table('users') - - gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput'] - assert gsi_throughput['ReadCapacityUnits'].should.equal(10) - assert gsi_throughput['WriteCapacityUnits'].should.equal(11) - - table.update(GlobalSecondaryIndexUpdates=[{ - 'Delete': { - 'IndexName': 'TestGSI', - }, - }]) - - table = dynamodb.Table('users') - table.global_secondary_indexes.should.have.length_of(0) - - -@mock_dynamodb2 -def test_update_table_gsi_throughput(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - # Create the DynamoDB table. 
- table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'forum_name', - 'KeyType': 'HASH' - }, - { - 'AttributeName': 'subject', - 'KeyType': 'RANGE' - }, - ], - GlobalSecondaryIndexes=[{ - 'IndexName': 'TestGSI', - 'KeySchema': [ - { - 'AttributeName': 'username', - 'KeyType': 'HASH', - }, - { - 'AttributeName': 'created', - 'KeyType': 'RANGE', - } - ], - 'Projection': { - 'ProjectionType': 'ALL', - }, - 'ProvisionedThroughput': { - 'ReadCapacityUnits': 3, - 'WriteCapacityUnits': 4 - } - }], - AttributeDefinitions=[ - { - 'AttributeName': 'forum_name', - 'AttributeType': 'S' - }, - { - 'AttributeName': 'subject', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 6 - } - ) - table = dynamodb.Table('users') - table.global_secondary_indexes.should.have.length_of(1) - - table.update(GlobalSecondaryIndexUpdates=[{ - 'Delete': { - 'IndexName': 'TestGSI', - }, - }]) - - table = dynamodb.Table('users') - table.global_secondary_indexes.should.have.length_of(0) - - -@mock_dynamodb2 -def test_query_pagination(): - table = _create_table_with_range_key() - for i in range(10): - table.put_item(Item={ - 'forum_name': 'the-key', - 'subject': '{0}'.format(i), - 'username': 'johndoe', - 'created': Decimal('3'), - }) - - page1 = table.query( - KeyConditionExpression=Key('forum_name').eq('the-key'), - Limit=6 - ) - page1['Count'].should.equal(6) - page1['Items'].should.have.length_of(6) - page1.should.have.key('LastEvaluatedKey') - - page2 = table.query( - KeyConditionExpression=Key('forum_name').eq('the-key'), - Limit=6, - ExclusiveStartKey=page1['LastEvaluatedKey'] - ) - page2['Count'].should.equal(4) - page2['Items'].should.have.length_of(4) - page2.should_not.have.key('LastEvaluatedKey') - - results = page1['Items'] + page2['Items'] - subjects = set([int(r['subject']) for r in results]) - subjects.should.equal(set(range(10))) +from __future__ import unicode_literals + +from decimal import Decimal + +import boto +import boto3 +from boto3.dynamodb.conditions import Key +from botocore.exceptions import ClientError +import sure # noqa +from freezegun import freeze_time +from moto import mock_dynamodb2, mock_dynamodb2_deprecated +from boto.exception import JSONResponseError +from tests.helpers import requires_boto_gte +try: + from boto.dynamodb2.fields import GlobalAllIndex, HashKey, RangeKey, AllIndex + from boto.dynamodb2.table import Item, Table + from boto.dynamodb2.types import STRING, NUMBER + from boto.dynamodb2.exceptions import ValidationException + from boto.dynamodb2.exceptions import ConditionalCheckFailedException +except ImportError: + pass + + +def create_table(): + table = Table.create('messages', schema=[ + HashKey('forum_name'), + RangeKey('subject'), + ], throughput={ + 'read': 10, + 'write': 10, + }) + return table + + +def create_table_with_local_indexes(): + table = Table.create( + 'messages', + schema=[ + HashKey('forum_name'), + RangeKey('subject'), + ], + throughput={ + 'read': 10, + 'write': 10, + }, + indexes=[ + AllIndex( + 'threads_index', + parts=[ + HashKey('forum_name', data_type=STRING), + RangeKey('threads', data_type=NUMBER), + ] + ) + ] + ) + return table + + +def iterate_results(res): + for i in res: + pass + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +@freeze_time("2012-01-14") +def test_create_table(): + table = create_table() + expected = { + 'Table': { + 'AttributeDefinitions': [ + {'AttributeName': 'forum_name', 'AttributeType': 'S'}, + {'AttributeName': 
'subject', 'AttributeType': 'S'} + ], + 'ProvisionedThroughput': { + 'NumberOfDecreasesToday': 0, 'WriteCapacityUnits': 10, 'ReadCapacityUnits': 10 + }, + 'TableSizeBytes': 0, + 'TableName': 'messages', + 'TableStatus': 'ACTIVE', + 'TableArn': 'arn:aws:dynamodb:us-east-1:123456789011:table/messages', + 'KeySchema': [ + {'KeyType': 'HASH', 'AttributeName': 'forum_name'}, + {'KeyType': 'RANGE', 'AttributeName': 'subject'} + ], + 'LocalSecondaryIndexes': [], + 'ItemCount': 0, 'CreationDateTime': 1326499200.0, + 'GlobalSecondaryIndexes': [] + } + } + table.describe().should.equal(expected) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +@freeze_time("2012-01-14") +def test_create_table_with_local_index(): + table = create_table_with_local_indexes() + expected = { + 'Table': { + 'AttributeDefinitions': [ + {'AttributeName': 'forum_name', 'AttributeType': 'S'}, + {'AttributeName': 'subject', 'AttributeType': 'S'}, + {'AttributeName': 'threads', 'AttributeType': 'N'} + ], + 'ProvisionedThroughput': { + 'NumberOfDecreasesToday': 0, + 'WriteCapacityUnits': 10, + 'ReadCapacityUnits': 10, + }, + 'TableSizeBytes': 0, + 'TableName': 'messages', + 'TableStatus': 'ACTIVE', + 'TableArn': 'arn:aws:dynamodb:us-east-1:123456789011:table/messages', + 'KeySchema': [ + {'KeyType': 'HASH', 'AttributeName': 'forum_name'}, + {'KeyType': 'RANGE', 'AttributeName': 'subject'} + ], + 'LocalSecondaryIndexes': [ + { + 'IndexName': 'threads_index', + 'KeySchema': [ + {'AttributeName': 'forum_name', 'KeyType': 'HASH'}, + {'AttributeName': 'threads', 'KeyType': 'RANGE'} + ], + 'Projection': {'ProjectionType': 'ALL'} + } + ], + 'ItemCount': 0, + 'CreationDateTime': 1326499200.0, + 'GlobalSecondaryIndexes': [] + } + } + table.describe().should.equal(expected) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_delete_table(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + table = create_table() + conn.list_tables()["TableNames"].should.have.length_of(1) + + table.delete() + conn.list_tables()["TableNames"].should.have.length_of(0) + conn.delete_table.when.called_with( + 'messages').should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_update_table_throughput(): + table = create_table() + table.throughput["read"].should.equal(10) + table.throughput["write"].should.equal(10) + table.update(throughput={ + 'read': 5, + 'write': 15, + }) + + table.throughput["read"].should.equal(5) + table.throughput["write"].should.equal(15) + + table.update(throughput={ + 'read': 5, + 'write': 6, + }) + + table.describe() + + table.throughput["read"].should.equal(5) + table.throughput["write"].should.equal(6) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_item_add_and_describe_and_update(): + table = create_table() + ok = table.put_item(data={ + 'forum_name': 'LOLCat Forum', + 'subject': 'Check this out!', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }) + ok.should.equal(True) + + table.get_item(forum_name="LOLCat Forum", + subject='Check this out!').should_not.be.none + + returned_item = table.get_item( + forum_name='LOLCat Forum', + subject='Check this out!' 
+ ) + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'subject': 'Check this out!', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }) + + returned_item['SentBy'] = 'User B' + returned_item.save(overwrite=True) + + returned_item = table.get_item( + forum_name='LOLCat Forum', + subject='Check this out!' + ) + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'subject': 'Check this out!', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_item_partial_save(): + table = create_table() + + data = { + 'forum_name': 'LOLCat Forum', + 'subject': 'The LOLz', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + } + + table.put_item(data=data) + returned_item = table.get_item( + forum_name="LOLCat Forum", subject='The LOLz') + + returned_item['SentBy'] = 'User B' + returned_item.partial_save() + + returned_item = table.get_item( + forum_name='LOLCat Forum', + subject='The LOLz' + ) + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'subject': 'The LOLz', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + }) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_item_put_without_table(): + table = Table('undeclared-table') + item_data = { + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = Item(table, item_data) + item.save.when.called_with().should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_get_missing_item(): + table = create_table() + + table.get_item.when.called_with( + hash_key='tester', + range_key='other', + ).should.throw(ValidationException) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_get_item_with_undeclared_table(): + table = Table('undeclared-table') + table.get_item.when.called_with( + test_hash=3241526475).should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_get_item_without_range_key(): + table = Table.create('messages', schema=[ + HashKey('test_hash'), + RangeKey('test_range'), + ], throughput={ + 'read': 10, + 'write': 10, + }) + + hash_key = 3241526475 + range_key = 1234567890987 + table.put_item(data={'test_hash': hash_key, 'test_range': range_key}) + table.get_item.when.called_with( + test_hash=hash_key).should.throw(ValidationException) + + +@requires_boto_gte("2.30.0") +@mock_dynamodb2_deprecated +def test_delete_item(): + table = create_table() + item_data = { + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = Item(table, item_data) + item['subject'] = 'Check this out!' 
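+    # item_data above omits the range key, so it is filled in before the save.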
+ item.save() + table.count().should.equal(1) + + response = item.delete() + response.should.equal(True) + + table.count().should.equal(0) + # Deletes are idempotent + item.delete().should.equal(True) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_delete_item_with_undeclared_table(): + table = Table("undeclared-table") + item_data = { + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = Item(table, item_data) + item.delete.when.called_with().should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query(): + table = create_table() + + item_data = { + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'subject': 'Check this out!' + } + item = Item(table, item_data) + item.save(overwrite=True) + + item['forum_name'] = 'the-key' + item['subject'] = '456' + item.save(overwrite=True) + + item['forum_name'] = 'the-key' + item['subject'] = '123' + item.save(overwrite=True) + + item['forum_name'] = 'the-key' + item['subject'] = '789' + item.save(overwrite=True) + + table.count().should.equal(4) + + results = table.query_2(forum_name__eq='the-key', + subject__gt='1', consistent=True) + expected = ["123", "456", "789"] + for index, item in enumerate(results): + item["subject"].should.equal(expected[index]) + + results = table.query_2(forum_name__eq="the-key", + subject__gt='1', reverse=True) + for index, item in enumerate(results): + item["subject"].should.equal(expected[len(expected) - 1 - index]) + + results = table.query_2(forum_name__eq='the-key', + subject__gt='1', consistent=True) + sum(1 for _ in results).should.equal(3) + + results = table.query_2(forum_name__eq='the-key', + subject__gt='234', consistent=True) + sum(1 for _ in results).should.equal(2) + + results = table.query_2(forum_name__eq='the-key', subject__gt='9999') + sum(1 for _ in results).should.equal(0) + + results = table.query_2(forum_name__eq='the-key', subject__beginswith='12') + sum(1 for _ in results).should.equal(1) + + results = table.query_2(forum_name__eq='the-key', subject__beginswith='7') + sum(1 for _ in results).should.equal(1) + + results = table.query_2(forum_name__eq='the-key', + subject__between=['567', '890']) + sum(1 for _ in results).should.equal(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_with_undeclared_table(): + table = Table('undeclared') + results = table.query( + forum_name__eq='Amazon DynamoDB', + subject__beginswith='DynamoDB', + limit=1 + ) + iterate_results.when.called_with(results).should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_scan(): + table = create_table() + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item_data['forum_name'] = 'the-key' + item_data['subject'] = '456' + + item = Item(table, item_data) + item.save() + + item['forum_name'] = 'the-key' + item['subject'] = '123' + item.save() + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:09 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + } + + item_data['forum_name'] = 'the-key' + item_data['subject'] = '789' + + item = Item(table, item_data) + item.save() + + results = table.scan() + sum(1 for _ in results).should.equal(3) + + results = table.scan(SentBy__eq='User B') + sum(1 for _ 
in results).should.equal(1) + + results = table.scan(Body__beginswith='http') + sum(1 for _ in results).should.equal(3) + + results = table.scan(Ids__null=False) + sum(1 for _ in results).should.equal(1) + + results = table.scan(Ids__null=True) + sum(1 for _ in results).should.equal(2) + + results = table.scan(PK__between=[8, 9]) + sum(1 for _ in results).should.equal(0) + + results = table.scan(PK__between=[5, 8]) + sum(1 for _ in results).should.equal(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_scan_with_undeclared_table(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + conn.scan.when.called_with( + table_name='undeclared-table', + scan_filter={ + "SentBy": { + "AttributeValueList": [{ + "S": "User B"} + ], + "ComparisonOperator": "EQ" + } + }, + ).should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_write_batch(): + table = create_table() + with table.batch_write() as batch: + batch.put_item(data={ + 'forum_name': 'the-key', + 'subject': '123', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }) + batch.put_item(data={ + 'forum_name': 'the-key', + 'subject': '789', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }) + + table.count().should.equal(2) + with table.batch_write() as batch: + batch.delete_item( + forum_name='the-key', + subject='789' + ) + + table.count().should.equal(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_batch_read(): + table = create_table() + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + + item_data['forum_name'] = 'the-key' + item_data['subject'] = '456' + + item = Item(table, item_data) + item.save() + + item = Item(table, item_data) + item_data['forum_name'] = 'the-key' + item_data['subject'] = '123' + item.save() + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + } + item = Item(table, item_data) + item_data['forum_name'] = 'another-key' + item_data['subject'] = '789' + item.save() + results = table.batch_get( + keys=[ + {'forum_name': 'the-key', 'subject': '123'}, + {'forum_name': 'another-key', 'subject': '789'}, + ] + ) + + # Iterate through so that batch_item gets called + count = len([x for x in results]) + count.should.equal(2) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_get_key_fields(): + table = create_table() + kf = table.get_key_fields() + kf.should.equal(['forum_name', 'subject']) + + +@mock_dynamodb2_deprecated +def test_create_with_global_indexes(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + + Table.create('messages', schema=[ + HashKey('subject'), + RangeKey('version'), + ], global_indexes=[ + GlobalAllIndex('topic-created_at-index', + parts=[ + HashKey('topic'), + RangeKey('created_at', data_type='N') + ], + throughput={ + 'read': 6, + 'write': 1 + } + ), + ]) + + table_description = conn.describe_table("messages") + table_description['Table']["GlobalSecondaryIndexes"].should.equal([ + { + "IndexName": "topic-created_at-index", + "KeySchema": [ + { + "AttributeName": "topic", + "KeyType": "HASH" + }, + { + "AttributeName": "created_at", + "KeyType": "RANGE" + }, + ], + "Projection": { + "ProjectionType": "ALL" + }, + "ProvisionedThroughput": { + "ReadCapacityUnits": 6, + "WriteCapacityUnits": 1, + } + } + ]) + 
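+# test_query_with_global_indexes below defines two GSIs up front; querying the status index for 'active' must match nothing, since the only saved item is 'inactive'.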
+ +@mock_dynamodb2_deprecated +def test_query_with_global_indexes(): + table = Table.create('messages', schema=[ + HashKey('subject'), + RangeKey('version'), + ], global_indexes=[ + GlobalAllIndex('topic-created_at-index', + parts=[ + HashKey('topic'), + RangeKey('created_at', data_type='N') + ], + throughput={ + 'read': 6, + 'write': 1 + } + ), + GlobalAllIndex('status-created_at-index', + parts=[ + HashKey('status'), + RangeKey('created_at', data_type='N') + ], + throughput={ + 'read': 2, + 'write': 1 + } + ) + ]) + + item_data = { + 'subject': 'Check this out!', + 'version': '1', + 'created_at': 0, + 'status': 'inactive' + } + item = Item(table, item_data) + item.save(overwrite=True) + + item['version'] = '2' + item.save(overwrite=True) + + results = table.query(status__eq='active') + list(results).should.have.length_of(0) + + +@mock_dynamodb2_deprecated +def test_query_with_local_indexes(): + table = create_table_with_local_indexes() + item_data = { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + 'status': 'inactive' + } + item = Item(table, item_data) + item.save(overwrite=True) + + item['version'] = '2' + item.save(overwrite=True) + results = table.query(forum_name__eq='Cool Forum', + index='threads_index', threads__eq=1) + list(results).should.have.length_of(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_filter_eq(): + table = create_table_with_local_indexes() + item_data = [ + { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Read this now!', + 'version': '1', + 'threads': 5, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Please read this... please', + 'version': '1', + 'threads': 0, + } + ] + for data in item_data: + item = Item(table, data) + item.save(overwrite=True) + results = table.query_2( + forum_name__eq='Cool Forum', index='threads_index', threads__eq=5 + ) + list(results).should.have.length_of(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_filter_lt(): + table = create_table_with_local_indexes() + item_data = [ + { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Read this now!', + 'version': '1', + 'threads': 5, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Please read this... please', + 'version': '1', + 'threads': 0, + } + ] + for data in item_data: + item = Item(table, data) + item.save(overwrite=True) + + results = table.query( + forum_name__eq='Cool Forum', index='threads_index', threads__lt=5 + ) + results = list(results) + results.should.have.length_of(2) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_filter_gt(): + table = create_table_with_local_indexes() + item_data = [ + { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Read this now!', + 'version': '1', + 'threads': 5, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Please read this... 
please', + 'version': '1', + 'threads': 0, + } + ] + for data in item_data: + item = Item(table, data) + item.save(overwrite=True) + + results = table.query( + forum_name__eq='Cool Forum', index='threads_index', threads__gt=1 + ) + list(results).should.have.length_of(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_filter_lte(): + table = create_table_with_local_indexes() + item_data = [ + { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Read this now!', + 'version': '1', + 'threads': 5, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Please read this... please', + 'version': '1', + 'threads': 0, + } + ] + for data in item_data: + item = Item(table, data) + item.save(overwrite=True) + + results = table.query( + forum_name__eq='Cool Forum', index='threads_index', threads__lte=5 + ) + list(results).should.have.length_of(3) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_filter_gte(): + table = create_table_with_local_indexes() + item_data = [ + { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Read this now!', + 'version': '1', + 'threads': 5, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Please read this... please', + 'version': '1', + 'threads': 0, + } + ] + for data in item_data: + item = Item(table, data) + item.save(overwrite=True) + + results = table.query( + forum_name__eq='Cool Forum', index='threads_index', threads__gte=1 + ) + list(results).should.have.length_of(2) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_non_hash_range_key(): + table = create_table_with_local_indexes() + item_data = [ + { + 'forum_name': 'Cool Forum', + 'subject': 'Check this out!', + 'version': '1', + 'threads': 1, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Read this now!', + 'version': '3', + 'threads': 5, + }, + { + 'forum_name': 'Cool Forum', + 'subject': 'Please read this... 
please', + 'version': '2', + 'threads': 0, + } + ] + for data in item_data: + item = Item(table, data) + item.save(overwrite=True) + + results = table.query( + forum_name__eq='Cool Forum', version__gt="2" + ) + results = list(results) + results.should.have.length_of(1) + + results = table.query( + forum_name__eq='Cool Forum', version__lt="3" + ) + results = list(results) + results.should.have.length_of(2) + + +@mock_dynamodb2_deprecated +def test_reverse_query(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + + table = Table.create('messages', schema=[ + HashKey('subject'), + RangeKey('created_at', data_type='N') + ]) + + for i in range(10): + table.put_item({ + 'subject': "Hi", + 'created_at': i + }) + + results = table.query_2(subject__eq="Hi", + created_at__lt=6, + limit=4, + reverse=True) + + expected = [Decimal(5), Decimal(4), Decimal(3), Decimal(2)] + [r['created_at'] for r in results].should.equal(expected) + + +@mock_dynamodb2_deprecated +def test_lookup(): + from decimal import Decimal + table = Table.create('messages', schema=[ + HashKey('test_hash'), + RangeKey('test_range'), + ], throughput={ + 'read': 10, + 'write': 10, + }) + + hash_key = 3241526475 + range_key = 1234567890987 + data = {'test_hash': hash_key, 'test_range': range_key} + table.put_item(data=data) + message = table.lookup(hash_key, range_key) + message.get('test_hash').should.equal(Decimal(hash_key)) + message.get('test_range').should.equal(Decimal(range_key)) + + +@mock_dynamodb2_deprecated +def test_failed_overwrite(): + table = Table.create('messages', schema=[ + HashKey('id'), + RangeKey('range'), + ], throughput={ + 'read': 7, + 'write': 3, + }) + + data1 = {'id': '123', 'range': 'abc', 'data': '678'} + table.put_item(data=data1) + + data2 = {'id': '123', 'range': 'abc', 'data': '345'} + table.put_item(data=data2, overwrite=True) + + data3 = {'id': '123', 'range': 'abc', 'data': '812'} + table.put_item.when.called_with(data=data3).should.throw( + ConditionalCheckFailedException) + + returned_item = table.lookup('123', 'abc') + dict(returned_item).should.equal(data2) + + data4 = {'id': '123', 'range': 'ghi', 'data': 812} + table.put_item(data=data4) + + returned_item = table.lookup('123', 'ghi') + dict(returned_item).should.equal(data4) + + +@mock_dynamodb2_deprecated +def test_conflicting_writes(): + table = Table.create('messages', schema=[ + HashKey('id'), + RangeKey('range'), + ]) + + item_data = {'id': '123', 'range': 'abc', 'data': '678'} + item1 = Item(table, item_data) + item2 = Item(table, item_data) + item1.save() + + item1['data'] = '579' + item2['data'] = '912' + + item1.save() + item2.save.when.called_with().should.throw(ConditionalCheckFailedException) + +""" +boto3 +""" + + +@mock_dynamodb2 +def test_boto3_conditions(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
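+    # forum_name is the hash key and subject the range key; the queries below exercise gt, begins_with and between on subject.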
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123' + }) + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '456' + }) + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '789' + }) + + # Test a query returning all items + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").gt('1'), + ScanIndexForward=True, + ) + expected = ["123", "456", "789"] + for index, item in enumerate(results['Items']): + item["subject"].should.equal(expected[index]) + + # Return all items again, but in reverse + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").gt('1'), + ScanIndexForward=False, + ) + for index, item in enumerate(reversed(results['Items'])): + item["subject"].should.equal(expected[index]) + + # Filter the subjects to only return some of the results + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").gt('234'), + ConsistentRead=True, + ) + results['Count'].should.equal(2) + + # Filter to return no results + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").gt('9999') + ) + results['Count'].should.equal(0) + + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").begins_with('12') + ) + results['Count'].should.equal(1) + + results = table.query( + KeyConditionExpression=Key("subject").begins_with( + '7') & Key('forum_name').eq('the-key') + ) + results['Count'].should.equal(1) + + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key') & Key("subject").between('567', '890') + ) + results['Count'].should.equal(1) + + +@mock_dynamodb2 +def test_boto3_put_item_with_conditions(): + import botocore + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
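+    # The attribute_not_exists / attribute_exists ConditionExpressions below guard the conditional puts.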
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123' + }) + + table.put_item( + Item={ + 'forum_name': 'the-key-2', + 'subject': '1234', + }, + ConditionExpression='attribute_not_exists(forum_name) AND attribute_not_exists(subject)' + ) + + table.put_item.when.called_with( + Item={ + 'forum_name': 'the-key', + 'subject': '123' + }, + ConditionExpression='attribute_not_exists(forum_name) AND attribute_not_exists(subject)' + ).should.throw(botocore.exceptions.ClientError) + + table.put_item.when.called_with( + Item={ + 'forum_name': 'bogus-key', + 'subject': 'bogus', + 'test': '123' + }, + ConditionExpression='attribute_exists(forum_name) AND attribute_exists(subject)' + ).should.throw(botocore.exceptions.ClientError) + + +def _create_table_with_range_key(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + GlobalSecondaryIndexes=[{ + 'IndexName': 'TestGSI', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH', + }, + { + 'AttributeName': 'created', + 'KeyType': 'RANGE', + } + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + }], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'username', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'created', + 'AttributeType': 'N' + } + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + return dynamodb.Table('users') + + +@mock_dynamodb2 +def test_update_item_range_key_set(): + table = _create_table_with_range_key() + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'username': 'johndoe', + 'created': Decimal('3'), + }) + + item_key = {'forum_name': 'the-key', 'subject': '123'} + table.update_item( + Key=item_key, + AttributeUpdates={ + 'username': { + 'Action': u'PUT', + 'Value': 'johndoe2' + }, + 'created': { + 'Action': u'PUT', + 'Value': Decimal('4'), + }, + 'mapfield': { + 'Action': u'PUT', + 'Value': {'key': 'value'}, + } + }, + ) + + returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) + for k, v in table.get_item(Key=item_key)['Item'].items()) + dict(returned_item).should.equal({ + 'username': "johndoe2", + 'forum_name': 'the-key', + 'subject': '123', + 'created': '4', + 'mapfield': {'key': 'value'}, + }) + + +@mock_dynamodb2 +def test_update_item_does_not_exist_is_created(): + table = _create_table_with_range_key() + + item_key = {'forum_name': 'the-key', 'subject': '123'} + result = table.update_item( + Key=item_key, + AttributeUpdates={ + 'username': { + 'Action': u'PUT', + 'Value': 'johndoe2' + }, + 'created': { + 'Action': u'PUT', + 'Value': Decimal('4'), + }, + 'mapfield': { + 'Action': u'PUT', + 'Value': 
{'key': 'value'}, + } + }, + ReturnValues='ALL_OLD', + ) + + assert not result.get('Attributes') + + returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) + for k, v in table.get_item(Key=item_key)['Item'].items()) + dict(returned_item).should.equal({ + 'username': "johndoe2", + 'forum_name': 'the-key', + 'subject': '123', + 'created': '4', + 'mapfield': {'key': 'value'}, + }) + + +@mock_dynamodb2 +def test_update_item_add_value(): + table = _create_table_with_range_key() + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'numeric_field': Decimal('-1'), + }) + + item_key = {'forum_name': 'the-key', 'subject': '123'} + table.update_item( + Key=item_key, + AttributeUpdates={ + 'numeric_field': { + 'Action': u'ADD', + 'Value': Decimal('2'), + }, + }, + ) + + returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) + for k, v in table.get_item(Key=item_key)['Item'].items()) + dict(returned_item).should.equal({ + 'numeric_field': '1', + 'forum_name': 'the-key', + 'subject': '123', + }) + + +@mock_dynamodb2 +def test_update_item_add_value_string_set(): + table = _create_table_with_range_key() + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'string_set': set(['str1', 'str2']), + }) + + item_key = {'forum_name': 'the-key', 'subject': '123'} + table.update_item( + Key=item_key, + AttributeUpdates={ + 'string_set': { + 'Action': u'ADD', + 'Value': set(['str3']), + }, + }, + ) + + returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) + for k, v in table.get_item(Key=item_key)['Item'].items()) + dict(returned_item).should.equal({ + 'string_set': set(['str1', 'str2', 'str3']), + 'forum_name': 'the-key', + 'subject': '123', + }) + + +@mock_dynamodb2 +def test_update_item_add_value_does_not_exist_is_created(): + table = _create_table_with_range_key() + + item_key = {'forum_name': 'the-key', 'subject': '123'} + table.update_item( + Key=item_key, + AttributeUpdates={ + 'numeric_field': { + 'Action': u'ADD', + 'Value': Decimal('2'), + }, + }, + ) + + returned_item = dict((k, str(v) if isinstance(v, Decimal) else v) + for k, v in table.get_item(Key=item_key)['Item'].items()) + dict(returned_item).should.equal({ + 'numeric_field': '2', + 'forum_name': 'the-key', + 'subject': '123', + }) + + +@mock_dynamodb2 +def test_update_item_with_expression(): + table = _create_table_with_range_key() + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'field': '1' + }) + + item_key = {'forum_name': 'the-key', 'subject': '123'} + + table.update_item( + Key=item_key, + UpdateExpression='SET field=2', + ) + dict(table.get_item(Key=item_key)['Item']).should.equal({ + 'field': '2', + 'forum_name': 'the-key', + 'subject': '123', + }) + + table.update_item( + Key=item_key, + UpdateExpression='SET field = 3', + ) + dict(table.get_item(Key=item_key)['Item']).should.equal({ + 'field': '3', + 'forum_name': 'the-key', + 'subject': '123', + }) + +@mock_dynamodb2 +def test_update_item_add_with_expression(): + table = _create_table_with_range_key() + + item_key = {'forum_name': 'the-key', 'subject': '123'} + current_item = { + 'forum_name': 'the-key', + 'subject': '123', + 'str_set': {'item1', 'item2', 'item3'}, + 'num_set': {1, 2, 3}, + 'num_val': 6 + } + + # Put an entry in the DB to play with + table.put_item(Item=current_item) + + # Update item to add a string value to a string set + table.update_item( + Key=item_key, + UpdateExpression='ADD str_set :v', + ExpressionAttributeValues={ + ':v': {'item4'} + } + ) + 
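# Each successful update below is mirrored on current_item so the item read back from the table can be compared against it. +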
current_item['str_set'] = current_item['str_set'].union({'item4'}) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Update item to add a num value to a num set + table.update_item( + Key=item_key, + UpdateExpression='ADD num_set :v', + ExpressionAttributeValues={ + ':v': {6} + } + ) + current_item['num_set'] = current_item['num_set'].union({6}) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Update item to add a value to a number value + table.update_item( + Key=item_key, + UpdateExpression='ADD num_val :v', + ExpressionAttributeValues={ + ':v': 20 + } + ) + current_item['num_val'] = current_item['num_val'] + 20 + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Attempting to add a number value to a string set should raise a ClientError + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='ADD str_set :v', + ExpressionAttributeValues={ + ':v': 20 + } + ).should.have.raised(ClientError) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Attempting to add a number set to the string set should also raise a ClientError + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='ADD str_set :v', + ExpressionAttributeValues={ + ':v': {20} + } + ).should.have.raised(ClientError) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Attempt to update with a bad expression + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='ADD str_set bad_value' + ).should.have.raised(ClientError) + + # Attempt to add a string value instead of a string set + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='ADD str_set :v', + ExpressionAttributeValues={ + ':v': 'new_string' + } + ).should.have.raised(ClientError) + + +@mock_dynamodb2 +def test_update_item_delete_with_expression(): + table = _create_table_with_range_key() + + item_key = {'forum_name': 'the-key', 'subject': '123'} + current_item = { + 'forum_name': 'the-key', + 'subject': '123', + 'str_set': {'item1', 'item2', 'item3'}, + 'num_set': {1, 2, 3}, + 'num_val': 6 + } + + # Put an entry in the DB to play with + table.put_item(Item=current_item) + + # Update item to delete a string value from a string set + table.update_item( + Key=item_key, + UpdateExpression='DELETE str_set :v', + ExpressionAttributeValues={ + ':v': {'item2'} + } + ) + current_item['str_set'] = current_item['str_set'].difference({'item2'}) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Update item to delete a num value from a num set + table.update_item( + Key=item_key, + UpdateExpression='DELETE num_set :v', + ExpressionAttributeValues={ + ':v': {2} + } + ) + current_item['num_set'] = current_item['num_set'].difference({2}) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Trying to DELETE from a plain number should fail + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='DELETE num_val :v', + ExpressionAttributeValues={ + ':v': 20 + } + ).should.have.raised(ClientError) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Trying to delete a string set from a number set should fail as well + table.update_item.when.called_with( + Key=item_key, + UpdateExpression='DELETE num_set :v', + ExpressionAttributeValues={ + ':v': {'del_str'} + } + ).should.have.raised(ClientError) + dict(table.get_item(Key=item_key)['Item']).should.equal(current_item) + + # Attempt to update with a bad expression
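+    # 'badvalue' is not an expression attribute value reference, so this DELETE expression is rejected. +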
table.update_item.when.called_with( + Key=item_key, + UpdateExpression='DELETE num_val badvalue' + ).should.have.raised(ClientError) + + +@mock_dynamodb2 +def test_boto3_query_gsi_range_comparison(): + table = _create_table_with_range_key() + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'username': 'johndoe', + 'created': 3, + }) + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '456', + 'username': 'johndoe', + 'created': 1, + }) + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '789', + 'username': 'johndoe', + 'created': 2, + }) + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '159', + 'username': 'janedoe', + 'created': 2, + }) + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '601', + 'username': 'janedoe', + 'created': 5, + }) + + # Test a query returning all johndoe items + results = table.query( + KeyConditionExpression=Key('username').eq( + 'johndoe') & Key("created").gt(0), + ScanIndexForward=True, + IndexName='TestGSI', + ) + expected = ["456", "789", "123"] + for index, item in enumerate(results['Items']): + item["subject"].should.equal(expected[index]) + + # Return all johndoe items again, but in reverse + results = table.query( + KeyConditionExpression=Key('username').eq( + 'johndoe') & Key("created").gt(0), + ScanIndexForward=False, + IndexName='TestGSI', + ) + for index, item in enumerate(reversed(results['Items'])): + item["subject"].should.equal(expected[index]) + + # Filter the creation to only return some of the results + # And reverse order of hash + range key + results = table.query( + KeyConditionExpression=Key("created").gt( + 1) & Key('username').eq('johndoe'), + ConsistentRead=True, + IndexName='TestGSI', + ) + results['Count'].should.equal(2) + + # Filter to return no results + results = table.query( + KeyConditionExpression=Key('username').eq( + 'janedoe') & Key("created").gt(9), + IndexName='TestGSI', + ) + results['Count'].should.equal(0) + + results = table.query( + KeyConditionExpression=Key('username').eq( + 'janedoe') & Key("created").eq(5), + IndexName='TestGSI', + ) + results['Count'].should.equal(1) + + # Test range key sorting + results = table.query( + KeyConditionExpression=Key('username').eq( + 'johndoe') & Key("created").gt(0), + IndexName='TestGSI', + ) + expected = [Decimal('1'), Decimal('2'), Decimal('3')] + for index, item in enumerate(results['Items']): + item["created"].should.equal(expected[index]) + + +@mock_dynamodb2 +def test_boto3_update_table_throughput(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
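+    # Initial capacity is deliberately asymmetric (5 read / 6 write) so both values visibly change after the update to 10/11.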
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 6 + } + ) + table = dynamodb.Table('users') + + table.provisioned_throughput['ReadCapacityUnits'].should.equal(5) + table.provisioned_throughput['WriteCapacityUnits'].should.equal(6) + + table.update(ProvisionedThroughput={ + 'ReadCapacityUnits': 10, + 'WriteCapacityUnits': 11, + }) + + table = dynamodb.Table('users') + + table.provisioned_throughput['ReadCapacityUnits'].should.equal(10) + table.provisioned_throughput['WriteCapacityUnits'].should.equal(11) + + +@mock_dynamodb2 +def test_boto3_update_table_gsi_throughput(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + GlobalSecondaryIndexes=[{ + 'IndexName': 'TestGSI', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH', + }, + { + 'AttributeName': 'created', + 'KeyType': 'RANGE', + } + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 3, + 'WriteCapacityUnits': 4 + } + }], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 6 + } + ) + table = dynamodb.Table('users') + + gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput'] + gsi_throughput['ReadCapacityUnits'].should.equal(3) + gsi_throughput['WriteCapacityUnits'].should.equal(4) + + table.provisioned_throughput['ReadCapacityUnits'].should.equal(5) + table.provisioned_throughput['WriteCapacityUnits'].should.equal(6) + + table.update(GlobalSecondaryIndexUpdates=[{ + 'Update': { + 'IndexName': 'TestGSI', + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 10, + 'WriteCapacityUnits': 11, + } + }, + }]) + + table = dynamodb.Table('users') + + # Primary throughput has not changed + table.provisioned_throughput['ReadCapacityUnits'].should.equal(5) + table.provisioned_throughput['WriteCapacityUnits'].should.equal(6) + + gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput'] + gsi_throughput['ReadCapacityUnits'].should.equal(10) + gsi_throughput['WriteCapacityUnits'].should.equal(11) + + +@mock_dynamodb2 +def test_update_table_gsi_create(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
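+    # No GSI is defined at creation time; the test adds, updates and finally deletes one via table.update().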
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 6 + } + ) + table = dynamodb.Table('users') + + table.global_secondary_indexes.should.have.length_of(0) + + table.update(GlobalSecondaryIndexUpdates=[{ + 'Create': { + 'IndexName': 'TestGSI', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH', + }, + { + 'AttributeName': 'created', + 'KeyType': 'RANGE', + } + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 3, + 'WriteCapacityUnits': 4 + } + }, + }]) + + table = dynamodb.Table('users') + table.global_secondary_indexes.should.have.length_of(1) + + gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput'] + gsi_throughput['ReadCapacityUnits'].should.equal(3) + gsi_throughput['WriteCapacityUnits'].should.equal(4) + + # Check update works + table.update(GlobalSecondaryIndexUpdates=[{ + 'Update': { + 'IndexName': 'TestGSI', + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 10, + 'WriteCapacityUnits': 11, + } + }, + }]) + table = dynamodb.Table('users') + + gsi_throughput = table.global_secondary_indexes[0]['ProvisionedThroughput'] + gsi_throughput['ReadCapacityUnits'].should.equal(10) + gsi_throughput['WriteCapacityUnits'].should.equal(11) + + table.update(GlobalSecondaryIndexUpdates=[{ + 'Delete': { + 'IndexName': 'TestGSI', + }, + }]) + + table = dynamodb.Table('users') + table.global_secondary_indexes.should.have.length_of(0) + + +@mock_dynamodb2 +def test_update_table_gsi_throughput(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table.
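+    # The table starts with TestGSI already defined; the 'Delete' index update below removes it.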
+ table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + GlobalSecondaryIndexes=[{ + 'IndexName': 'TestGSI', + 'KeySchema': [ + { + 'AttributeName': 'username', + 'KeyType': 'HASH', + }, + { + 'AttributeName': 'created', + 'KeyType': 'RANGE', + } + ], + 'Projection': { + 'ProjectionType': 'ALL', + }, + 'ProvisionedThroughput': { + 'ReadCapacityUnits': 3, + 'WriteCapacityUnits': 4 + } + }], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 6 + } + ) + table = dynamodb.Table('users') + table.global_secondary_indexes.should.have.length_of(1) + + table.update(GlobalSecondaryIndexUpdates=[{ + 'Delete': { + 'IndexName': 'TestGSI', + }, + }]) + + table = dynamodb.Table('users') + table.global_secondary_indexes.should.have.length_of(0) + + +@mock_dynamodb2 +def test_query_pagination(): + table = _create_table_with_range_key() + for i in range(10): + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '{0}'.format(i), + 'username': 'johndoe', + 'created': Decimal('3'), + }) + + page1 = table.query( + KeyConditionExpression=Key('forum_name').eq('the-key'), + Limit=6 + ) + page1['Count'].should.equal(6) + page1['Items'].should.have.length_of(6) + page1.should.have.key('LastEvaluatedKey') + + page2 = table.query( + KeyConditionExpression=Key('forum_name').eq('the-key'), + Limit=6, + ExclusiveStartKey=page1['LastEvaluatedKey'] + ) + page2['Count'].should.equal(4) + page2['Items'].should.have.length_of(4) + page2.should_not.have.key('LastEvaluatedKey') + + results = page1['Items'] + page2['Items'] + subjects = set([int(r['subject']) for r in results]) + subjects.should.equal(set(range(10))) diff --git a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py index 15e5284b7..faa826fb0 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py @@ -1,790 +1,790 @@ -from __future__ import unicode_literals - -import boto -import boto3 -from boto3.dynamodb.conditions import Key -import sure # noqa -from freezegun import freeze_time -from boto.exception import JSONResponseError -from moto import mock_dynamodb2, mock_dynamodb2_deprecated -from tests.helpers import requires_boto_gte -import botocore -try: - from boto.dynamodb2.fields import HashKey - from boto.dynamodb2.table import Table - from boto.dynamodb2.table import Item - from boto.dynamodb2.exceptions import ConditionalCheckFailedException, ItemNotFound -except ImportError: - pass - - -def create_table(): - table = Table.create('messages', schema=[ - HashKey('forum_name') - ], throughput={ - 'read': 10, - 'write': 10, - }) - return table - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -@freeze_time("2012-01-14") -def test_create_table(): - create_table() - expected = { - 'Table': { - 'AttributeDefinitions': [ - {'AttributeName': 'forum_name', 'AttributeType': 'S'} - ], - 'ProvisionedThroughput': { - 'NumberOfDecreasesToday': 0, 'WriteCapacityUnits': 10, 'ReadCapacityUnits': 10 - }, - 'TableSizeBytes': 0, - 'TableName': 'messages', - 'TableStatus': 'ACTIVE', - 'TableArn': 'arn:aws:dynamodb:us-east-1:123456789011:table/messages', - 'KeySchema': [ - {'KeyType': 
'HASH', 'AttributeName': 'forum_name'} - ], - 'ItemCount': 0, 'CreationDateTime': 1326499200.0, - 'GlobalSecondaryIndexes': [], - 'LocalSecondaryIndexes': [] - } - } - conn = boto.dynamodb2.connect_to_region( - 'us-east-1', - aws_access_key_id="ak", - aws_secret_access_key="sk" - ) - - conn.describe_table('messages').should.equal(expected) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_delete_table(): - create_table() - conn = boto.dynamodb2.layer1.DynamoDBConnection() - conn.list_tables()["TableNames"].should.have.length_of(1) - - conn.delete_table('messages') - conn.list_tables()["TableNames"].should.have.length_of(0) - - conn.delete_table.when.called_with( - 'messages').should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_update_table_throughput(): - table = create_table() - table.throughput["read"].should.equal(10) - table.throughput["write"].should.equal(10) - - table.update(throughput={ - 'read': 5, - 'write': 6, - }) - - table.throughput["read"].should.equal(5) - table.throughput["write"].should.equal(6) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_item_add_and_describe_and_update(): - table = create_table() - - data = { - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - } - - table.put_item(data=data) - returned_item = table.get_item(forum_name="LOLCat Forum") - returned_item.should_not.be.none - - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - }) - - returned_item['SentBy'] = 'User B' - returned_item.save(overwrite=True) - - returned_item = table.get_item( - forum_name='LOLCat Forum' - ) - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - }) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_item_partial_save(): - table = create_table() - - data = { - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - } - - table.put_item(data=data) - returned_item = table.get_item(forum_name="LOLCat Forum") - - returned_item['SentBy'] = 'User B' - returned_item.partial_save() - - returned_item = table.get_item( - forum_name='LOLCat Forum' - ) - dict(returned_item).should.equal({ - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - }) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_item_put_without_table(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - - conn.put_item.when.called_with( - table_name='undeclared-table', - item={ - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - } - ).should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_get_item_with_undeclared_table(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - - conn.get_item.when.called_with( - table_name='undeclared-table', - key={"forum_name": {"S": "LOLCat Forum"}}, - ).should.throw(JSONResponseError) - - -@requires_boto_gte("2.30.0") -@mock_dynamodb2_deprecated -def test_delete_item(): - table = create_table() - - item_data = { - 'forum_name': 'LOLCat Forum', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = Item(table, item_data) - item.save() - table.count().should.equal(1) - - response = item.delete() - - response.should.equal(True) - - 
table.count().should.equal(0) - - # Deletes are idempotent and 'False' here would imply an error condition - item.delete().should.equal(True) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_delete_item_with_undeclared_table(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - - conn.delete_item.when.called_with( - table_name='undeclared-table', - key={"forum_name": {"S": "LOLCat Forum"}}, - ).should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query(): - table = create_table() - - item_data = { - 'forum_name': 'the-key', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item = Item(table, item_data) - item.save(overwrite=True) - table.count().should.equal(1) - table = Table("messages") - - results = table.query(forum_name__eq='the-key') - sum(1 for _ in results).should.equal(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_query_with_undeclared_table(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - - conn.query.when.called_with( - table_name='undeclared-table', - key_conditions={"forum_name": { - "ComparisonOperator": "EQ", "AttributeValueList": [{"S": "the-key"}]}} - ).should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_scan(): - table = create_table() - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item_data['forum_name'] = 'the-key' - - item = Item(table, item_data) - item.save() - - item['forum_name'] = 'the-key2' - item.save(overwrite=True) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - } - item_data['forum_name'] = 'the-key3' - item = Item(table, item_data) - item.save() - - results = table.scan() - sum(1 for _ in results).should.equal(3) - - results = table.scan(SentBy__eq='User B') - sum(1 for _ in results).should.equal(1) - - results = table.scan(Body__beginswith='http') - sum(1 for _ in results).should.equal(3) - - results = table.scan(Ids__null=False) - sum(1 for _ in results).should.equal(1) - - results = table.scan(Ids__null=True) - sum(1 for _ in results).should.equal(2) - - results = table.scan(PK__between=[8, 9]) - sum(1 for _ in results).should.equal(0) - - results = table.scan(PK__between=[5, 8]) - sum(1 for _ in results).should.equal(1) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_scan_with_undeclared_table(): - conn = boto.dynamodb2.layer1.DynamoDBConnection() - - conn.scan.when.called_with( - table_name='undeclared-table', - scan_filter={ - "SentBy": { - "AttributeValueList": [{ - "S": "User B"} - ], - "ComparisonOperator": "EQ" - } - }, - ).should.throw(JSONResponseError) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_write_batch(): - table = create_table() - - with table.batch_write() as batch: - batch.put_item(data={ - 'forum_name': 'the-key', - 'subject': '123', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }) - batch.put_item(data={ - 'forum_name': 'the-key2', - 'subject': '789', - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - }) - - table.count().should.equal(2) - with table.batch_write() as batch: - batch.delete_item( - forum_name='the-key', - subject='789' - ) - - table.count().should.equal(1) - - 
-@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_batch_read(): - table = create_table() - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User A', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - } - item_data['forum_name'] = 'the-key1' - item = Item(table, item_data) - item.save() - - item = Item(table, item_data) - item_data['forum_name'] = 'the-key2' - item.save(overwrite=True) - - item_data = { - 'Body': 'http://url_to_lolcat.gif', - 'SentBy': 'User B', - 'ReceivedTime': '12/9/2011 11:36:03 PM', - 'Ids': set([1, 2, 3]), - 'PK': 7, - } - item = Item(table, item_data) - item_data['forum_name'] = 'another-key' - item.save(overwrite=True) - - results = table.batch_get( - keys=[ - {'forum_name': 'the-key1'}, - {'forum_name': 'another-key'}, - ] - ) - - # Iterate through so that batch_item gets called - count = len([x for x in results]) - count.should.equal(2) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_get_key_fields(): - table = create_table() - kf = table.get_key_fields() - kf[0].should.equal('forum_name') - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_get_missing_item(): - table = create_table() - table.get_item.when.called_with( - forum_name='missing').should.throw(ItemNotFound) - - -@requires_boto_gte("2.9") -@mock_dynamodb2_deprecated -def test_get_special_item(): - table = Table.create('messages', schema=[ - HashKey('date-joined') - ], throughput={ - 'read': 10, - 'write': 10, - }) - - data = { - 'date-joined': 127549192, - 'SentBy': 'User A', - } - table.put_item(data=data) - returned_item = table.get_item(**{'date-joined': 127549192}) - dict(returned_item).should.equal(data) - - -@mock_dynamodb2_deprecated -def test_update_item_remove(): - conn = boto.dynamodb2.connect_to_region("us-east-1") - table = Table.create('messages', schema=[ - HashKey('username') - ]) - - data = { - 'username': "steve", - 'SentBy': 'User A', - 'SentTo': 'User B', - } - table.put_item(data=data) - key_map = { - 'username': {"S": "steve"} - } - - # Then remove the SentBy field - conn.update_item("messages", key_map, - update_expression="REMOVE SentBy, SentTo") - - returned_item = table.get_item(username="steve") - dict(returned_item).should.equal({ - 'username': "steve", - }) - - -@mock_dynamodb2_deprecated -def test_update_item_set(): - conn = boto.dynamodb2.connect_to_region("us-east-1") - table = Table.create('messages', schema=[ - HashKey('username') - ]) - - data = { - 'username': "steve", - 'SentBy': 'User A', - } - table.put_item(data=data) - key_map = { - 'username': {"S": "steve"} - } - - conn.update_item("messages", key_map, - update_expression="SET foo=bar, blah=baz REMOVE SentBy") - - returned_item = table.get_item(username="steve") - dict(returned_item).should.equal({ - 'username': "steve", - 'foo': 'bar', - 'blah': 'baz', - }) - - -@mock_dynamodb2_deprecated -def test_failed_overwrite(): - table = Table.create('messages', schema=[ - HashKey('id'), - ], throughput={ - 'read': 7, - 'write': 3, - }) - - data1 = {'id': '123', 'data': '678'} - table.put_item(data=data1) - - data2 = {'id': '123', 'data': '345'} - table.put_item(data=data2, overwrite=True) - - data3 = {'id': '123', 'data': '812'} - table.put_item.when.called_with(data=data3).should.throw( - ConditionalCheckFailedException) - - returned_item = table.lookup('123') - dict(returned_item).should.equal(data2) - - data4 = {'id': '124', 'data': 812} - table.put_item(data=data4) - - returned_item = table.lookup('124') - dict(returned_item).should.equal(data4) - - 
-@mock_dynamodb2_deprecated -def test_conflicting_writes(): - table = Table.create('messages', schema=[ - HashKey('id'), - ]) - - item_data = {'id': '123', 'data': '678'} - item1 = Item(table, item_data) - item2 = Item(table, item_data) - item1.save() - - item1['data'] = '579' - item2['data'] = '912' - - item1.save() - item2.save.when.called_with().should.throw(ConditionalCheckFailedException) - - -""" -boto3 -""" - - -@mock_dynamodb2 -def test_boto3_create_table(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'username', - 'KeyType': 'HASH' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'username', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - table.name.should.equal('users') - - -def _create_user_table(): - dynamodb = boto3.resource('dynamodb', region_name='us-east-1') - - table = dynamodb.create_table( - TableName='users', - KeySchema=[ - { - 'AttributeName': 'username', - 'KeyType': 'HASH' - }, - ], - AttributeDefinitions=[ - { - 'AttributeName': 'username', - 'AttributeType': 'S' - }, - ], - ProvisionedThroughput={ - 'ReadCapacityUnits': 5, - 'WriteCapacityUnits': 5 - } - ) - return dynamodb.Table('users') - - -@mock_dynamodb2 -def test_boto3_conditions(): - table = _create_user_table() - - table.put_item(Item={'username': 'johndoe'}) - table.put_item(Item={'username': 'janedoe'}) - - response = table.query( - KeyConditionExpression=Key('username').eq('johndoe') - ) - response['Count'].should.equal(1) - response['Items'].should.have.length_of(1) - response['Items'][0].should.equal({"username": "johndoe"}) - - -@mock_dynamodb2 -def test_boto3_put_item_conditions_pass(): - table = _create_user_table() - table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) - table.put_item( - Item={'username': 'johndoe', 'foo': 'baz'}, - Expected={ - 'foo': { - 'ComparisonOperator': 'EQ', - 'AttributeValueList': ['bar'] - } - }) - final_item = table.get_item(Key={'username': 'johndoe'}) - assert dict(final_item)['Item']['foo'].should.equal("baz") - -@mock_dynamodb2 -def test_boto3_put_item_conditions_pass_because_expect_not_exists_by_compare_to_null(): - table = _create_user_table() - table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) - table.put_item( - Item={'username': 'johndoe', 'foo': 'baz'}, - Expected={ - 'whatever': { - 'ComparisonOperator': 'NULL', - } - }) - final_item = table.get_item(Key={'username': 'johndoe'}) - assert dict(final_item)['Item']['foo'].should.equal("baz") - -@mock_dynamodb2 -def test_boto3_put_item_conditions_pass_because_expect_exists_by_compare_to_not_null(): - table = _create_user_table() - table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) - table.put_item( - Item={'username': 'johndoe', 'foo': 'baz'}, - Expected={ - 'foo': { - 'ComparisonOperator': 'NOT_NULL', - } - }) - final_item = table.get_item(Key={'username': 'johndoe'}) - assert dict(final_item)['Item']['foo'].should.equal("baz") - -@mock_dynamodb2 -def test_boto3_put_item_conditions_fail(): - table = _create_user_table() - table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) - table.put_item.when.called_with( - Item={'username': 'johndoe', 'foo': 'baz'}, - Expected={ - 'foo': { - 'ComparisonOperator': 'NE', - 'AttributeValueList': ['bar'] - } - }).should.throw(botocore.client.ClientError) - -@mock_dynamodb2 -def test_boto3_update_item_conditions_fail(): - table = _create_user_table() - 
table.put_item(Item={'username': 'johndoe', 'foo': 'baz'})
-    table.update_item.when.called_with(
-        Key={'username': 'johndoe'},
-        UpdateExpression='SET foo=bar',
-        Expected={
-            'foo': {
-                'Value': 'bar',
-            }
-        }).should.throw(botocore.client.ClientError)
-
-@mock_dynamodb2
-def test_boto3_update_item_conditions_fail_because_expect_not_exists():
-    table = _create_user_table()
-    table.put_item(Item={'username': 'johndoe', 'foo': 'baz'})
-    table.update_item.when.called_with(
-        Key={'username': 'johndoe'},
-        UpdateExpression='SET foo=bar',
-        Expected={
-            'foo': {
-                'Exists': False
-            }
-        }).should.throw(botocore.client.ClientError)
-
-@mock_dynamodb2
-def test_boto3_update_item_conditions_fail_because_expect_not_exists_by_compare_to_null():
-    table = _create_user_table()
-    table.put_item(Item={'username': 'johndoe', 'foo': 'baz'})
-    table.update_item.when.called_with(
-        Key={'username': 'johndoe'},
-        UpdateExpression='SET foo=bar',
-        Expected={
-            'foo': {
-                'ComparisonOperator': 'NULL',
-            }
-        }).should.throw(botocore.client.ClientError)
-
-@mock_dynamodb2
-def test_boto3_update_item_conditions_pass():
-    table = _create_user_table()
-    table.put_item(Item={'username': 'johndoe', 'foo': 'bar'})
-    table.update_item(
-        Key={'username': 'johndoe'},
-        UpdateExpression='SET foo=baz',
-        Expected={
-            'foo': {
-                'Value': 'bar',
-            }
-        })
-    returned_item = table.get_item(Key={'username': 'johndoe'})
-    assert dict(returned_item)['Item']['foo'].should.equal("baz")
-
-@mock_dynamodb2
-def test_boto3_update_item_conditions_pass_because_expect_not_exists():
-    table = _create_user_table()
-    table.put_item(Item={'username': 'johndoe', 'foo': 'bar'})
-    table.update_item(
-        Key={'username': 'johndoe'},
-        UpdateExpression='SET foo=baz',
-        Expected={
-            'whatever': {
-                'Exists': False,
-            }
-        })
-    returned_item = table.get_item(Key={'username': 'johndoe'})
-    assert dict(returned_item)['Item']['foo'].should.equal("baz")
-
-@mock_dynamodb2
-def test_boto3_update_item_conditions_pass_because_expect_not_exists_by_compare_to_null():
-    table = _create_user_table()
-    table.put_item(Item={'username': 'johndoe', 'foo': 'bar'})
-    table.update_item(
-        Key={'username': 'johndoe'},
-        UpdateExpression='SET foo=baz',
-        Expected={
-            'whatever': {
-                'ComparisonOperator': 'NULL',
-            }
-        })
-    returned_item = table.get_item(Key={'username': 'johndoe'})
-    assert dict(returned_item)['Item']['foo'].should.equal("baz")
-
-@mock_dynamodb2
-def test_boto3_update_item_conditions_pass_because_expect_exists_by_compare_to_not_null():
-    table = _create_user_table()
-    table.put_item(Item={'username': 'johndoe', 'foo': 'bar'})
-    table.update_item(
-        Key={'username': 'johndoe'},
-        UpdateExpression='SET foo=baz',
-        Expected={
-            'foo': {
-                'ComparisonOperator': 'NOT_NULL',
-            }
-        })
-    returned_item = table.get_item(Key={'username': 'johndoe'})
-    assert dict(returned_item)['Item']['foo'].should.equal("baz")
-
-
-@mock_dynamodb2
-def test_scan_pagination():
-    table = _create_user_table()
-
-    expected_usernames = ['user{0}'.format(i) for i in range(10)]
-    for u in expected_usernames:
-        table.put_item(Item={'username': u})
-
-    
page1 = table.scan(Limit=6) - page1['Count'].should.equal(6) - page1['Items'].should.have.length_of(6) - page1.should.have.key('LastEvaluatedKey') - - page2 = table.scan(Limit=6, - ExclusiveStartKey=page1['LastEvaluatedKey']) - page2['Count'].should.equal(4) - page2['Items'].should.have.length_of(4) - page2.should_not.have.key('LastEvaluatedKey') - - results = page1['Items'] + page2['Items'] - usernames = set([r['username'] for r in results]) - usernames.should.equal(set(expected_usernames)) +from __future__ import unicode_literals + +import boto +import boto3 +from boto3.dynamodb.conditions import Key +import sure # noqa +from freezegun import freeze_time +from boto.exception import JSONResponseError +from moto import mock_dynamodb2, mock_dynamodb2_deprecated +from tests.helpers import requires_boto_gte +import botocore +try: + from boto.dynamodb2.fields import HashKey + from boto.dynamodb2.table import Table + from boto.dynamodb2.table import Item + from boto.dynamodb2.exceptions import ConditionalCheckFailedException, ItemNotFound +except ImportError: + pass + + +def create_table(): + table = Table.create('messages', schema=[ + HashKey('forum_name') + ], throughput={ + 'read': 10, + 'write': 10, + }) + return table + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +@freeze_time("2012-01-14") +def test_create_table(): + create_table() + expected = { + 'Table': { + 'AttributeDefinitions': [ + {'AttributeName': 'forum_name', 'AttributeType': 'S'} + ], + 'ProvisionedThroughput': { + 'NumberOfDecreasesToday': 0, 'WriteCapacityUnits': 10, 'ReadCapacityUnits': 10 + }, + 'TableSizeBytes': 0, + 'TableName': 'messages', + 'TableStatus': 'ACTIVE', + 'TableArn': 'arn:aws:dynamodb:us-east-1:123456789011:table/messages', + 'KeySchema': [ + {'KeyType': 'HASH', 'AttributeName': 'forum_name'} + ], + 'ItemCount': 0, 'CreationDateTime': 1326499200.0, + 'GlobalSecondaryIndexes': [], + 'LocalSecondaryIndexes': [] + } + } + conn = boto.dynamodb2.connect_to_region( + 'us-east-1', + aws_access_key_id="ak", + aws_secret_access_key="sk" + ) + + conn.describe_table('messages').should.equal(expected) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_delete_table(): + create_table() + conn = boto.dynamodb2.layer1.DynamoDBConnection() + conn.list_tables()["TableNames"].should.have.length_of(1) + + conn.delete_table('messages') + conn.list_tables()["TableNames"].should.have.length_of(0) + + conn.delete_table.when.called_with( + 'messages').should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_update_table_throughput(): + table = create_table() + table.throughput["read"].should.equal(10) + table.throughput["write"].should.equal(10) + + table.update(throughput={ + 'read': 5, + 'write': 6, + }) + + table.throughput["read"].should.equal(5) + table.throughput["write"].should.equal(6) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_item_add_and_describe_and_update(): + table = create_table() + + data = { + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + } + + table.put_item(data=data) + returned_item = table.get_item(forum_name="LOLCat Forum") + returned_item.should_not.be.none + + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + }) + + returned_item['SentBy'] = 'User B' + returned_item.save(overwrite=True) + + returned_item = table.get_item( + forum_name='LOLCat Forum' + ) + 
dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + }) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_item_partial_save(): + table = create_table() + + data = { + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + } + + table.put_item(data=data) + returned_item = table.get_item(forum_name="LOLCat Forum") + + returned_item['SentBy'] = 'User B' + returned_item.partial_save() + + returned_item = table.get_item( + forum_name='LOLCat Forum' + ) + dict(returned_item).should.equal({ + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + }) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_item_put_without_table(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + + conn.put_item.when.called_with( + table_name='undeclared-table', + item={ + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + } + ).should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_get_item_with_undeclared_table(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + + conn.get_item.when.called_with( + table_name='undeclared-table', + key={"forum_name": {"S": "LOLCat Forum"}}, + ).should.throw(JSONResponseError) + + +@requires_boto_gte("2.30.0") +@mock_dynamodb2_deprecated +def test_delete_item(): + table = create_table() + + item_data = { + 'forum_name': 'LOLCat Forum', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = Item(table, item_data) + item.save() + table.count().should.equal(1) + + response = item.delete() + + response.should.equal(True) + + table.count().should.equal(0) + + # Deletes are idempotent and 'False' here would imply an error condition + item.delete().should.equal(True) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_delete_item_with_undeclared_table(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + + conn.delete_item.when.called_with( + table_name='undeclared-table', + key={"forum_name": {"S": "LOLCat Forum"}}, + ).should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query(): + table = create_table() + + item_data = { + 'forum_name': 'the-key', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item = Item(table, item_data) + item.save(overwrite=True) + table.count().should.equal(1) + table = Table("messages") + + results = table.query(forum_name__eq='the-key') + sum(1 for _ in results).should.equal(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_query_with_undeclared_table(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + + conn.query.when.called_with( + table_name='undeclared-table', + key_conditions={"forum_name": { + "ComparisonOperator": "EQ", "AttributeValueList": [{"S": "the-key"}]}} + ).should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_scan(): + table = create_table() + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item_data['forum_name'] = 'the-key' + + item = Item(table, item_data) + item.save() + + item['forum_name'] = 'the-key2' + item.save(overwrite=True) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': 
'12/9/2011 11:36:03 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + } + item_data['forum_name'] = 'the-key3' + item = Item(table, item_data) + item.save() + + results = table.scan() + sum(1 for _ in results).should.equal(3) + + results = table.scan(SentBy__eq='User B') + sum(1 for _ in results).should.equal(1) + + results = table.scan(Body__beginswith='http') + sum(1 for _ in results).should.equal(3) + + results = table.scan(Ids__null=False) + sum(1 for _ in results).should.equal(1) + + results = table.scan(Ids__null=True) + sum(1 for _ in results).should.equal(2) + + results = table.scan(PK__between=[8, 9]) + sum(1 for _ in results).should.equal(0) + + results = table.scan(PK__between=[5, 8]) + sum(1 for _ in results).should.equal(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_scan_with_undeclared_table(): + conn = boto.dynamodb2.layer1.DynamoDBConnection() + + conn.scan.when.called_with( + table_name='undeclared-table', + scan_filter={ + "SentBy": { + "AttributeValueList": [{ + "S": "User B"} + ], + "ComparisonOperator": "EQ" + } + }, + ).should.throw(JSONResponseError) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_write_batch(): + table = create_table() + + with table.batch_write() as batch: + batch.put_item(data={ + 'forum_name': 'the-key', + 'subject': '123', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }) + batch.put_item(data={ + 'forum_name': 'the-key2', + 'subject': '789', + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + }) + + table.count().should.equal(2) + with table.batch_write() as batch: + batch.delete_item( + forum_name='the-key', + subject='789' + ) + + table.count().should.equal(1) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_batch_read(): + table = create_table() + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User A', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + } + item_data['forum_name'] = 'the-key1' + item = Item(table, item_data) + item.save() + + item = Item(table, item_data) + item_data['forum_name'] = 'the-key2' + item.save(overwrite=True) + + item_data = { + 'Body': 'http://url_to_lolcat.gif', + 'SentBy': 'User B', + 'ReceivedTime': '12/9/2011 11:36:03 PM', + 'Ids': set([1, 2, 3]), + 'PK': 7, + } + item = Item(table, item_data) + item_data['forum_name'] = 'another-key' + item.save(overwrite=True) + + results = table.batch_get( + keys=[ + {'forum_name': 'the-key1'}, + {'forum_name': 'another-key'}, + ] + ) + + # Iterate through so that batch_item gets called + count = len([x for x in results]) + count.should.equal(2) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_get_key_fields(): + table = create_table() + kf = table.get_key_fields() + kf[0].should.equal('forum_name') + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_get_missing_item(): + table = create_table() + table.get_item.when.called_with( + forum_name='missing').should.throw(ItemNotFound) + + +@requires_boto_gte("2.9") +@mock_dynamodb2_deprecated +def test_get_special_item(): + table = Table.create('messages', schema=[ + HashKey('date-joined') + ], throughput={ + 'read': 10, + 'write': 10, + }) + + data = { + 'date-joined': 127549192, + 'SentBy': 'User A', + } + table.put_item(data=data) + returned_item = table.get_item(**{'date-joined': 127549192}) + dict(returned_item).should.equal(data) + + +@mock_dynamodb2_deprecated +def test_update_item_remove(): + conn = 
boto.dynamodb2.connect_to_region("us-east-1") + table = Table.create('messages', schema=[ + HashKey('username') + ]) + + data = { + 'username': "steve", + 'SentBy': 'User A', + 'SentTo': 'User B', + } + table.put_item(data=data) + key_map = { + 'username': {"S": "steve"} + } + + # Then remove the SentBy field + conn.update_item("messages", key_map, + update_expression="REMOVE SentBy, SentTo") + + returned_item = table.get_item(username="steve") + dict(returned_item).should.equal({ + 'username': "steve", + }) + + +@mock_dynamodb2_deprecated +def test_update_item_set(): + conn = boto.dynamodb2.connect_to_region("us-east-1") + table = Table.create('messages', schema=[ + HashKey('username') + ]) + + data = { + 'username': "steve", + 'SentBy': 'User A', + } + table.put_item(data=data) + key_map = { + 'username': {"S": "steve"} + } + + conn.update_item("messages", key_map, + update_expression="SET foo=bar, blah=baz REMOVE SentBy") + + returned_item = table.get_item(username="steve") + dict(returned_item).should.equal({ + 'username': "steve", + 'foo': 'bar', + 'blah': 'baz', + }) + + +@mock_dynamodb2_deprecated +def test_failed_overwrite(): + table = Table.create('messages', schema=[ + HashKey('id'), + ], throughput={ + 'read': 7, + 'write': 3, + }) + + data1 = {'id': '123', 'data': '678'} + table.put_item(data=data1) + + data2 = {'id': '123', 'data': '345'} + table.put_item(data=data2, overwrite=True) + + data3 = {'id': '123', 'data': '812'} + table.put_item.when.called_with(data=data3).should.throw( + ConditionalCheckFailedException) + + returned_item = table.lookup('123') + dict(returned_item).should.equal(data2) + + data4 = {'id': '124', 'data': 812} + table.put_item(data=data4) + + returned_item = table.lookup('124') + dict(returned_item).should.equal(data4) + + +@mock_dynamodb2_deprecated +def test_conflicting_writes(): + table = Table.create('messages', schema=[ + HashKey('id'), + ]) + + item_data = {'id': '123', 'data': '678'} + item1 = Item(table, item_data) + item2 = Item(table, item_data) + item1.save() + + item1['data'] = '579' + item2['data'] = '912' + + item1.save() + item2.save.when.called_with().should.throw(ConditionalCheckFailedException) + + +""" +boto3 +""" + + +@mock_dynamodb2 +def test_boto3_create_table(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'username', + 'KeyType': 'HASH' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'username', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table.name.should.equal('users') + + +def _create_user_table(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + table = dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'username', + 'KeyType': 'HASH' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'username', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + return dynamodb.Table('users') + + +@mock_dynamodb2 +def test_boto3_conditions(): + table = _create_user_table() + + table.put_item(Item={'username': 'johndoe'}) + table.put_item(Item={'username': 'janedoe'}) + + response = table.query( + KeyConditionExpression=Key('username').eq('johndoe') + ) + response['Count'].should.equal(1) + response['Items'].should.have.length_of(1) + response['Items'][0].should.equal({"username": "johndoe"}) + + +@mock_dynamodb2 +def 
test_boto3_put_item_conditions_pass(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.put_item( + Item={'username': 'johndoe', 'foo': 'baz'}, + Expected={ + 'foo': { + 'ComparisonOperator': 'EQ', + 'AttributeValueList': ['bar'] + } + }) + final_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(final_item)['Item']['foo'].should.equal("baz") + +@mock_dynamodb2 +def test_boto3_put_item_conditions_pass_because_expect_not_exists_by_compare_to_null(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.put_item( + Item={'username': 'johndoe', 'foo': 'baz'}, + Expected={ + 'whatever': { + 'ComparisonOperator': 'NULL', + } + }) + final_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(final_item)['Item']['foo'].should.equal("baz") + +@mock_dynamodb2 +def test_boto3_put_item_conditions_pass_because_expect_exists_by_compare_to_not_null(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.put_item( + Item={'username': 'johndoe', 'foo': 'baz'}, + Expected={ + 'foo': { + 'ComparisonOperator': 'NOT_NULL', + } + }) + final_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(final_item)['Item']['foo'].should.equal("baz") + +@mock_dynamodb2 +def test_boto3_put_item_conditions_fail(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.put_item.when.called_with( + Item={'username': 'johndoe', 'foo': 'baz'}, + Expected={ + 'foo': { + 'ComparisonOperator': 'NE', + 'AttributeValueList': ['bar'] + } + }).should.throw(botocore.client.ClientError) + +@mock_dynamodb2 +def test_boto3_update_item_conditions_fail(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'baz'}) + table.update_item.when.called_with( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=bar', + Expected={ + 'foo': { + 'Value': 'bar', + } + }).should.throw(botocore.client.ClientError) + +@mock_dynamodb2 +def test_boto3_update_item_conditions_fail_because_expect_not_exists(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'baz'}) + table.update_item.when.called_with( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=bar', + Expected={ + 'foo': { + 'Exists': False + } + }).should.throw(botocore.client.ClientError) + +@mock_dynamodb2 +def test_boto3_update_item_conditions_fail_because_expect_not_exists_by_compare_to_null(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'baz'}) + table.update_item.when.called_with( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=bar', + Expected={ + 'foo': { + 'ComparisonOperator': 'NULL', + } + }).should.throw(botocore.client.ClientError) + +@mock_dynamodb2 +def test_boto3_update_item_conditions_pass(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.update_item( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=baz', + Expected={ + 'foo': { + 'Value': 'bar', + } + }) + returned_item = table.get_item(Key={'username': 'johndoe'}) + assert dict(returned_item)['Item']['foo'].should.equal("baz") + +@mock_dynamodb2 +def test_boto3_update_item_conditions_pass_because_expect_not_exists(): + table = _create_user_table() + table.put_item(Item={'username': 'johndoe', 'foo': 'bar'}) + table.update_item( + Key={'username': 'johndoe'}, + UpdateExpression='SET foo=baz', + Expected={ + 'whatever': 
{
+                'Exists': False,
+            }
+        })
+    returned_item = table.get_item(Key={'username': 'johndoe'})
+    assert dict(returned_item)['Item']['foo'].should.equal("baz")
+
+@mock_dynamodb2
+def test_boto3_update_item_conditions_pass_because_expect_not_exists_by_compare_to_null():
+    table = _create_user_table()
+    table.put_item(Item={'username': 'johndoe', 'foo': 'bar'})
+    table.update_item(
+        Key={'username': 'johndoe'},
+        UpdateExpression='SET foo=baz',
+        Expected={
+            'whatever': {
+                'ComparisonOperator': 'NULL',
+            }
+        })
+    returned_item = table.get_item(Key={'username': 'johndoe'})
+    assert dict(returned_item)['Item']['foo'].should.equal("baz")
+
+@mock_dynamodb2
+def test_boto3_update_item_conditions_pass_because_expect_exists_by_compare_to_not_null():
+    table = _create_user_table()
+    table.put_item(Item={'username': 'johndoe', 'foo': 'bar'})
+    table.update_item(
+        Key={'username': 'johndoe'},
+        UpdateExpression='SET foo=baz',
+        Expected={
+            'foo': {
+                'ComparisonOperator': 'NOT_NULL',
+            }
+        })
+    returned_item = table.get_item(Key={'username': 'johndoe'})
+    assert dict(returned_item)['Item']['foo'].should.equal("baz")
+
+
+@mock_dynamodb2
+def test_scan_pagination():
+    table = _create_user_table()
+
+    expected_usernames = ['user{0}'.format(i) for i in range(10)]
+    for u in expected_usernames:
+        table.put_item(Item={'username': u})
+
+    page1 = table.scan(Limit=6)
+    page1['Count'].should.equal(6)
+    page1['Items'].should.have.length_of(6)
+    page1.should.have.key('LastEvaluatedKey')
+
+    page2 = table.scan(Limit=6,
+                       ExclusiveStartKey=page1['LastEvaluatedKey'])
+    page2['Count'].should.equal(4)
+    page2['Items'].should.have.length_of(4)
+    page2.should_not.have.key('LastEvaluatedKey')
+
+    results = page1['Items'] + page2['Items']
+    usernames = set([r['username'] for r in results])
+    usernames.should.equal(set(expected_usernames))
diff --git a/tests/test_dynamodb2/test_server.py b/tests/test_dynamodb2/test_server.py
index af820beaf..be94df0f4 100644
--- a/tests/test_dynamodb2/test_server.py
+++ b/tests/test_dynamodb2/test_server.py
@@ -1,19 +1,19 @@
-from __future__ import unicode_literals
-import sure  # noqa
-
-import moto.server as server
-
-'''
-Test the different server responses
-'''
-
-
-def test_table_list():
-    backend = server.create_backend_app("dynamodb2")
-    test_client = backend.test_client()
-    res = test_client.get('/')
-    res.status_code.should.equal(404)
-
-    headers = {'X-Amz-Target': 'TestTable.ListTables'}
-    res = test_client.get('/', headers=headers)
-    res.data.should.contain(b'TableNames')
+from __future__ import unicode_literals
+import sure  # noqa
+
+import moto.server as server
+
+'''
+Test the different server responses
+'''
+
+
+def test_table_list():
+    backend = server.create_backend_app("dynamodb2")
+    test_client = backend.test_client()
+    res = test_client.get('/')
+    res.status_code.should.equal(404)
+
+    headers = {'X-Amz-Target': 'TestTable.ListTables'}
+    res = test_client.get('/', headers=headers)
+    res.data.should.contain(b'TableNames')
diff --git a/tests/test_ec2/test_account_attributes.py b/tests/test_ec2/test_account_attributes.py
index 
30309bec8..45ae09419 100644 --- a/tests/test_ec2/test_account_attributes.py +++ b/tests/test_ec2/test_account_attributes.py @@ -1,44 +1,44 @@ -from __future__ import unicode_literals -import boto3 -from moto import mock_ec2 -import sure # noqa - - -@mock_ec2 -def test_describe_account_attributes(): - conn = boto3.client('ec2', region_name='us-east-1') - response = conn.describe_account_attributes() - expected_attribute_values = [{ - 'AttributeValues': [{ - 'AttributeValue': '5' - }], - 'AttributeName': 'vpc-max-security-groups-per-interface' - }, { - 'AttributeValues': [{ - 'AttributeValue': '20' - }], - 'AttributeName': 'max-instances' - }, { - 'AttributeValues': [{ - 'AttributeValue': 'EC2' - }, { - 'AttributeValue': 'VPC' - }], - 'AttributeName': 'supported-platforms' - }, { - 'AttributeValues': [{ - 'AttributeValue': 'none' - }], - 'AttributeName': 'default-vpc' - }, { - 'AttributeValues': [{ - 'AttributeValue': '5' - }], - 'AttributeName': 'max-elastic-ips' - }, { - 'AttributeValues': [{ - 'AttributeValue': '5' - }], - 'AttributeName': 'vpc-max-elastic-ips' - }] - response['AccountAttributes'].should.equal(expected_attribute_values) +from __future__ import unicode_literals +import boto3 +from moto import mock_ec2 +import sure # noqa + + +@mock_ec2 +def test_describe_account_attributes(): + conn = boto3.client('ec2', region_name='us-east-1') + response = conn.describe_account_attributes() + expected_attribute_values = [{ + 'AttributeValues': [{ + 'AttributeValue': '5' + }], + 'AttributeName': 'vpc-max-security-groups-per-interface' + }, { + 'AttributeValues': [{ + 'AttributeValue': '20' + }], + 'AttributeName': 'max-instances' + }, { + 'AttributeValues': [{ + 'AttributeValue': 'EC2' + }, { + 'AttributeValue': 'VPC' + }], + 'AttributeName': 'supported-platforms' + }, { + 'AttributeValues': [{ + 'AttributeValue': 'none' + }], + 'AttributeName': 'default-vpc' + }, { + 'AttributeValues': [{ + 'AttributeValue': '5' + }], + 'AttributeName': 'max-elastic-ips' + }, { + 'AttributeValues': [{ + 'AttributeValue': '5' + }], + 'AttributeName': 'vpc-max-elastic-ips' + }] + response['AccountAttributes'].should.equal(expected_attribute_values) diff --git a/tests/test_ec2/test_amazon_dev_pay.py b/tests/test_ec2/test_amazon_dev_pay.py index 38e1eb751..1dd9cc74e 100644 --- a/tests/test_ec2/test_amazon_dev_pay.py +++ b/tests/test_ec2/test_amazon_dev_pay.py @@ -1,10 +1,10 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2 - - -@mock_ec2 -def test_amazon_dev_pay(): - pass +from __future__ import unicode_literals +import boto +import sure # noqa + +from moto import mock_ec2 + + +@mock_ec2 +def test_amazon_dev_pay(): + pass diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index a8d4d1b67..bb5fb3fac 100644 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -1,776 +1,776 @@ -from __future__ import unicode_literals - -import boto -import boto.ec2 -import boto3 -from boto.exception import EC2ResponseError -from botocore.exceptions import ClientError -# Ensure 'assert_raises' context manager support for Python 2.6 -from nose.tools import assert_raises -import sure # noqa - -from moto import mock_ec2_deprecated, mock_ec2 -from moto.ec2.models import AMIS -from tests.helpers import requires_boto_gte - - -@mock_ec2_deprecated -def test_ami_create_and_delete(): - conn = boto.connect_ec2('the_key', 'the_secret') - - initial_ami_count = len(AMIS) - conn.get_all_volumes().should.have.length_of(0) - 
conn.get_all_snapshots().should.have.length_of(initial_ami_count) - - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - with assert_raises(EC2ResponseError) as ex: - image_id = conn.create_image( - instance.id, "test-ami", "this is a test ami", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateImage operation: Request would have succeeded, but DryRun flag is set') - - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") - - all_images = conn.get_all_images() - set([i.id for i in all_images]).should.contain(image_id) - - retrieved_image = [i for i in all_images if i.id == image_id][0] - - retrieved_image.id.should.equal(image_id) - retrieved_image.virtualization_type.should.equal(instance.virtualization_type) - retrieved_image.architecture.should.equal(instance.architecture) - retrieved_image.kernel_id.should.equal(instance.kernel) - retrieved_image.platform.should.equal(instance.platform) - retrieved_image.creationDate.should_not.be.none - instance.terminate() - - # Ensure we're no longer creating a volume - volumes = conn.get_all_volumes() - volumes.should.have.length_of(0) - - # Validate auto-created snapshot - snapshots = conn.get_all_snapshots() - snapshots.should.have.length_of(initial_ami_count + 1) - - retrieved_image_snapshot_id = retrieved_image.block_device_mapping.current_value.snapshot_id - [s.id for s in snapshots].should.contain(retrieved_image_snapshot_id) - snapshot = [s for s in snapshots if s.id == retrieved_image_snapshot_id][0] - snapshot.description.should.equal( - "Auto-created snapshot for AMI {0}".format(retrieved_image.id)) - - # root device should be in AMI's block device mappings - root_mapping = retrieved_image.block_device_mapping.get(retrieved_image.root_device_name) - root_mapping.should_not.be.none - - # Deregister - with assert_raises(EC2ResponseError) as ex: - success = conn.deregister_image(image_id, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DeregisterImage operation: Request would have succeeded, but DryRun flag is set') - - success = conn.deregister_image(image_id) - success.should.be.true - - with assert_raises(EC2ResponseError) as cm: - conn.deregister_image(image_id) - cm.exception.code.should.equal('InvalidAMIID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@requires_boto_gte("2.14.0") -@mock_ec2_deprecated -def test_ami_copy(): - conn = boto.ec2.connect_to_region("us-west-1") - - initial_ami_count = len(AMIS) - conn.get_all_volumes().should.have.length_of(0) - conn.get_all_snapshots().should.have.length_of(initial_ami_count) - - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - source_image_id = conn.create_image( - instance.id, "test-ami", "this is a test ami") - instance.terminate() - source_image = conn.get_all_images(image_ids=[source_image_id])[0] - - # Boto returns a 'CopyImage' object with an image_id attribute here. Use - # the image_id to fetch the full info. 
- with assert_raises(EC2ResponseError) as ex: - copy_image_ref = conn.copy_image( - source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami", - dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CopyImage operation: Request would have succeeded, but DryRun flag is set') - - copy_image_ref = conn.copy_image( - source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami") - copy_image_id = copy_image_ref.image_id - copy_image = conn.get_all_images(image_ids=[copy_image_id])[0] - - copy_image.id.should.equal(copy_image_id) - copy_image.virtualization_type.should.equal( - source_image.virtualization_type) - copy_image.architecture.should.equal(source_image.architecture) - copy_image.kernel_id.should.equal(source_image.kernel_id) - copy_image.platform.should.equal(source_image.platform) - - # Ensure we're no longer creating a volume - conn.get_all_volumes().should.have.length_of(0) - - # Validate auto-created snapshot - conn.get_all_snapshots().should.have.length_of(initial_ami_count + 2) - - copy_image.block_device_mapping.current_value.snapshot_id.should_not.equal( - source_image.block_device_mapping.current_value.snapshot_id) - - # Copy from non-existent source ID. - with assert_raises(EC2ResponseError) as cm: - conn.copy_image(source_image.region.name, 'ami-abcd1234', - "test-copy-ami", "this is a test copy ami") - cm.exception.code.should.equal('InvalidAMIID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Copy from non-existent source region. - with assert_raises(EC2ResponseError) as cm: - invalid_region = 'us-east-1' if (source_image.region.name != - 'us-east-1') else 'us-west-1' - conn.copy_image(invalid_region, source_image.id, - "test-copy-ami", "this is a test copy ami") - cm.exception.code.should.equal('InvalidAMIID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_ami_tagging(): - conn = boto.connect_vpc('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - conn.create_image(instance.id, "test-ami", "this is a test ami") - image = conn.get_all_images()[0] - - with assert_raises(EC2ResponseError) as ex: - image.add_tag("a key", "some value", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') - - image.add_tag("a key", "some value") - - tag = conn.get_all_tags()[0] - tag.name.should.equal("a key") - tag.value.should.equal("some value") - - # Refresh the DHCP options - image = conn.get_all_images()[0] - image.tags.should.have.length_of(1) - image.tags["a key"].should.equal("some value") - - -@mock_ec2_deprecated -def test_ami_create_from_missing_instance(): - conn = boto.connect_ec2('the_key', 'the_secret') - args = ["i-abcdefg", "test-ami", "this is a test ami"] - - with assert_raises(EC2ResponseError) as cm: - conn.create_image(*args) - cm.exception.code.should.equal('InvalidInstanceID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_ami_pulls_attributes_from_instance(): - conn 
= boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - instance.modify_attribute("kernel", "test-kernel") - - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") - image = conn.get_image(image_id) - image.kernel_id.should.equal('test-kernel') - - -@mock_ec2_deprecated -def test_ami_filters(): - conn = boto.connect_ec2('the_key', 'the_secret') - - reservationA = conn.run_instances('ami-1234abcd') - instanceA = reservationA.instances[0] - instanceA.modify_attribute("architecture", "i386") - instanceA.modify_attribute("kernel", "k-1234abcd") - instanceA.modify_attribute("platform", "windows") - instanceA.modify_attribute("virtualization_type", "hvm") - imageA_id = conn.create_image( - instanceA.id, "test-ami-A", "this is a test ami") - imageA = conn.get_image(imageA_id) - - reservationB = conn.run_instances('ami-abcd1234') - instanceB = reservationB.instances[0] - instanceB.modify_attribute("architecture", "x86_64") - instanceB.modify_attribute("kernel", "k-abcd1234") - instanceB.modify_attribute("platform", "linux") - instanceB.modify_attribute("virtualization_type", "paravirtual") - imageB_id = conn.create_image( - instanceB.id, "test-ami-B", "this is a test ami") - imageB = conn.get_image(imageB_id) - imageB.set_launch_permissions(group_names=("all")) - - amis_by_architecture = conn.get_all_images( - filters={'architecture': 'x86_64'}) - set([ami.id for ami in amis_by_architecture]).should.contain(imageB.id) - len(amis_by_architecture).should.equal(35) - - amis_by_kernel = conn.get_all_images(filters={'kernel-id': 'k-abcd1234'}) - set([ami.id for ami in amis_by_kernel]).should.equal(set([imageB.id])) - - amis_by_virtualization = conn.get_all_images( - filters={'virtualization-type': 'paravirtual'}) - set([ami.id for ami in amis_by_virtualization] - ).should.contain(imageB.id) - len(amis_by_virtualization).should.equal(3) - - amis_by_platform = conn.get_all_images(filters={'platform': 'windows'}) - set([ami.id for ami in amis_by_platform]).should.contain(imageA.id) - len(amis_by_platform).should.equal(24) - - amis_by_id = conn.get_all_images(filters={'image-id': imageA.id}) - set([ami.id for ami in amis_by_id]).should.equal(set([imageA.id])) - - amis_by_state = conn.get_all_images(filters={'state': 'available'}) - ami_ids_by_state = [ami.id for ami in amis_by_state] - ami_ids_by_state.should.contain(imageA.id) - ami_ids_by_state.should.contain(imageB.id) - len(amis_by_state).should.equal(36) - - amis_by_name = conn.get_all_images(filters={'name': imageA.name}) - set([ami.id for ami in amis_by_name]).should.equal(set([imageA.id])) - - amis_by_public = conn.get_all_images(filters={'is-public': True}) - set([ami.id for ami in amis_by_public]).should.contain(imageB.id) - len(amis_by_public).should.equal(35) - - amis_by_nonpublic = conn.get_all_images(filters={'is-public': False}) - set([ami.id for ami in amis_by_nonpublic]).should.contain(imageA.id) - len(amis_by_nonpublic).should.equal(1) - - -@mock_ec2_deprecated -def test_ami_filtering_via_tag(): - conn = boto.connect_vpc('the_key', 'the_secret') - - reservationA = conn.run_instances('ami-1234abcd') - instanceA = reservationA.instances[0] - imageA_id = conn.create_image( - instanceA.id, "test-ami-A", "this is a test ami") - imageA = conn.get_image(imageA_id) - imageA.add_tag("a key", "some value") - - reservationB = conn.run_instances('ami-abcd1234') - instanceB = reservationB.instances[0] - imageB_id = conn.create_image( - instanceB.id, 
"test-ami-B", "this is a test ami") - imageB = conn.get_image(imageB_id) - imageB.add_tag("another key", "some other value") - - amis_by_tagA = conn.get_all_images(filters={'tag:a key': 'some value'}) - set([ami.id for ami in amis_by_tagA]).should.equal(set([imageA.id])) - - amis_by_tagB = conn.get_all_images( - filters={'tag:another key': 'some other value'}) - set([ami.id for ami in amis_by_tagB]).should.equal(set([imageB.id])) - - -@mock_ec2_deprecated -def test_getting_missing_ami(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.get_image('ami-missing') - cm.exception.code.should.equal('InvalidAMIID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_getting_malformed_ami(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.get_image('foo-missing') - cm.exception.code.should.equal('InvalidAMIID.Malformed') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_ami_attribute_group_permissions(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") - image = conn.get_image(image_id) - - # Baseline - attributes = conn.get_image_attribute( - image.id, attribute='launchPermission') - attributes.name.should.equal('launch_permission') - attributes.attrs.should.have.length_of(0) - - ADD_GROUP_ARGS = {'image_id': image.id, - 'attribute': 'launchPermission', - 'operation': 'add', - 'groups': 'all'} - - REMOVE_GROUP_ARGS = {'image_id': image.id, - 'attribute': 'launchPermission', - 'operation': 'remove', - 'groups': 'all'} - - # Add 'all' group and confirm - with assert_raises(EC2ResponseError) as ex: - conn.modify_image_attribute( - **dict(ADD_GROUP_ARGS, **{'dry_run': True})) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ModifyImageAttribute operation: Request would have succeeded, but DryRun flag is set') - - conn.modify_image_attribute(**ADD_GROUP_ARGS) - - attributes = conn.get_image_attribute( - image.id, attribute='launchPermission') - attributes.attrs['groups'].should.have.length_of(1) - attributes.attrs['groups'].should.equal(['all']) - image = conn.get_image(image_id) - image.is_public.should.equal(True) - - # Add is idempotent - conn.modify_image_attribute.when.called_with( - **ADD_GROUP_ARGS).should_not.throw(EC2ResponseError) - - # Remove 'all' group and confirm - conn.modify_image_attribute(**REMOVE_GROUP_ARGS) - - attributes = conn.get_image_attribute( - image.id, attribute='launchPermission') - attributes.attrs.should.have.length_of(0) - image = conn.get_image(image_id) - image.is_public.should.equal(False) - - # Remove is idempotent - conn.modify_image_attribute.when.called_with( - **REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError) - - -@mock_ec2_deprecated -def test_ami_attribute_user_permissions(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") - image = conn.get_image(image_id) - - # Baseline - attributes = conn.get_image_attribute( - 
image.id, attribute='launchPermission') - attributes.name.should.equal('launch_permission') - attributes.attrs.should.have.length_of(0) - - # Both str and int values should work. - USER1 = '123456789011' - USER2 = 123456789022 - - ADD_USERS_ARGS = {'image_id': image.id, - 'attribute': 'launchPermission', - 'operation': 'add', - 'user_ids': [USER1, USER2]} - - REMOVE_USERS_ARGS = {'image_id': image.id, - 'attribute': 'launchPermission', - 'operation': 'remove', - 'user_ids': [USER1, USER2]} - - REMOVE_SINGLE_USER_ARGS = {'image_id': image.id, - 'attribute': 'launchPermission', - 'operation': 'remove', - 'user_ids': [USER1]} - - # Add multiple users and confirm - conn.modify_image_attribute(**ADD_USERS_ARGS) - - attributes = conn.get_image_attribute( - image.id, attribute='launchPermission') - attributes.attrs['user_ids'].should.have.length_of(2) - set(attributes.attrs['user_ids']).should.equal( - set([str(USER1), str(USER2)])) - image = conn.get_image(image_id) - image.is_public.should.equal(False) - - # Add is idempotent - conn.modify_image_attribute.when.called_with( - **ADD_USERS_ARGS).should_not.throw(EC2ResponseError) - - # Remove single user and confirm - conn.modify_image_attribute(**REMOVE_SINGLE_USER_ARGS) - - attributes = conn.get_image_attribute( - image.id, attribute='launchPermission') - attributes.attrs['user_ids'].should.have.length_of(1) - set(attributes.attrs['user_ids']).should.equal(set([str(USER2)])) - image = conn.get_image(image_id) - image.is_public.should.equal(False) - - # Remove multiple users and confirm - conn.modify_image_attribute(**REMOVE_USERS_ARGS) - - attributes = conn.get_image_attribute( - image.id, attribute='launchPermission') - attributes.attrs.should.have.length_of(0) - image = conn.get_image(image_id) - image.is_public.should.equal(False) - - # Remove is idempotent - conn.modify_image_attribute.when.called_with( - **REMOVE_USERS_ARGS).should_not.throw(EC2ResponseError) - - -@mock_ec2 -def test_ami_describe_executable_users(): - conn = boto3.client('ec2', region_name='us-east-1') - ec2 = boto3.resource('ec2', 'us-east-1') - ec2.create_instances(ImageId='', - MinCount=1, - MaxCount=1) - response = conn.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]) - instance_id = response['Reservations'][0]['Instances'][0]['InstanceId'] - image_id = conn.create_image(InstanceId=instance_id, - Name='TestImage', )['ImageId'] - - USER1 = '123456789011' - - ADD_USER_ARGS = {'ImageId': image_id, - 'Attribute': 'launchPermission', - 'OperationType': 'add', - 'UserIds': [USER1]} - - # Add users and get no images - conn.modify_image_attribute(**ADD_USER_ARGS) - - attributes = conn.describe_image_attribute(ImageId=image_id, - Attribute='LaunchPermissions', - DryRun=False) - attributes['LaunchPermissions'].should.have.length_of(1) - attributes['LaunchPermissions'][0]['UserId'].should.equal(USER1) - images = conn.describe_images(ExecutableUsers=[USER1])['Images'] - images.should.have.length_of(1) - images[0]['ImageId'].should.equal(image_id) - - -@mock_ec2 -def test_ami_describe_executable_users_negative(): - conn = boto3.client('ec2', region_name='us-east-1') - ec2 = boto3.resource('ec2', 'us-east-1') - ec2.create_instances(ImageId='', - MinCount=1, - MaxCount=1) - response = conn.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]) - instance_id = response['Reservations'][0]['Instances'][0]['InstanceId'] - image_id = conn.create_image(InstanceId=instance_id, - Name='TestImage')['ImageId'] - - USER1 = 
'123456789011'
-    USER2 = '113355789012'
-
-    ADD_USER_ARGS = {'ImageId': image_id,
-                     'Attribute': 'launchPermission',
-                     'OperationType': 'add',
-                     'UserIds': [USER1]}
-
-    # Add users and get no images
-    conn.modify_image_attribute(**ADD_USER_ARGS)
-
-    attributes = conn.describe_image_attribute(ImageId=image_id,
-                                               Attribute='LaunchPermissions',
-                                               DryRun=False)
-    attributes['LaunchPermissions'].should.have.length_of(1)
-    attributes['LaunchPermissions'][0]['UserId'].should.equal(USER1)
-    images = conn.describe_images(ExecutableUsers=[USER2])['Images']
-    images.should.have.length_of(0)
-
-
-@mock_ec2
-def test_ami_describe_executable_users_and_filter():
-    conn = boto3.client('ec2', region_name='us-east-1')
-    ec2 = boto3.resource('ec2', 'us-east-1')
-    ec2.create_instances(ImageId='',
-                         MinCount=1,
-                         MaxCount=1)
-    response = conn.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])
-    instance_id = response['Reservations'][0]['Instances'][0]['InstanceId']
-    image_id = conn.create_image(InstanceId=instance_id,
-                                 Name='ImageToDelete', )['ImageId']
-
-    USER1 = '123456789011'
-
-    ADD_USER_ARGS = {'ImageId': image_id,
-                     'Attribute': 'launchPermission',
-                     'OperationType': 'add',
-                     'UserIds': [USER1]}
-
-    # Add users and get no images
-    conn.modify_image_attribute(**ADD_USER_ARGS)
-
-    attributes = conn.describe_image_attribute(ImageId=image_id,
-                                               Attribute='LaunchPermissions',
-                                               DryRun=False)
-    attributes['LaunchPermissions'].should.have.length_of(1)
-    attributes['LaunchPermissions'][0]['UserId'].should.equal(USER1)
-    images = conn.describe_images(ExecutableUsers=[USER1],
-                                  Filters=[{'Name': 'state', 'Values': ['available']}])['Images']
-    images.should.have.length_of(1)
-    images[0]['ImageId'].should.equal(image_id)
-
-
-@mock_ec2_deprecated
-def test_ami_attribute_user_and_group_permissions():
-    """
-    Boto supports adding/removing both users and groups at the same time.
-    Just spot-check this -- input variations, idempotency, etc are validated
-    via user-specific and group-specific tests above.
- """ - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") - image = conn.get_image(image_id) - - # Baseline - attributes = conn.get_image_attribute( - image.id, attribute='launchPermission') - attributes.name.should.equal('launch_permission') - attributes.attrs.should.have.length_of(0) - - USER1 = '123456789011' - USER2 = '123456789022' - - ADD_ARGS = {'image_id': image.id, - 'attribute': 'launchPermission', - 'operation': 'add', - 'groups': ['all'], - 'user_ids': [USER1, USER2]} - - REMOVE_ARGS = {'image_id': image.id, - 'attribute': 'launchPermission', - 'operation': 'remove', - 'groups': ['all'], - 'user_ids': [USER1, USER2]} - - # Add and confirm - conn.modify_image_attribute(**ADD_ARGS) - - attributes = conn.get_image_attribute( - image.id, attribute='launchPermission') - attributes.attrs['user_ids'].should.have.length_of(2) - set(attributes.attrs['user_ids']).should.equal(set([USER1, USER2])) - set(attributes.attrs['groups']).should.equal(set(['all'])) - image = conn.get_image(image_id) - image.is_public.should.equal(True) - - # Remove and confirm - conn.modify_image_attribute(**REMOVE_ARGS) - - attributes = conn.get_image_attribute( - image.id, attribute='launchPermission') - attributes.attrs.should.have.length_of(0) - image = conn.get_image(image_id) - image.is_public.should.equal(False) - - -@mock_ec2_deprecated -def test_ami_attribute_error_cases(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") - image = conn.get_image(image_id) - - # Error: Add with group != 'all' - with assert_raises(EC2ResponseError) as cm: - conn.modify_image_attribute(image.id, - attribute='launchPermission', - operation='add', - groups='everyone') - cm.exception.code.should.equal('InvalidAMIAttributeItemValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Add with user ID that isn't an integer. - with assert_raises(EC2ResponseError) as cm: - conn.modify_image_attribute(image.id, - attribute='launchPermission', - operation='add', - user_ids='12345678901A') - cm.exception.code.should.equal('InvalidAMIAttributeItemValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Add with user ID that is > length 12. - with assert_raises(EC2ResponseError) as cm: - conn.modify_image_attribute(image.id, - attribute='launchPermission', - operation='add', - user_ids='1234567890123') - cm.exception.code.should.equal('InvalidAMIAttributeItemValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Add with user ID that is < length 12. - with assert_raises(EC2ResponseError) as cm: - conn.modify_image_attribute(image.id, - attribute='launchPermission', - operation='add', - user_ids='12345678901') - cm.exception.code.should.equal('InvalidAMIAttributeItemValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Add with one invalid user ID among other valid IDs, ensure no - # partial changes. 
- with assert_raises(EC2ResponseError) as cm: - conn.modify_image_attribute(image.id, - attribute='launchPermission', - operation='add', - user_ids=['123456789011', 'foo', '123456789022']) - cm.exception.code.should.equal('InvalidAMIAttributeItemValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - attributes = conn.get_image_attribute( - image.id, attribute='launchPermission') - attributes.attrs.should.have.length_of(0) - - # Error: Add with invalid image ID - with assert_raises(EC2ResponseError) as cm: - conn.modify_image_attribute("ami-abcd1234", - attribute='launchPermission', - operation='add', - groups='all') - cm.exception.code.should.equal('InvalidAMIID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Remove with invalid image ID - with assert_raises(EC2ResponseError) as cm: - conn.modify_image_attribute("ami-abcd1234", - attribute='launchPermission', - operation='remove', - groups='all') - cm.exception.code.should.equal('InvalidAMIID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2 -def test_ami_describe_non_existent(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - # Valid pattern but non-existent id - img = ec2.Image('ami-abcd1234') - with assert_raises(ClientError): - img.load() - # Invalid ami pattern - img = ec2.Image('not_an_ami_id') - with assert_raises(ClientError): - img.load() - - -@mock_ec2 -def test_ami_filter_wildcard(): - ec2_resource = boto3.resource('ec2', region_name='us-west-1') - ec2_client = boto3.client('ec2', region_name='us-west-1') - - instance = ec2_resource.create_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0] - instance.create_image(Name='test-image') - - # create an image with the same owner but will not match the filter - instance.create_image(Name='not-matching-image') - - my_images = ec2_client.describe_images( - Owners=['111122223333'], - Filters=[{'Name': 'name', 'Values': ['test*']}] - )['Images'] - my_images.should.have.length_of(1) - - -@mock_ec2 -def test_ami_filter_by_owner_id(): - client = boto3.client('ec2', region_name='us-east-1') - - ubuntu_id = '099720109477' - - ubuntu_images = client.describe_images(Owners=[ubuntu_id]) - all_images = client.describe_images() - - ubuntu_ids = [ami['OwnerId'] for ami in ubuntu_images['Images']] - all_ids = [ami['OwnerId'] for ami in all_images['Images']] - - # Assert all ubuntu_ids are the same and one equals ubuntu_id - assert all(ubuntu_ids) and ubuntu_ids[0] == ubuntu_id - # Check we actually have a subset of images - assert len(ubuntu_ids) < len(all_ids) - - -@mock_ec2 -def test_ami_filter_by_self(): - ec2_resource = boto3.resource('ec2', region_name='us-west-1') - ec2_client = boto3.client('ec2', region_name='us-west-1') - - my_images = ec2_client.describe_images(Owners=['self'])['Images'] - my_images.should.have.length_of(0) - - # Create a new image - instance = ec2_resource.create_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0] - instance.create_image(Name='test-image') - - my_images = ec2_client.describe_images(Owners=['self'])['Images'] - my_images.should.have.length_of(1) - - -@mock_ec2 -def test_ami_snapshots_have_correct_owner(): - ec2_client = boto3.client('ec2', region_name='us-west-1') - - images_response = ec2_client.describe_images() - - owner_id_to_snapshot_ids = {} - for image in images_response['Images']: - owner_id = image['OwnerId'] - snapshot_ids = [ - 
block_device_mapping['Ebs']['SnapshotId'] - for block_device_mapping in image['BlockDeviceMappings'] - ] - existing_snapshot_ids = owner_id_to_snapshot_ids.get(owner_id, []) - owner_id_to_snapshot_ids[owner_id] = ( - existing_snapshot_ids + snapshot_ids - ) - - for owner_id in owner_id_to_snapshot_ids: - snapshots_rseponse = ec2_client.describe_snapshots( - SnapshotIds=owner_id_to_snapshot_ids[owner_id] - ) - - for snapshot in snapshots_rseponse['Snapshots']: - assert owner_id == snapshot['OwnerId'] +from __future__ import unicode_literals + +import boto +import boto.ec2 +import boto3 +from boto.exception import EC2ResponseError +from botocore.exceptions import ClientError +# Ensure 'assert_raises' context manager support for Python 2.6 +from nose.tools import assert_raises +import sure # noqa + +from moto import mock_ec2_deprecated, mock_ec2 +from moto.ec2.models import AMIS +from tests.helpers import requires_boto_gte + + +@mock_ec2_deprecated +def test_ami_create_and_delete(): + conn = boto.connect_ec2('the_key', 'the_secret') + + initial_ami_count = len(AMIS) + conn.get_all_volumes().should.have.length_of(0) + conn.get_all_snapshots().should.have.length_of(initial_ami_count) + + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + with assert_raises(EC2ResponseError) as ex: + image_id = conn.create_image( + instance.id, "test-ami", "this is a test ami", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateImage operation: Request would have succeeded, but DryRun flag is set') + + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + + all_images = conn.get_all_images() + set([i.id for i in all_images]).should.contain(image_id) + + retrieved_image = [i for i in all_images if i.id == image_id][0] + + retrieved_image.id.should.equal(image_id) + retrieved_image.virtualization_type.should.equal(instance.virtualization_type) + retrieved_image.architecture.should.equal(instance.architecture) + retrieved_image.kernel_id.should.equal(instance.kernel) + retrieved_image.platform.should.equal(instance.platform) + retrieved_image.creationDate.should_not.be.none + instance.terminate() + + # Ensure we're no longer creating a volume + volumes = conn.get_all_volumes() + volumes.should.have.length_of(0) + + # Validate auto-created snapshot + snapshots = conn.get_all_snapshots() + snapshots.should.have.length_of(initial_ami_count + 1) + + retrieved_image_snapshot_id = retrieved_image.block_device_mapping.current_value.snapshot_id + [s.id for s in snapshots].should.contain(retrieved_image_snapshot_id) + snapshot = [s for s in snapshots if s.id == retrieved_image_snapshot_id][0] + snapshot.description.should.equal( + "Auto-created snapshot for AMI {0}".format(retrieved_image.id)) + + # root device should be in AMI's block device mappings + root_mapping = retrieved_image.block_device_mapping.get(retrieved_image.root_device_name) + root_mapping.should_not.be.none + + # Deregister + with assert_raises(EC2ResponseError) as ex: + success = conn.deregister_image(image_id, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeregisterImage operation: Request would have succeeded, but DryRun flag is set') + + success = 
conn.deregister_image(image_id) + success.should.be.true + + with assert_raises(EC2ResponseError) as cm: + conn.deregister_image(image_id) + cm.exception.code.should.equal('InvalidAMIID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@requires_boto_gte("2.14.0") +@mock_ec2_deprecated +def test_ami_copy(): + conn = boto.ec2.connect_to_region("us-west-1") + + initial_ami_count = len(AMIS) + conn.get_all_volumes().should.have.length_of(0) + conn.get_all_snapshots().should.have.length_of(initial_ami_count) + + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + source_image_id = conn.create_image( + instance.id, "test-ami", "this is a test ami") + instance.terminate() + source_image = conn.get_all_images(image_ids=[source_image_id])[0] + + # Boto returns a 'CopyImage' object with an image_id attribute here. Use + # the image_id to fetch the full info. + with assert_raises(EC2ResponseError) as ex: + copy_image_ref = conn.copy_image( + source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami", + dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CopyImage operation: Request would have succeeded, but DryRun flag is set') + + copy_image_ref = conn.copy_image( + source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami") + copy_image_id = copy_image_ref.image_id + copy_image = conn.get_all_images(image_ids=[copy_image_id])[0] + + copy_image.id.should.equal(copy_image_id) + copy_image.virtualization_type.should.equal( + source_image.virtualization_type) + copy_image.architecture.should.equal(source_image.architecture) + copy_image.kernel_id.should.equal(source_image.kernel_id) + copy_image.platform.should.equal(source_image.platform) + + # Ensure we're no longer creating a volume + conn.get_all_volumes().should.have.length_of(0) + + # Validate auto-created snapshot + conn.get_all_snapshots().should.have.length_of(initial_ami_count + 2) + + copy_image.block_device_mapping.current_value.snapshot_id.should_not.equal( + source_image.block_device_mapping.current_value.snapshot_id) + + # Copy from non-existent source ID. + with assert_raises(EC2ResponseError) as cm: + conn.copy_image(source_image.region.name, 'ami-abcd1234', + "test-copy-ami", "this is a test copy ami") + cm.exception.code.should.equal('InvalidAMIID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Copy from non-existent source region. 
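+    # (Any region other than the source image's own region will do here.)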
+    with assert_raises(EC2ResponseError) as cm:
+        invalid_region = 'us-east-1' if (source_image.region.name !=
+                                         'us-east-1') else 'us-west-1'
+        conn.copy_image(invalid_region, source_image.id,
+                        "test-copy-ami", "this is a test copy ami")
+    cm.exception.code.should.equal('InvalidAMIID.NotFound')
+    cm.exception.status.should.equal(400)
+    cm.exception.request_id.should_not.be.none
+
+
+@mock_ec2_deprecated
+def test_ami_tagging():
+    conn = boto.connect_vpc('the_key', 'the_secret')
+    reservation = conn.run_instances('ami-1234abcd')
+    instance = reservation.instances[0]
+    conn.create_image(instance.id, "test-ami", "this is a test ami")
+    image = conn.get_all_images()[0]
+
+    with assert_raises(EC2ResponseError) as ex:
+        image.add_tag("a key", "some value", dry_run=True)
+    ex.exception.error_code.should.equal('DryRunOperation')
+    ex.exception.status.should.equal(400)
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set')
+
+    image.add_tag("a key", "some value")
+
+    tag = conn.get_all_tags()[0]
+    tag.name.should.equal("a key")
+    tag.value.should.equal("some value")
+
+    # Refresh the image
+    image = conn.get_all_images()[0]
+    image.tags.should.have.length_of(1)
+    image.tags["a key"].should.equal("some value")
+
+
+@mock_ec2_deprecated
+def test_ami_create_from_missing_instance():
+    conn = boto.connect_ec2('the_key', 'the_secret')
+    args = ["i-abcdefg", "test-ami", "this is a test ami"]
+
+    with assert_raises(EC2ResponseError) as cm:
+        conn.create_image(*args)
+    cm.exception.code.should.equal('InvalidInstanceID.NotFound')
+    cm.exception.status.should.equal(400)
+    cm.exception.request_id.should_not.be.none
+
+
+@mock_ec2_deprecated
+def test_ami_pulls_attributes_from_instance():
+    conn = boto.connect_ec2('the_key', 'the_secret')
+    reservation = conn.run_instances('ami-1234abcd')
+    instance = reservation.instances[0]
+    instance.modify_attribute("kernel", "test-kernel")
+
+    image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
+    image = conn.get_image(image_id)
+    image.kernel_id.should.equal('test-kernel')
+
+
+@mock_ec2_deprecated
+def test_ami_filters():
+    conn = boto.connect_ec2('the_key', 'the_secret')
+
+    reservationA = conn.run_instances('ami-1234abcd')
+    instanceA = reservationA.instances[0]
+    instanceA.modify_attribute("architecture", "i386")
+    instanceA.modify_attribute("kernel", "k-1234abcd")
+    instanceA.modify_attribute("platform", "windows")
+    instanceA.modify_attribute("virtualization_type", "hvm")
+    imageA_id = conn.create_image(
+        instanceA.id, "test-ami-A", "this is a test ami")
+    imageA = conn.get_image(imageA_id)
+
+    reservationB = conn.run_instances('ami-abcd1234')
+    instanceB = reservationB.instances[0]
+    instanceB.modify_attribute("architecture", "x86_64")
+    instanceB.modify_attribute("kernel", "k-abcd1234")
+    instanceB.modify_attribute("platform", "linux")
+    instanceB.modify_attribute("virtualization_type", "paravirtual")
+    imageB_id = conn.create_image(
+        instanceB.id, "test-ami-B", "this is a test ami")
+    imageB = conn.get_image(imageB_id)
+    imageB.set_launch_permissions(group_names=("all"))
+
+    amis_by_architecture = conn.get_all_images(
+        filters={'architecture': 'x86_64'})
+    set([ami.id for ami in amis_by_architecture]).should.contain(imageB.id)
+    len(amis_by_architecture).should.equal(35)
+
+    amis_by_kernel = conn.get_all_images(filters={'kernel-id': 'k-abcd1234'})
+    set([ami.id for ami in 
amis_by_kernel]).should.equal(set([imageB.id])) + + amis_by_virtualization = conn.get_all_images( + filters={'virtualization-type': 'paravirtual'}) + set([ami.id for ami in amis_by_virtualization] + ).should.contain(imageB.id) + len(amis_by_virtualization).should.equal(3) + + amis_by_platform = conn.get_all_images(filters={'platform': 'windows'}) + set([ami.id for ami in amis_by_platform]).should.contain(imageA.id) + len(amis_by_platform).should.equal(24) + + amis_by_id = conn.get_all_images(filters={'image-id': imageA.id}) + set([ami.id for ami in amis_by_id]).should.equal(set([imageA.id])) + + amis_by_state = conn.get_all_images(filters={'state': 'available'}) + ami_ids_by_state = [ami.id for ami in amis_by_state] + ami_ids_by_state.should.contain(imageA.id) + ami_ids_by_state.should.contain(imageB.id) + len(amis_by_state).should.equal(36) + + amis_by_name = conn.get_all_images(filters={'name': imageA.name}) + set([ami.id for ami in amis_by_name]).should.equal(set([imageA.id])) + + amis_by_public = conn.get_all_images(filters={'is-public': True}) + set([ami.id for ami in amis_by_public]).should.contain(imageB.id) + len(amis_by_public).should.equal(35) + + amis_by_nonpublic = conn.get_all_images(filters={'is-public': False}) + set([ami.id for ami in amis_by_nonpublic]).should.contain(imageA.id) + len(amis_by_nonpublic).should.equal(1) + + +@mock_ec2_deprecated +def test_ami_filtering_via_tag(): + conn = boto.connect_vpc('the_key', 'the_secret') + + reservationA = conn.run_instances('ami-1234abcd') + instanceA = reservationA.instances[0] + imageA_id = conn.create_image( + instanceA.id, "test-ami-A", "this is a test ami") + imageA = conn.get_image(imageA_id) + imageA.add_tag("a key", "some value") + + reservationB = conn.run_instances('ami-abcd1234') + instanceB = reservationB.instances[0] + imageB_id = conn.create_image( + instanceB.id, "test-ami-B", "this is a test ami") + imageB = conn.get_image(imageB_id) + imageB.add_tag("another key", "some other value") + + amis_by_tagA = conn.get_all_images(filters={'tag:a key': 'some value'}) + set([ami.id for ami in amis_by_tagA]).should.equal(set([imageA.id])) + + amis_by_tagB = conn.get_all_images( + filters={'tag:another key': 'some other value'}) + set([ami.id for ami in amis_by_tagB]).should.equal(set([imageB.id])) + + +@mock_ec2_deprecated +def test_getting_missing_ami(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.get_image('ami-missing') + cm.exception.code.should.equal('InvalidAMIID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_getting_malformed_ami(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.get_image('foo-missing') + cm.exception.code.should.equal('InvalidAMIID.Malformed') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_ami_attribute_group_permissions(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + image = conn.get_image(image_id) + + # Baseline + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.name.should.equal('launch_permission') + attributes.attrs.should.have.length_of(0) + + ADD_GROUP_ARGS = {'image_id': image.id, + 'attribute': 
'launchPermission', + 'operation': 'add', + 'groups': 'all'} + + REMOVE_GROUP_ARGS = {'image_id': image.id, + 'attribute': 'launchPermission', + 'operation': 'remove', + 'groups': 'all'} + + # Add 'all' group and confirm + with assert_raises(EC2ResponseError) as ex: + conn.modify_image_attribute( + **dict(ADD_GROUP_ARGS, **{'dry_run': True})) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyImageAttribute operation: Request would have succeeded, but DryRun flag is set') + + conn.modify_image_attribute(**ADD_GROUP_ARGS) + + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.attrs['groups'].should.have.length_of(1) + attributes.attrs['groups'].should.equal(['all']) + image = conn.get_image(image_id) + image.is_public.should.equal(True) + + # Add is idempotent + conn.modify_image_attribute.when.called_with( + **ADD_GROUP_ARGS).should_not.throw(EC2ResponseError) + + # Remove 'all' group and confirm + conn.modify_image_attribute(**REMOVE_GROUP_ARGS) + + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.attrs.should.have.length_of(0) + image = conn.get_image(image_id) + image.is_public.should.equal(False) + + # Remove is idempotent + conn.modify_image_attribute.when.called_with( + **REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError) + + +@mock_ec2_deprecated +def test_ami_attribute_user_permissions(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + image = conn.get_image(image_id) + + # Baseline + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.name.should.equal('launch_permission') + attributes.attrs.should.have.length_of(0) + + # Both str and int values should work. 
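+    # (The assertions below expect both IDs to come back as strings.)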
+ USER1 = '123456789011' + USER2 = 123456789022 + + ADD_USERS_ARGS = {'image_id': image.id, + 'attribute': 'launchPermission', + 'operation': 'add', + 'user_ids': [USER1, USER2]} + + REMOVE_USERS_ARGS = {'image_id': image.id, + 'attribute': 'launchPermission', + 'operation': 'remove', + 'user_ids': [USER1, USER2]} + + REMOVE_SINGLE_USER_ARGS = {'image_id': image.id, + 'attribute': 'launchPermission', + 'operation': 'remove', + 'user_ids': [USER1]} + + # Add multiple users and confirm + conn.modify_image_attribute(**ADD_USERS_ARGS) + + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.attrs['user_ids'].should.have.length_of(2) + set(attributes.attrs['user_ids']).should.equal( + set([str(USER1), str(USER2)])) + image = conn.get_image(image_id) + image.is_public.should.equal(False) + + # Add is idempotent + conn.modify_image_attribute.when.called_with( + **ADD_USERS_ARGS).should_not.throw(EC2ResponseError) + + # Remove single user and confirm + conn.modify_image_attribute(**REMOVE_SINGLE_USER_ARGS) + + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.attrs['user_ids'].should.have.length_of(1) + set(attributes.attrs['user_ids']).should.equal(set([str(USER2)])) + image = conn.get_image(image_id) + image.is_public.should.equal(False) + + # Remove multiple users and confirm + conn.modify_image_attribute(**REMOVE_USERS_ARGS) + + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.attrs.should.have.length_of(0) + image = conn.get_image(image_id) + image.is_public.should.equal(False) + + # Remove is idempotent + conn.modify_image_attribute.when.called_with( + **REMOVE_USERS_ARGS).should_not.throw(EC2ResponseError) + + +@mock_ec2 +def test_ami_describe_executable_users(): + conn = boto3.client('ec2', region_name='us-east-1') + ec2 = boto3.resource('ec2', 'us-east-1') + ec2.create_instances(ImageId='', + MinCount=1, + MaxCount=1) + response = conn.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]) + instance_id = response['Reservations'][0]['Instances'][0]['InstanceId'] + image_id = conn.create_image(InstanceId=instance_id, + Name='TestImage', )['ImageId'] + + USER1 = '123456789011' + + ADD_USER_ARGS = {'ImageId': image_id, + 'Attribute': 'launchPermission', + 'OperationType': 'add', + 'UserIds': [USER1]} + + # Add users and get no images + conn.modify_image_attribute(**ADD_USER_ARGS) + + attributes = conn.describe_image_attribute(ImageId=image_id, + Attribute='LaunchPermissions', + DryRun=False) + attributes['LaunchPermissions'].should.have.length_of(1) + attributes['LaunchPermissions'][0]['UserId'].should.equal(USER1) + images = conn.describe_images(ExecutableUsers=[USER1])['Images'] + images.should.have.length_of(1) + images[0]['ImageId'].should.equal(image_id) + + +@mock_ec2 +def test_ami_describe_executable_users_negative(): + conn = boto3.client('ec2', region_name='us-east-1') + ec2 = boto3.resource('ec2', 'us-east-1') + ec2.create_instances(ImageId='', + MinCount=1, + MaxCount=1) + response = conn.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]) + instance_id = response['Reservations'][0]['Instances'][0]['InstanceId'] + image_id = conn.create_image(InstanceId=instance_id, + Name='TestImage')['ImageId'] + + USER1 = '123456789011' + USER2 = '113355789012' + + ADD_USER_ARGS = {'ImageId': image_id, + 'Attribute': 'launchPermission', + 'OperationType': 'add', + 'UserIds': [USER1]} + + # Add users and 
get no images
+    conn.modify_image_attribute(**ADD_USER_ARGS)
+
+    attributes = conn.describe_image_attribute(ImageId=image_id,
+                                               Attribute='LaunchPermissions',
+                                               DryRun=False)
+    attributes['LaunchPermissions'].should.have.length_of(1)
+    attributes['LaunchPermissions'][0]['UserId'].should.equal(USER1)
+    images = conn.describe_images(ExecutableUsers=[USER2])['Images']
+    images.should.have.length_of(0)
+
+
+@mock_ec2
+def test_ami_describe_executable_users_and_filter():
+    conn = boto3.client('ec2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', 'us-east-1')
+    ec2.create_instances(ImageId='',
+                         MinCount=1,
+                         MaxCount=1)
+    response = conn.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}])
+    instance_id = response['Reservations'][0]['Instances'][0]['InstanceId']
+    image_id = conn.create_image(InstanceId=instance_id,
+                                 Name='ImageToDelete', )['ImageId']
+
+    USER1 = '123456789011'
+
+    ADD_USER_ARGS = {'ImageId': image_id,
+                     'Attribute': 'launchPermission',
+                     'OperationType': 'add',
+                     'UserIds': [USER1]}
+
+    # Add user and confirm the image is still returned when a filter is applied
+    conn.modify_image_attribute(**ADD_USER_ARGS)
+
+    attributes = conn.describe_image_attribute(ImageId=image_id,
+                                               Attribute='LaunchPermissions',
+                                               DryRun=False)
+    attributes['LaunchPermissions'].should.have.length_of(1)
+    attributes['LaunchPermissions'][0]['UserId'].should.equal(USER1)
+    images = conn.describe_images(ExecutableUsers=[USER1],
+                                  Filters=[{'Name': 'state', 'Values': ['available']}])['Images']
+    images.should.have.length_of(1)
+    images[0]['ImageId'].should.equal(image_id)
+
+
+@mock_ec2_deprecated
+def test_ami_attribute_user_and_group_permissions():
+    """
+    Boto supports adding/removing both users and groups at the same time.
+    Just spot-check this -- input variations, idempotency, etc. are validated
+    via user-specific and group-specific tests above. 
+ """ + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + image = conn.get_image(image_id) + + # Baseline + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.name.should.equal('launch_permission') + attributes.attrs.should.have.length_of(0) + + USER1 = '123456789011' + USER2 = '123456789022' + + ADD_ARGS = {'image_id': image.id, + 'attribute': 'launchPermission', + 'operation': 'add', + 'groups': ['all'], + 'user_ids': [USER1, USER2]} + + REMOVE_ARGS = {'image_id': image.id, + 'attribute': 'launchPermission', + 'operation': 'remove', + 'groups': ['all'], + 'user_ids': [USER1, USER2]} + + # Add and confirm + conn.modify_image_attribute(**ADD_ARGS) + + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.attrs['user_ids'].should.have.length_of(2) + set(attributes.attrs['user_ids']).should.equal(set([USER1, USER2])) + set(attributes.attrs['groups']).should.equal(set(['all'])) + image = conn.get_image(image_id) + image.is_public.should.equal(True) + + # Remove and confirm + conn.modify_image_attribute(**REMOVE_ARGS) + + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.attrs.should.have.length_of(0) + image = conn.get_image(image_id) + image.is_public.should.equal(False) + + +@mock_ec2_deprecated +def test_ami_attribute_error_cases(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + image = conn.get_image(image_id) + + # Error: Add with group != 'all' + with assert_raises(EC2ResponseError) as cm: + conn.modify_image_attribute(image.id, + attribute='launchPermission', + operation='add', + groups='everyone') + cm.exception.code.should.equal('InvalidAMIAttributeItemValue') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Add with user ID that isn't an integer. + with assert_raises(EC2ResponseError) as cm: + conn.modify_image_attribute(image.id, + attribute='launchPermission', + operation='add', + user_ids='12345678901A') + cm.exception.code.should.equal('InvalidAMIAttributeItemValue') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Add with user ID that is > length 12. + with assert_raises(EC2ResponseError) as cm: + conn.modify_image_attribute(image.id, + attribute='launchPermission', + operation='add', + user_ids='1234567890123') + cm.exception.code.should.equal('InvalidAMIAttributeItemValue') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Add with user ID that is < length 12. + with assert_raises(EC2ResponseError) as cm: + conn.modify_image_attribute(image.id, + attribute='launchPermission', + operation='add', + user_ids='12345678901') + cm.exception.code.should.equal('InvalidAMIAttributeItemValue') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Add with one invalid user ID among other valid IDs, ensure no + # partial changes. 
+ with assert_raises(EC2ResponseError) as cm: + conn.modify_image_attribute(image.id, + attribute='launchPermission', + operation='add', + user_ids=['123456789011', 'foo', '123456789022']) + cm.exception.code.should.equal('InvalidAMIAttributeItemValue') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + attributes = conn.get_image_attribute( + image.id, attribute='launchPermission') + attributes.attrs.should.have.length_of(0) + + # Error: Add with invalid image ID + with assert_raises(EC2ResponseError) as cm: + conn.modify_image_attribute("ami-abcd1234", + attribute='launchPermission', + operation='add', + groups='all') + cm.exception.code.should.equal('InvalidAMIID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Remove with invalid image ID + with assert_raises(EC2ResponseError) as cm: + conn.modify_image_attribute("ami-abcd1234", + attribute='launchPermission', + operation='remove', + groups='all') + cm.exception.code.should.equal('InvalidAMIID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2 +def test_ami_describe_non_existent(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + # Valid pattern but non-existent id + img = ec2.Image('ami-abcd1234') + with assert_raises(ClientError): + img.load() + # Invalid ami pattern + img = ec2.Image('not_an_ami_id') + with assert_raises(ClientError): + img.load() + + +@mock_ec2 +def test_ami_filter_wildcard(): + ec2_resource = boto3.resource('ec2', region_name='us-west-1') + ec2_client = boto3.client('ec2', region_name='us-west-1') + + instance = ec2_resource.create_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0] + instance.create_image(Name='test-image') + + # create an image with the same owner but will not match the filter + instance.create_image(Name='not-matching-image') + + my_images = ec2_client.describe_images( + Owners=['111122223333'], + Filters=[{'Name': 'name', 'Values': ['test*']}] + )['Images'] + my_images.should.have.length_of(1) + + +@mock_ec2 +def test_ami_filter_by_owner_id(): + client = boto3.client('ec2', region_name='us-east-1') + + ubuntu_id = '099720109477' + + ubuntu_images = client.describe_images(Owners=[ubuntu_id]) + all_images = client.describe_images() + + ubuntu_ids = [ami['OwnerId'] for ami in ubuntu_images['Images']] + all_ids = [ami['OwnerId'] for ami in all_images['Images']] + + # Assert all ubuntu_ids are the same and one equals ubuntu_id + assert all(ubuntu_ids) and ubuntu_ids[0] == ubuntu_id + # Check we actually have a subset of images + assert len(ubuntu_ids) < len(all_ids) + + +@mock_ec2 +def test_ami_filter_by_self(): + ec2_resource = boto3.resource('ec2', region_name='us-west-1') + ec2_client = boto3.client('ec2', region_name='us-west-1') + + my_images = ec2_client.describe_images(Owners=['self'])['Images'] + my_images.should.have.length_of(0) + + # Create a new image + instance = ec2_resource.create_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0] + instance.create_image(Name='test-image') + + my_images = ec2_client.describe_images(Owners=['self'])['Images'] + my_images.should.have.length_of(1) + + +@mock_ec2 +def test_ami_snapshots_have_correct_owner(): + ec2_client = boto3.client('ec2', region_name='us-west-1') + + images_response = ec2_client.describe_images() + + owner_id_to_snapshot_ids = {} + for image in images_response['Images']: + owner_id = image['OwnerId'] + snapshot_ids = [ + 
block_device_mapping['Ebs']['SnapshotId']
+            for block_device_mapping in image['BlockDeviceMappings']
+        ]
+        existing_snapshot_ids = owner_id_to_snapshot_ids.get(owner_id, [])
+        owner_id_to_snapshot_ids[owner_id] = (
+            existing_snapshot_ids + snapshot_ids
+        )
+
+    for owner_id in owner_id_to_snapshot_ids:
+        snapshots_response = ec2_client.describe_snapshots(
+            SnapshotIds=owner_id_to_snapshot_ids[owner_id]
+        )
+
+        for snapshot in snapshots_response['Snapshots']:
+            assert owner_id == snapshot['OwnerId']
diff --git a/tests/test_ec2/test_availability_zones_and_regions.py b/tests/test_ec2/test_availability_zones_and_regions.py
index c64f075ca..0c94687fa 100644
--- a/tests/test_ec2/test_availability_zones_and_regions.py
+++ b/tests/test_ec2/test_availability_zones_and_regions.py
@@ -1,54 +1,54 @@
-from __future__ import unicode_literals
-import boto
-import boto.ec2
-import boto3
-import sure # noqa
-
-from moto import mock_ec2, mock_ec2_deprecated
-
-
-@mock_ec2_deprecated
-def test_describe_regions():
-    conn = boto.connect_ec2('the_key', 'the_secret')
-    regions = conn.get_all_regions()
-    regions.should.have.length_of(16)
-    for region in regions:
-        region.endpoint.should.contain(region.name)
-
-
-@mock_ec2_deprecated
-def test_availability_zones():
-    conn = boto.connect_ec2('the_key', 'the_secret')
-    regions = conn.get_all_regions()
-    for region in regions:
-        conn = boto.ec2.connect_to_region(region.name)
-        if conn is None:
-            continue
-        for zone in conn.get_all_zones():
-            zone.name.should.contain(region.name)
-
-
-@mock_ec2
-def test_boto3_describe_regions():
-    ec2 = boto3.client('ec2', 'us-east-1')
-    resp = ec2.describe_regions()
-    resp['Regions'].should.have.length_of(16)
-    for rec in resp['Regions']:
-        rec['Endpoint'].should.contain(rec['RegionName'])
-
-    test_region = 'us-east-1'
-    resp = ec2.describe_regions(RegionNames=[test_region])
-    resp['Regions'].should.have.length_of(1)
-    resp['Regions'][0].should.have.key('RegionName').which.should.equal(test_region)
-
-
-@mock_ec2
-def test_boto3_availability_zones():
-    ec2 = boto3.client('ec2', 'us-east-1')
-    resp = ec2.describe_regions()
-    regions = [r['RegionName'] for r in resp['Regions']]
-    for region in regions:
-        conn = boto3.client('ec2', region)
-        resp = conn.describe_availability_zones()
-        for rec in resp['AvailabilityZones']:
-            rec['ZoneName'].should.contain(region)
+from __future__ import unicode_literals
+import boto
+import boto.ec2
+import boto3
+import sure # noqa
+
+from moto import mock_ec2, mock_ec2_deprecated
+
+
+@mock_ec2_deprecated
+def test_describe_regions():
+    conn = boto.connect_ec2('the_key', 'the_secret')
+    regions = conn.get_all_regions()
+    regions.should.have.length_of(16)
+    for region in regions:
+        region.endpoint.should.contain(region.name)
+
+
+@mock_ec2_deprecated
+def test_availability_zones():
+    conn = boto.connect_ec2('the_key', 'the_secret')
+    regions = conn.get_all_regions()
+    for region in regions:
+        conn = boto.ec2.connect_to_region(region.name)
+        if conn is None:
+            continue
+        for zone in conn.get_all_zones():
+            zone.name.should.contain(region.name)
+
+
+@mock_ec2
+def test_boto3_describe_regions():
+    ec2 = boto3.client('ec2', 'us-east-1')
+    resp = ec2.describe_regions()
+    resp['Regions'].should.have.length_of(16)
+    for rec in resp['Regions']:
+        rec['Endpoint'].should.contain(rec['RegionName'])
+
+    test_region = 'us-east-1'
+    resp = ec2.describe_regions(RegionNames=[test_region])
+    resp['Regions'].should.have.length_of(1)
+    resp['Regions'][0].should.have.key('RegionName').which.should.equal(test_region)
+
+
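+# Every availability zone reported for a region should embed that
+# region's name, e.g. 'us-east-1a' for 'us-east-1'.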
+@mock_ec2 +def test_boto3_availability_zones(): + ec2 = boto3.client('ec2', 'us-east-1') + resp = ec2.describe_regions() + regions = [r['RegionName'] for r in resp['Regions']] + for region in regions: + conn = boto3.client('ec2', region) + resp = conn.describe_availability_zones() + for rec in resp['AvailabilityZones']: + rec['ZoneName'].should.contain(region) diff --git a/tests/test_ec2/test_customer_gateways.py b/tests/test_ec2/test_customer_gateways.py index 589f887f6..82e316723 100644 --- a/tests/test_ec2/test_customer_gateways.py +++ b/tests/test_ec2/test_customer_gateways.py @@ -1,52 +1,52 @@ -from __future__ import unicode_literals -import boto -import sure # noqa -from nose.tools import assert_raises -from nose.tools import assert_false -from boto.exception import EC2ResponseError - -from moto import mock_ec2_deprecated - - -@mock_ec2_deprecated -def test_create_customer_gateways(): - conn = boto.connect_vpc('the_key', 'the_secret') - - customer_gateway = conn.create_customer_gateway( - 'ipsec.1', '205.251.242.54', 65534) - customer_gateway.should_not.be.none - customer_gateway.id.should.match(r'cgw-\w+') - customer_gateway.type.should.equal('ipsec.1') - customer_gateway.bgp_asn.should.equal(65534) - customer_gateway.ip_address.should.equal('205.251.242.54') - - -@mock_ec2_deprecated -def test_describe_customer_gateways(): - conn = boto.connect_vpc('the_key', 'the_secret') - customer_gateway = conn.create_customer_gateway( - 'ipsec.1', '205.251.242.54', 65534) - cgws = conn.get_all_customer_gateways() - cgws.should.have.length_of(1) - cgws[0].id.should.match(customer_gateway.id) - - -@mock_ec2_deprecated -def test_delete_customer_gateways(): - conn = boto.connect_vpc('the_key', 'the_secret') - - customer_gateway = conn.create_customer_gateway( - 'ipsec.1', '205.251.242.54', 65534) - customer_gateway.should_not.be.none - cgws = conn.get_all_customer_gateways() - cgws[0].id.should.match(customer_gateway.id) - deleted = conn.delete_customer_gateway(customer_gateway.id) - cgws = conn.get_all_customer_gateways() - cgws.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_delete_customer_gateways_bad_id(): - conn = boto.connect_vpc('the_key', 'the_secret') - with assert_raises(EC2ResponseError) as cm: - conn.delete_customer_gateway('cgw-0123abcd') +from __future__ import unicode_literals +import boto +import sure # noqa +from nose.tools import assert_raises +from nose.tools import assert_false +from boto.exception import EC2ResponseError + +from moto import mock_ec2_deprecated + + +@mock_ec2_deprecated +def test_create_customer_gateways(): + conn = boto.connect_vpc('the_key', 'the_secret') + + customer_gateway = conn.create_customer_gateway( + 'ipsec.1', '205.251.242.54', 65534) + customer_gateway.should_not.be.none + customer_gateway.id.should.match(r'cgw-\w+') + customer_gateway.type.should.equal('ipsec.1') + customer_gateway.bgp_asn.should.equal(65534) + customer_gateway.ip_address.should.equal('205.251.242.54') + + +@mock_ec2_deprecated +def test_describe_customer_gateways(): + conn = boto.connect_vpc('the_key', 'the_secret') + customer_gateway = conn.create_customer_gateway( + 'ipsec.1', '205.251.242.54', 65534) + cgws = conn.get_all_customer_gateways() + cgws.should.have.length_of(1) + cgws[0].id.should.match(customer_gateway.id) + + +@mock_ec2_deprecated +def test_delete_customer_gateways(): + conn = boto.connect_vpc('the_key', 'the_secret') + + customer_gateway = conn.create_customer_gateway( + 'ipsec.1', '205.251.242.54', 65534) + customer_gateway.should_not.be.none + 
cgws = conn.get_all_customer_gateways() + cgws[0].id.should.match(customer_gateway.id) + deleted = conn.delete_customer_gateway(customer_gateway.id) + cgws = conn.get_all_customer_gateways() + cgws.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_delete_customer_gateways_bad_id(): + conn = boto.connect_vpc('the_key', 'the_secret') + with assert_raises(EC2ResponseError) as cm: + conn.delete_customer_gateway('cgw-0123abcd') diff --git a/tests/test_ec2/test_dhcp_options.py b/tests/test_ec2/test_dhcp_options.py index 4e2520241..2aff803ae 100644 --- a/tests/test_ec2/test_dhcp_options.py +++ b/tests/test_ec2/test_dhcp_options.py @@ -1,333 +1,333 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises - -import boto3 -import boto -from boto.exception import EC2ResponseError - -import sure # noqa - -from moto import mock_ec2, mock_ec2_deprecated - -SAMPLE_DOMAIN_NAME = u'example.com' -SAMPLE_NAME_SERVERS = [u'10.0.0.6', u'10.0.0.7'] - - -@mock_ec2_deprecated -def test_dhcp_options_associate(): - """ associate dhcp option """ - conn = boto.connect_vpc('the_key', 'the_secret') - dhcp_options = conn.create_dhcp_options( - SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) - vpc = conn.create_vpc("10.0.0.0/16") - - rval = conn.associate_dhcp_options(dhcp_options.id, vpc.id) - rval.should.be.equal(True) - - -@mock_ec2_deprecated -def test_dhcp_options_associate_invalid_dhcp_id(): - """ associate dhcp option bad dhcp options id """ - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - - with assert_raises(EC2ResponseError) as cm: - conn.associate_dhcp_options("foo", vpc.id) - cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_dhcp_options_associate_invalid_vpc_id(): - """ associate dhcp option invalid vpc id """ - conn = boto.connect_vpc('the_key', 'the_secret') - dhcp_options = conn.create_dhcp_options( - SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) - - with assert_raises(EC2ResponseError) as cm: - conn.associate_dhcp_options(dhcp_options.id, "foo") - cm.exception.code.should.equal('InvalidVpcID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_dhcp_options_delete_with_vpc(): - """Test deletion of dhcp options with vpc""" - conn = boto.connect_vpc('the_key', 'the_secret') - dhcp_options = conn.create_dhcp_options( - SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) - dhcp_options_id = dhcp_options.id - vpc = conn.create_vpc("10.0.0.0/16") - - rval = conn.associate_dhcp_options(dhcp_options_id, vpc.id) - rval.should.be.equal(True) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_dhcp_options(dhcp_options_id) - cm.exception.code.should.equal('DependencyViolation') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - vpc.delete() - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_dhcp_options([dhcp_options_id]) - cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_create_dhcp_options(): - """Create most basic dhcp option""" - conn = boto.connect_vpc('the_key', 'the_secret') - - dhcp_option = conn.create_dhcp_options( - SAMPLE_DOMAIN_NAME, 
SAMPLE_NAME_SERVERS) - dhcp_option.options[u'domain-name'][0].should.be.equal(SAMPLE_DOMAIN_NAME) - dhcp_option.options[ - u'domain-name-servers'][0].should.be.equal(SAMPLE_NAME_SERVERS[0]) - dhcp_option.options[ - u'domain-name-servers'][1].should.be.equal(SAMPLE_NAME_SERVERS[1]) - - -@mock_ec2_deprecated -def test_create_dhcp_options_invalid_options(): - """Create invalid dhcp options""" - conn = boto.connect_vpc('the_key', 'the_secret') - servers = ["f", "f", "f", "f", "f"] - - with assert_raises(EC2ResponseError) as cm: - conn.create_dhcp_options(ntp_servers=servers) - cm.exception.code.should.equal('InvalidParameterValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - with assert_raises(EC2ResponseError) as cm: - conn.create_dhcp_options(netbios_node_type="0") - cm.exception.code.should.equal('InvalidParameterValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_describe_dhcp_options(): - """Test dhcp options lookup by id""" - conn = boto.connect_vpc('the_key', 'the_secret') - - dhcp_option = conn.create_dhcp_options() - dhcp_options = conn.get_all_dhcp_options([dhcp_option.id]) - dhcp_options.should.be.length_of(1) - - dhcp_options = conn.get_all_dhcp_options() - dhcp_options.should.be.length_of(1) - - -@mock_ec2_deprecated -def test_describe_dhcp_options_invalid_id(): - """get error on invalid dhcp_option_id lookup""" - conn = boto.connect_vpc('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_dhcp_options(["1"]) - cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_delete_dhcp_options(): - """delete dhcp option""" - conn = boto.connect_vpc('the_key', 'the_secret') - - dhcp_option = conn.create_dhcp_options() - dhcp_options = conn.get_all_dhcp_options([dhcp_option.id]) - dhcp_options.should.be.length_of(1) - - conn.delete_dhcp_options(dhcp_option.id) # .should.be.equal(True) - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_dhcp_options([dhcp_option.id]) - cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_delete_dhcp_options_invalid_id(): - conn = boto.connect_vpc('the_key', 'the_secret') - - conn.create_dhcp_options() - - with assert_raises(EC2ResponseError) as cm: - conn.delete_dhcp_options("dopt-abcd1234") - cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_delete_dhcp_options_malformed_id(): - conn = boto.connect_vpc('the_key', 'the_secret') - - conn.create_dhcp_options() - - with assert_raises(EC2ResponseError) as cm: - conn.delete_dhcp_options("foo-abcd1234") - cm.exception.code.should.equal('InvalidDhcpOptionsId.Malformed') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_dhcp_tagging(): - conn = boto.connect_vpc('the_key', 'the_secret') - dhcp_option = conn.create_dhcp_options() - - dhcp_option.add_tag("a key", "some value") - - tag = conn.get_all_tags()[0] - tag.name.should.equal("a key") - tag.value.should.equal("some value") - - # Refresh the DHCP options - dhcp_option = conn.get_all_dhcp_options()[0] - dhcp_option.tags.should.have.length_of(1) 
- dhcp_option.tags["a key"].should.equal("some value") - - -@mock_ec2_deprecated -def test_dhcp_options_get_by_tag(): - conn = boto.connect_vpc('the_key', 'the_secret') - - dhcp1 = conn.create_dhcp_options('example.com', ['10.0.10.2']) - dhcp1.add_tag('Name', 'TestDhcpOptions1') - dhcp1.add_tag('test-tag', 'test-value') - - dhcp2 = conn.create_dhcp_options('example.com', ['10.0.20.2']) - dhcp2.add_tag('Name', 'TestDhcpOptions2') - dhcp2.add_tag('test-tag', 'test-value') - - filters = {'tag:Name': 'TestDhcpOptions1', 'tag:test-tag': 'test-value'} - dhcp_options_sets = conn.get_all_dhcp_options(filters=filters) - - dhcp_options_sets.should.have.length_of(1) - dhcp_options_sets[0].options[ - 'domain-name'][0].should.be.equal('example.com') - dhcp_options_sets[0].options[ - 'domain-name-servers'][0].should.be.equal('10.0.10.2') - dhcp_options_sets[0].tags['Name'].should.equal('TestDhcpOptions1') - dhcp_options_sets[0].tags['test-tag'].should.equal('test-value') - - filters = {'tag:Name': 'TestDhcpOptions2', 'tag:test-tag': 'test-value'} - dhcp_options_sets = conn.get_all_dhcp_options(filters=filters) - - dhcp_options_sets.should.have.length_of(1) - dhcp_options_sets[0].options[ - 'domain-name'][0].should.be.equal('example.com') - dhcp_options_sets[0].options[ - 'domain-name-servers'][0].should.be.equal('10.0.20.2') - dhcp_options_sets[0].tags['Name'].should.equal('TestDhcpOptions2') - dhcp_options_sets[0].tags['test-tag'].should.equal('test-value') - - filters = {'tag:test-tag': 'test-value'} - dhcp_options_sets = conn.get_all_dhcp_options(filters=filters) - - dhcp_options_sets.should.have.length_of(2) - - -@mock_ec2_deprecated -def test_dhcp_options_get_by_id(): - conn = boto.connect_vpc('the_key', 'the_secret') - - dhcp1 = conn.create_dhcp_options('test1.com', ['10.0.10.2']) - dhcp1.add_tag('Name', 'TestDhcpOptions1') - dhcp1.add_tag('test-tag', 'test-value') - dhcp1_id = dhcp1.id - - dhcp2 = conn.create_dhcp_options('test2.com', ['10.0.20.2']) - dhcp2.add_tag('Name', 'TestDhcpOptions2') - dhcp2.add_tag('test-tag', 'test-value') - dhcp2_id = dhcp2.id - - dhcp_options_sets = conn.get_all_dhcp_options() - dhcp_options_sets.should.have.length_of(2) - - dhcp_options_sets = conn.get_all_dhcp_options( - filters={'dhcp-options-id': dhcp1_id}) - - dhcp_options_sets.should.have.length_of(1) - dhcp_options_sets[0].options['domain-name'][0].should.be.equal('test1.com') - dhcp_options_sets[0].options[ - 'domain-name-servers'][0].should.be.equal('10.0.10.2') - - dhcp_options_sets = conn.get_all_dhcp_options( - filters={'dhcp-options-id': dhcp2_id}) - - dhcp_options_sets.should.have.length_of(1) - dhcp_options_sets[0].options['domain-name'][0].should.be.equal('test2.com') - dhcp_options_sets[0].options[ - 'domain-name-servers'][0].should.be.equal('10.0.20.2') - - -@mock_ec2 -def test_dhcp_options_get_by_value_filter(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - ec2.create_dhcp_options(DhcpConfigurations=[ - {'Key': 'domain-name', 'Values': ['example.com']}, - {'Key': 'domain-name-servers', 'Values': ['10.0.10.2']} - ]) - - ec2.create_dhcp_options(DhcpConfigurations=[ - {'Key': 'domain-name', 'Values': ['example.com']}, - {'Key': 'domain-name-servers', 'Values': ['10.0.20.2']} - ]) - - ec2.create_dhcp_options(DhcpConfigurations=[ - {'Key': 'domain-name', 'Values': ['example.com']}, - {'Key': 'domain-name-servers', 'Values': ['10.0.30.2']} - ]) - - filters = [{'Name': 'value', 'Values': ['10.0.10.2']}] - dhcp_options_sets = list(ec2.dhcp_options_sets.filter(Filters=filters)) - 
dhcp_options_sets.should.have.length_of(1) - - -@mock_ec2 -def test_dhcp_options_get_by_key_filter(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - ec2.create_dhcp_options(DhcpConfigurations=[ - {'Key': 'domain-name', 'Values': ['example.com']}, - {'Key': 'domain-name-servers', 'Values': ['10.0.10.2']} - ]) - - ec2.create_dhcp_options(DhcpConfigurations=[ - {'Key': 'domain-name', 'Values': ['example.com']}, - {'Key': 'domain-name-servers', 'Values': ['10.0.20.2']} - ]) - - ec2.create_dhcp_options(DhcpConfigurations=[ - {'Key': 'domain-name', 'Values': ['example.com']}, - {'Key': 'domain-name-servers', 'Values': ['10.0.30.2']} - ]) - - filters = [{'Name': 'key', 'Values': ['domain-name']}] - dhcp_options_sets = list(ec2.dhcp_options_sets.filter(Filters=filters)) - dhcp_options_sets.should.have.length_of(3) - - -@mock_ec2_deprecated -def test_dhcp_options_get_by_invalid_filter(): - conn = boto.connect_vpc('the_key', 'the_secret') - - conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) - filters = {'invalid-filter': 'invalid-value'} - - conn.get_all_dhcp_options.when.called_with( - filters=filters).should.throw(NotImplementedError) +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises + +import boto3 +import boto +from boto.exception import EC2ResponseError + +import sure # noqa + +from moto import mock_ec2, mock_ec2_deprecated + +SAMPLE_DOMAIN_NAME = u'example.com' +SAMPLE_NAME_SERVERS = [u'10.0.0.6', u'10.0.0.7'] + + +@mock_ec2_deprecated +def test_dhcp_options_associate(): + """ associate dhcp option """ + conn = boto.connect_vpc('the_key', 'the_secret') + dhcp_options = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + vpc = conn.create_vpc("10.0.0.0/16") + + rval = conn.associate_dhcp_options(dhcp_options.id, vpc.id) + rval.should.be.equal(True) + + +@mock_ec2_deprecated +def test_dhcp_options_associate_invalid_dhcp_id(): + """ associate dhcp option bad dhcp options id """ + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + + with assert_raises(EC2ResponseError) as cm: + conn.associate_dhcp_options("foo", vpc.id) + cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_dhcp_options_associate_invalid_vpc_id(): + """ associate dhcp option invalid vpc id """ + conn = boto.connect_vpc('the_key', 'the_secret') + dhcp_options = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + + with assert_raises(EC2ResponseError) as cm: + conn.associate_dhcp_options(dhcp_options.id, "foo") + cm.exception.code.should.equal('InvalidVpcID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_dhcp_options_delete_with_vpc(): + """Test deletion of dhcp options with vpc""" + conn = boto.connect_vpc('the_key', 'the_secret') + dhcp_options = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + dhcp_options_id = dhcp_options.id + vpc = conn.create_vpc("10.0.0.0/16") + + rval = conn.associate_dhcp_options(dhcp_options_id, vpc.id) + rval.should.be.equal(True) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_dhcp_options(dhcp_options_id) + cm.exception.code.should.equal('DependencyViolation') + cm.exception.status.should.equal(400) + 
cm.exception.request_id.should_not.be.none + + vpc.delete() + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_dhcp_options([dhcp_options_id]) + cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_create_dhcp_options(): + """Create most basic dhcp option""" + conn = boto.connect_vpc('the_key', 'the_secret') + + dhcp_option = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + dhcp_option.options[u'domain-name'][0].should.be.equal(SAMPLE_DOMAIN_NAME) + dhcp_option.options[ + u'domain-name-servers'][0].should.be.equal(SAMPLE_NAME_SERVERS[0]) + dhcp_option.options[ + u'domain-name-servers'][1].should.be.equal(SAMPLE_NAME_SERVERS[1]) + + +@mock_ec2_deprecated +def test_create_dhcp_options_invalid_options(): + """Create invalid dhcp options""" + conn = boto.connect_vpc('the_key', 'the_secret') + servers = ["f", "f", "f", "f", "f"] + + with assert_raises(EC2ResponseError) as cm: + conn.create_dhcp_options(ntp_servers=servers) + cm.exception.code.should.equal('InvalidParameterValue') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + with assert_raises(EC2ResponseError) as cm: + conn.create_dhcp_options(netbios_node_type="0") + cm.exception.code.should.equal('InvalidParameterValue') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_describe_dhcp_options(): + """Test dhcp options lookup by id""" + conn = boto.connect_vpc('the_key', 'the_secret') + + dhcp_option = conn.create_dhcp_options() + dhcp_options = conn.get_all_dhcp_options([dhcp_option.id]) + dhcp_options.should.be.length_of(1) + + dhcp_options = conn.get_all_dhcp_options() + dhcp_options.should.be.length_of(1) + + +@mock_ec2_deprecated +def test_describe_dhcp_options_invalid_id(): + """get error on invalid dhcp_option_id lookup""" + conn = boto.connect_vpc('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_dhcp_options(["1"]) + cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_delete_dhcp_options(): + """delete dhcp option""" + conn = boto.connect_vpc('the_key', 'the_secret') + + dhcp_option = conn.create_dhcp_options() + dhcp_options = conn.get_all_dhcp_options([dhcp_option.id]) + dhcp_options.should.be.length_of(1) + + conn.delete_dhcp_options(dhcp_option.id) # .should.be.equal(True) + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_dhcp_options([dhcp_option.id]) + cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_delete_dhcp_options_invalid_id(): + conn = boto.connect_vpc('the_key', 'the_secret') + + conn.create_dhcp_options() + + with assert_raises(EC2ResponseError) as cm: + conn.delete_dhcp_options("dopt-abcd1234") + cm.exception.code.should.equal('InvalidDhcpOptionID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_delete_dhcp_options_malformed_id(): + conn = boto.connect_vpc('the_key', 'the_secret') + + conn.create_dhcp_options() + + with assert_raises(EC2ResponseError) as cm: + conn.delete_dhcp_options("foo-abcd1234") + 
cm.exception.code.should.equal('InvalidDhcpOptionsId.Malformed') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_dhcp_tagging(): + conn = boto.connect_vpc('the_key', 'the_secret') + dhcp_option = conn.create_dhcp_options() + + dhcp_option.add_tag("a key", "some value") + + tag = conn.get_all_tags()[0] + tag.name.should.equal("a key") + tag.value.should.equal("some value") + + # Refresh the DHCP options + dhcp_option = conn.get_all_dhcp_options()[0] + dhcp_option.tags.should.have.length_of(1) + dhcp_option.tags["a key"].should.equal("some value") + + +@mock_ec2_deprecated +def test_dhcp_options_get_by_tag(): + conn = boto.connect_vpc('the_key', 'the_secret') + + dhcp1 = conn.create_dhcp_options('example.com', ['10.0.10.2']) + dhcp1.add_tag('Name', 'TestDhcpOptions1') + dhcp1.add_tag('test-tag', 'test-value') + + dhcp2 = conn.create_dhcp_options('example.com', ['10.0.20.2']) + dhcp2.add_tag('Name', 'TestDhcpOptions2') + dhcp2.add_tag('test-tag', 'test-value') + + filters = {'tag:Name': 'TestDhcpOptions1', 'tag:test-tag': 'test-value'} + dhcp_options_sets = conn.get_all_dhcp_options(filters=filters) + + dhcp_options_sets.should.have.length_of(1) + dhcp_options_sets[0].options[ + 'domain-name'][0].should.be.equal('example.com') + dhcp_options_sets[0].options[ + 'domain-name-servers'][0].should.be.equal('10.0.10.2') + dhcp_options_sets[0].tags['Name'].should.equal('TestDhcpOptions1') + dhcp_options_sets[0].tags['test-tag'].should.equal('test-value') + + filters = {'tag:Name': 'TestDhcpOptions2', 'tag:test-tag': 'test-value'} + dhcp_options_sets = conn.get_all_dhcp_options(filters=filters) + + dhcp_options_sets.should.have.length_of(1) + dhcp_options_sets[0].options[ + 'domain-name'][0].should.be.equal('example.com') + dhcp_options_sets[0].options[ + 'domain-name-servers'][0].should.be.equal('10.0.20.2') + dhcp_options_sets[0].tags['Name'].should.equal('TestDhcpOptions2') + dhcp_options_sets[0].tags['test-tag'].should.equal('test-value') + + filters = {'tag:test-tag': 'test-value'} + dhcp_options_sets = conn.get_all_dhcp_options(filters=filters) + + dhcp_options_sets.should.have.length_of(2) + + +@mock_ec2_deprecated +def test_dhcp_options_get_by_id(): + conn = boto.connect_vpc('the_key', 'the_secret') + + dhcp1 = conn.create_dhcp_options('test1.com', ['10.0.10.2']) + dhcp1.add_tag('Name', 'TestDhcpOptions1') + dhcp1.add_tag('test-tag', 'test-value') + dhcp1_id = dhcp1.id + + dhcp2 = conn.create_dhcp_options('test2.com', ['10.0.20.2']) + dhcp2.add_tag('Name', 'TestDhcpOptions2') + dhcp2.add_tag('test-tag', 'test-value') + dhcp2_id = dhcp2.id + + dhcp_options_sets = conn.get_all_dhcp_options() + dhcp_options_sets.should.have.length_of(2) + + dhcp_options_sets = conn.get_all_dhcp_options( + filters={'dhcp-options-id': dhcp1_id}) + + dhcp_options_sets.should.have.length_of(1) + dhcp_options_sets[0].options['domain-name'][0].should.be.equal('test1.com') + dhcp_options_sets[0].options[ + 'domain-name-servers'][0].should.be.equal('10.0.10.2') + + dhcp_options_sets = conn.get_all_dhcp_options( + filters={'dhcp-options-id': dhcp2_id}) + + dhcp_options_sets.should.have.length_of(1) + dhcp_options_sets[0].options['domain-name'][0].should.be.equal('test2.com') + dhcp_options_sets[0].options[ + 'domain-name-servers'][0].should.be.equal('10.0.20.2') + + +@mock_ec2 +def test_dhcp_options_get_by_value_filter(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + ec2.create_dhcp_options(DhcpConfigurations=[ + {'Key': 'domain-name', 
'Values': ['example.com']}, + {'Key': 'domain-name-servers', 'Values': ['10.0.10.2']} + ]) + + ec2.create_dhcp_options(DhcpConfigurations=[ + {'Key': 'domain-name', 'Values': ['example.com']}, + {'Key': 'domain-name-servers', 'Values': ['10.0.20.2']} + ]) + + ec2.create_dhcp_options(DhcpConfigurations=[ + {'Key': 'domain-name', 'Values': ['example.com']}, + {'Key': 'domain-name-servers', 'Values': ['10.0.30.2']} + ]) + + filters = [{'Name': 'value', 'Values': ['10.0.10.2']}] + dhcp_options_sets = list(ec2.dhcp_options_sets.filter(Filters=filters)) + dhcp_options_sets.should.have.length_of(1) + + +@mock_ec2 +def test_dhcp_options_get_by_key_filter(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + ec2.create_dhcp_options(DhcpConfigurations=[ + {'Key': 'domain-name', 'Values': ['example.com']}, + {'Key': 'domain-name-servers', 'Values': ['10.0.10.2']} + ]) + + ec2.create_dhcp_options(DhcpConfigurations=[ + {'Key': 'domain-name', 'Values': ['example.com']}, + {'Key': 'domain-name-servers', 'Values': ['10.0.20.2']} + ]) + + ec2.create_dhcp_options(DhcpConfigurations=[ + {'Key': 'domain-name', 'Values': ['example.com']}, + {'Key': 'domain-name-servers', 'Values': ['10.0.30.2']} + ]) + + filters = [{'Name': 'key', 'Values': ['domain-name']}] + dhcp_options_sets = list(ec2.dhcp_options_sets.filter(Filters=filters)) + dhcp_options_sets.should.have.length_of(3) + + +@mock_ec2_deprecated +def test_dhcp_options_get_by_invalid_filter(): + conn = boto.connect_vpc('the_key', 'the_secret') + + conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + filters = {'invalid-filter': 'invalid-value'} + + conn.get_all_dhcp_options.when.called_with( + filters=filters).should.throw(NotImplementedError) diff --git a/tests/test_ec2/test_ec2_core.py b/tests/test_ec2/test_ec2_core.py index baffc4882..78b780d97 100644 --- a/tests/test_ec2/test_ec2_core.py +++ b/tests/test_ec2/test_ec2_core.py @@ -1 +1 @@ -from __future__ import unicode_literals +from __future__ import unicode_literals diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 442e41dde..a5583f44b 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -1,665 +1,665 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises - -from moto.ec2 import ec2_backends -import boto -import boto3 -from botocore.exceptions import ClientError -from boto.exception import EC2ResponseError -from freezegun import freeze_time -import sure # noqa - -from moto import mock_ec2_deprecated, mock_ec2 - - -@mock_ec2_deprecated -def test_create_and_delete_volume(): - conn = boto.connect_ec2('the_key', 'the_secret') - volume = conn.create_volume(80, "us-east-1a") - - all_volumes = conn.get_all_volumes() - - current_volume = [item for item in all_volumes if item.id == volume.id] - current_volume.should.have.length_of(1) - current_volume[0].size.should.equal(80) - current_volume[0].zone.should.equal("us-east-1a") - current_volume[0].encrypted.should.be(False) - - volume = current_volume[0] - - with assert_raises(EC2ResponseError) as ex: - volume.delete(dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DeleteVolume operation: Request would have succeeded, but DryRun flag is set') - - 
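# Editor's note (not part of the patch): the DryRun assertion above is the
# boto2 idiom. A minimal boto3 sketch of the same check, assuming moto's
# generic DryRun handling; the test name below is hypothetical.
import boto3
from botocore.exceptions import ClientError
from moto import mock_ec2

@mock_ec2
def sketch_delete_volume_dry_run():
    client = boto3.client('ec2', region_name='us-east-1')
    vol = client.create_volume(AvailabilityZone='us-east-1a', Size=80)
    try:
        client.delete_volume(VolumeId=vol['VolumeId'], DryRun=True)
    except ClientError as err:
        # A DryRun request fails with DryRunOperation when it would succeed.
        assert err.response['Error']['Code'] == 'DryRunOperation'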
volume.delete() - - all_volumes = conn.get_all_volumes() - my_volume = [item for item in all_volumes if item.id == volume.id] - my_volume.should.have.length_of(0) - - # Deleting something that was already deleted should throw an error - with assert_raises(EC2ResponseError) as cm: - volume.delete() - cm.exception.code.should.equal('InvalidVolume.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_create_encrypted_volume_dryrun(): - conn = boto.connect_ec2('the_key', 'the_secret') - with assert_raises(EC2ResponseError) as ex: - conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') - - -@mock_ec2_deprecated -def test_create_encrypted_volume(): - conn = boto.connect_ec2('the_key', 'the_secret') - volume = conn.create_volume(80, "us-east-1a", encrypted=True) - - with assert_raises(EC2ResponseError) as ex: - conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') - - all_volumes = [vol for vol in conn.get_all_volumes() if vol.id == volume.id] - all_volumes[0].encrypted.should.be(True) - - -@mock_ec2_deprecated -def test_filter_volume_by_id(): - conn = boto.connect_ec2('the_key', 'the_secret') - volume1 = conn.create_volume(80, "us-east-1a") - volume2 = conn.create_volume(36, "us-east-1b") - volume3 = conn.create_volume(20, "us-east-1c") - vol1 = conn.get_all_volumes(volume_ids=volume3.id) - vol1.should.have.length_of(1) - vol1[0].size.should.equal(20) - vol1[0].zone.should.equal('us-east-1c') - vol2 = conn.get_all_volumes(volume_ids=[volume1.id, volume2.id]) - vol2.should.have.length_of(2) - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_volumes(volume_ids=['vol-does_not_exist']) - cm.exception.code.should.equal('InvalidVolume.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_volume_filters(): - conn = boto.connect_ec2('the_key', 'the_secret') - - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - instance.update() - - volume1 = conn.create_volume(80, "us-east-1a", encrypted=True) - volume2 = conn.create_volume(36, "us-east-1b", encrypted=False) - volume3 = conn.create_volume(20, "us-east-1c", encrypted=True) - - snapshot = volume3.create_snapshot(description='testsnap') - volume4 = conn.create_volume(25, "us-east-1a", snapshot=snapshot) - - conn.create_tags([volume1.id], {'testkey1': 'testvalue1'}) - conn.create_tags([volume2.id], {'testkey2': 'testvalue2'}) - - volume1.update() - volume2.update() - volume3.update() - volume4.update() - - block_mapping = instance.block_device_mapping['/dev/sda1'] - - volume_ids = (volume1.id, volume2.id, volume3.id, volume4.id, block_mapping.volume_id) - - volumes_by_attach_time = conn.get_all_volumes( - filters={'attachment.attach-time': block_mapping.attach_time}) - set([vol.id for vol in volumes_by_attach_time] - ).should.equal({block_mapping.volume_id}) - - volumes_by_attach_device = 
conn.get_all_volumes( - filters={'attachment.device': '/dev/sda1'}) - set([vol.id for vol in volumes_by_attach_device] - ).should.equal({block_mapping.volume_id}) - - volumes_by_attach_instance_id = conn.get_all_volumes( - filters={'attachment.instance-id': instance.id}) - set([vol.id for vol in volumes_by_attach_instance_id] - ).should.equal({block_mapping.volume_id}) - - volumes_by_attach_status = conn.get_all_volumes( - filters={'attachment.status': 'attached'}) - set([vol.id for vol in volumes_by_attach_status] - ).should.equal({block_mapping.volume_id}) - - volumes_by_create_time = conn.get_all_volumes( - filters={'create-time': volume4.create_time}) - set([vol.create_time for vol in volumes_by_create_time] - ).should.equal({volume4.create_time}) - - volumes_by_size = conn.get_all_volumes(filters={'size': volume2.size}) - set([vol.id for vol in volumes_by_size]).should.equal({volume2.id}) - - volumes_by_snapshot_id = conn.get_all_volumes( - filters={'snapshot-id': snapshot.id}) - set([vol.id for vol in volumes_by_snapshot_id] - ).should.equal({volume4.id}) - - volumes_by_status = conn.get_all_volumes(filters={'status': 'in-use'}) - set([vol.id for vol in volumes_by_status]).should.equal( - {block_mapping.volume_id}) - - volumes_by_id = conn.get_all_volumes(filters={'volume-id': volume1.id}) - set([vol.id for vol in volumes_by_id]).should.equal({volume1.id}) - - volumes_by_tag_key = conn.get_all_volumes(filters={'tag-key': 'testkey1'}) - set([vol.id for vol in volumes_by_tag_key]).should.equal({volume1.id}) - - volumes_by_tag_value = conn.get_all_volumes( - filters={'tag-value': 'testvalue1'}) - set([vol.id for vol in volumes_by_tag_value] - ).should.equal({volume1.id}) - - volumes_by_tag = conn.get_all_volumes( - filters={'tag:testkey1': 'testvalue1'}) - set([vol.id for vol in volumes_by_tag]).should.equal({volume1.id}) - - volumes_by_unencrypted = conn.get_all_volumes( - filters={'encrypted': 'false'}) - set([vol.id for vol in volumes_by_unencrypted if vol.id in volume_ids]).should.equal( - {block_mapping.volume_id, volume2.id} - ) - - volumes_by_encrypted = conn.get_all_volumes(filters={'encrypted': 'true'}) - set([vol.id for vol in volumes_by_encrypted if vol.id in volume_ids]).should.equal( - {volume1.id, volume3.id, volume4.id} - ) - - volumes_by_availability_zone = conn.get_all_volumes(filters={'availability-zone': 'us-east-1b'}) - set([vol.id for vol in volumes_by_availability_zone if vol.id in volume_ids]).should.equal( - {volume2.id} - ) - - -@mock_ec2_deprecated -def test_volume_attach_and_detach(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - volume = conn.create_volume(80, "us-east-1a") - - volume.update() - volume.volume_state().should.equal('available') - - with assert_raises(EC2ResponseError) as ex: - volume.attach(instance.id, "/dev/sdh", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the AttachVolume operation: Request would have succeeded, but DryRun flag is set') - - volume.attach(instance.id, "/dev/sdh") - - volume.update() - volume.volume_state().should.equal('in-use') - volume.attachment_state().should.equal('attached') - - volume.attach_data.instance_id.should.equal(instance.id) - - with assert_raises(EC2ResponseError) as ex: - volume.detach(dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - 
ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DetachVolume operation: Request would have succeeded, but DryRun flag is set') - - volume.detach() - - volume.update() - volume.volume_state().should.equal('available') - - with assert_raises(EC2ResponseError) as cm1: - volume.attach('i-1234abcd', "/dev/sdh") - cm1.exception.code.should.equal('InvalidInstanceID.NotFound') - cm1.exception.status.should.equal(400) - cm1.exception.request_id.should_not.be.none - - with assert_raises(EC2ResponseError) as cm2: - conn.detach_volume(volume.id, instance.id, "/dev/sdh") - cm2.exception.code.should.equal('InvalidAttachment.NotFound') - cm2.exception.status.should.equal(400) - cm2.exception.request_id.should_not.be.none - - with assert_raises(EC2ResponseError) as cm3: - conn.detach_volume(volume.id, 'i-1234abcd', "/dev/sdh") - cm3.exception.code.should.equal('InvalidInstanceID.NotFound') - cm3.exception.status.should.equal(400) - cm3.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_create_snapshot(): - conn = boto.connect_ec2('the_key', 'the_secret') - volume = conn.create_volume(80, "us-east-1a") - - with assert_raises(EC2ResponseError) as ex: - snapshot = volume.create_snapshot('a dryrun snapshot', dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set') - - snapshot = volume.create_snapshot('a test snapshot') - snapshot.update() - snapshot.status.should.equal('completed') - - snapshots = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] - snapshots.should.have.length_of(1) - snapshots[0].description.should.equal('a test snapshot') - snapshots[0].start_time.should_not.be.none - snapshots[0].encrypted.should.be(False) - - # Create snapshot without description - num_snapshots = len(conn.get_all_snapshots()) - - snapshot = volume.create_snapshot() - conn.get_all_snapshots().should.have.length_of(num_snapshots + 1) - - snapshot.delete() - conn.get_all_snapshots().should.have.length_of(num_snapshots) - - # Deleting something that was already deleted should throw an error - with assert_raises(EC2ResponseError) as cm: - snapshot.delete() - cm.exception.code.should.equal('InvalidSnapshot.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_create_encrypted_snapshot(): - conn = boto.connect_ec2('the_key', 'the_secret') - volume = conn.create_volume(80, "us-east-1a", encrypted=True) - snapshot = volume.create_snapshot('a test snapshot') - snapshot.update() - snapshot.status.should.equal('completed') - - snapshots = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] - snapshots.should.have.length_of(1) - snapshots[0].description.should.equal('a test snapshot') - snapshots[0].start_time.should_not.be.none - snapshots[0].encrypted.should.be(True) - - -@mock_ec2_deprecated -def test_filter_snapshot_by_id(): - conn = boto.connect_ec2('the_key', 'the_secret') - volume1 = conn.create_volume(36, "us-east-1a") - snap1 = volume1.create_snapshot('a test snapshot 1') - volume2 = conn.create_volume(42, 'us-east-1a') - snap2 = volume2.create_snapshot('a test snapshot 2') - volume3 = conn.create_volume(84, 'us-east-1a') - snap3 = volume3.create_snapshot('a test snapshot 3') - 
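# Editor's note (not part of the patch): the snapshot-id lookup below is the
# boto2 form; the boto3 equivalent is describe_snapshots(SnapshotIds=[...]).
# A minimal sketch, assuming @mock_ec2; the test name is hypothetical.
import boto3
from moto import mock_ec2

@mock_ec2
def sketch_filter_snapshot_by_id():
    client = boto3.client('ec2', region_name='us-east-1')
    vol = client.create_volume(AvailabilityZone='us-east-1a', Size=42)
    snap = client.create_snapshot(VolumeId=vol['VolumeId'])
    resp = client.describe_snapshots(SnapshotIds=[snap['SnapshotId']])
    assert len(resp['Snapshots']) == 1
    assert resp['Snapshots'][0]['VolumeId'] == vol['VolumeId']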
snapshots1 = conn.get_all_snapshots(snapshot_ids=snap2.id) - snapshots1.should.have.length_of(1) - snapshots1[0].volume_id.should.equal(volume2.id) - snapshots1[0].region.name.should.equal(conn.region.name) - snapshots2 = conn.get_all_snapshots(snapshot_ids=[snap2.id, snap3.id]) - snapshots2.should.have.length_of(2) - for s in snapshots2: - s.start_time.should_not.be.none - s.volume_id.should.be.within([volume2.id, volume3.id]) - s.region.name.should.equal(conn.region.name) - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_snapshots(snapshot_ids=['snap-does_not_exist']) - cm.exception.code.should.equal('InvalidSnapshot.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_snapshot_filters(): - conn = boto.connect_ec2('the_key', 'the_secret') - volume1 = conn.create_volume(20, "us-east-1a", encrypted=False) - volume2 = conn.create_volume(25, "us-east-1a", encrypted=True) - - snapshot1 = volume1.create_snapshot(description='testsnapshot1') - snapshot2 = volume1.create_snapshot(description='testsnapshot2') - snapshot3 = volume2.create_snapshot(description='testsnapshot3') - - conn.create_tags([snapshot1.id], {'testkey1': 'testvalue1'}) - conn.create_tags([snapshot2.id], {'testkey2': 'testvalue2'}) - - snapshots_by_description = conn.get_all_snapshots( - filters={'description': 'testsnapshot1'}) - set([snap.id for snap in snapshots_by_description] - ).should.equal({snapshot1.id}) - - snapshots_by_id = conn.get_all_snapshots( - filters={'snapshot-id': snapshot1.id}) - set([snap.id for snap in snapshots_by_id] - ).should.equal({snapshot1.id}) - - snapshots_by_start_time = conn.get_all_snapshots( - filters={'start-time': snapshot1.start_time}) - set([snap.start_time for snap in snapshots_by_start_time] - ).should.equal({snapshot1.start_time}) - - snapshots_by_volume_id = conn.get_all_snapshots( - filters={'volume-id': volume1.id}) - set([snap.id for snap in snapshots_by_volume_id] - ).should.equal({snapshot1.id, snapshot2.id}) - - snapshots_by_status = conn.get_all_snapshots( - filters={'status': 'completed'}) - ({snapshot1.id, snapshot2.id, snapshot3.id} - - {snap.id for snap in snapshots_by_status}).should.have.length_of(0) - - snapshots_by_volume_size = conn.get_all_snapshots( - filters={'volume-size': volume1.size}) - set([snap.id for snap in snapshots_by_volume_size] - ).should.equal({snapshot1.id, snapshot2.id}) - - snapshots_by_tag_key = conn.get_all_snapshots( - filters={'tag-key': 'testkey1'}) - set([snap.id for snap in snapshots_by_tag_key] - ).should.equal({snapshot1.id}) - - snapshots_by_tag_value = conn.get_all_snapshots( - filters={'tag-value': 'testvalue1'}) - set([snap.id for snap in snapshots_by_tag_value] - ).should.equal({snapshot1.id}) - - snapshots_by_tag = conn.get_all_snapshots( - filters={'tag:testkey1': 'testvalue1'}) - set([snap.id for snap in snapshots_by_tag] - ).should.equal({snapshot1.id}) - - snapshots_by_encrypted = conn.get_all_snapshots( - filters={'encrypted': 'true'}) - set([snap.id for snap in snapshots_by_encrypted] - ).should.equal({snapshot3.id}) - - -@mock_ec2_deprecated -def test_snapshot_attribute(): - import copy - - conn = boto.connect_ec2('the_key', 'the_secret') - volume = conn.create_volume(80, "us-east-1a") - snapshot = volume.create_snapshot() - - # Baseline - attributes = conn.get_snapshot_attribute( - snapshot.id, attribute='createVolumePermission') - attributes.name.should.equal('create_volume_permission') - attributes.attrs.should.have.length_of(0) - - 
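# Editor's note (not part of the patch): the boto3 shape of the
# createVolumePermission round trip exercised below. Whether moto accepted
# this boto3 parameter form at the time of this patch is an assumption;
# treat this as a sketch, not a verified test.
import boto3
from moto import mock_ec2

@mock_ec2
def sketch_create_volume_permission():
    client = boto3.client('ec2', region_name='us-east-1')
    vol = client.create_volume(AvailabilityZone='us-east-1a', Size=80)
    snap = client.create_snapshot(VolumeId=vol['VolumeId'])
    # Grant the 'all' group permission to create volumes from the snapshot.
    client.modify_snapshot_attribute(
        SnapshotId=snap['SnapshotId'],
        Attribute='createVolumePermission',
        OperationType='add',
        GroupNames=['all'])
    attrs = client.describe_snapshot_attribute(
        SnapshotId=snap['SnapshotId'],
        Attribute='createVolumePermission')
    assert {'Group': 'all'} in attrs['CreateVolumePermissions']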
ADD_GROUP_ARGS = {'snapshot_id': snapshot.id, - 'attribute': 'createVolumePermission', - 'operation': 'add', - 'groups': 'all'} - - REMOVE_GROUP_ARGS = {'snapshot_id': snapshot.id, - 'attribute': 'createVolumePermission', - 'operation': 'remove', - 'groups': 'all'} - - # Add 'all' group and confirm - - with assert_raises(EC2ResponseError) as ex: - conn.modify_snapshot_attribute( - **dict(ADD_GROUP_ARGS, **{'dry_run': True})) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set') - - conn.modify_snapshot_attribute(**ADD_GROUP_ARGS) - - attributes = conn.get_snapshot_attribute( - snapshot.id, attribute='createVolumePermission') - attributes.attrs['groups'].should.have.length_of(1) - attributes.attrs['groups'].should.equal(['all']) - - # Add is idempotent - conn.modify_snapshot_attribute.when.called_with( - **ADD_GROUP_ARGS).should_not.throw(EC2ResponseError) - - # Remove 'all' group and confirm - with assert_raises(EC2ResponseError) as ex: - conn.modify_snapshot_attribute( - **dict(REMOVE_GROUP_ARGS, **{'dry_run': True})) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set') - - conn.modify_snapshot_attribute(**REMOVE_GROUP_ARGS) - - attributes = conn.get_snapshot_attribute( - snapshot.id, attribute='createVolumePermission') - attributes.attrs.should.have.length_of(0) - - # Remove is idempotent - conn.modify_snapshot_attribute.when.called_with( - **REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError) - - # Error: Add with group != 'all' - with assert_raises(EC2ResponseError) as cm: - conn.modify_snapshot_attribute(snapshot.id, - attribute='createVolumePermission', - operation='add', - groups='everyone') - cm.exception.code.should.equal('InvalidAMIAttributeItemValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Add with invalid snapshot ID - with assert_raises(EC2ResponseError) as cm: - conn.modify_snapshot_attribute("snapshot-abcd1234", - attribute='createVolumePermission', - operation='add', - groups='all') - cm.exception.code.should.equal('InvalidSnapshot.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Remove with invalid snapshot ID - with assert_raises(EC2ResponseError) as cm: - conn.modify_snapshot_attribute("snapshot-abcd1234", - attribute='createVolumePermission', - operation='remove', - groups='all') - cm.exception.code.should.equal('InvalidSnapshot.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Add or remove with user ID instead of group - conn.modify_snapshot_attribute.when.called_with(snapshot.id, - attribute='createVolumePermission', - operation='add', - user_ids=['user']).should.throw(NotImplementedError) - conn.modify_snapshot_attribute.when.called_with(snapshot.id, - attribute='createVolumePermission', - operation='remove', - user_ids=['user']).should.throw(NotImplementedError) - - -@mock_ec2_deprecated -def test_create_volume_from_snapshot(): - conn = boto.connect_ec2('the_key', 'the_secret') - volume = conn.create_volume(80, "us-east-1a") - snapshot = 
volume.create_snapshot('a test snapshot') - - with assert_raises(EC2ResponseError) as ex: - snapshot = volume.create_snapshot('a test snapshot', dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set') - - snapshot = volume.create_snapshot('a test snapshot') - snapshot.update() - snapshot.status.should.equal('completed') - - new_volume = snapshot.create_volume('us-east-1a') - new_volume.size.should.equal(80) - new_volume.snapshot_id.should.equal(snapshot.id) - - -@mock_ec2_deprecated -def test_create_volume_from_encrypted_snapshot(): - conn = boto.connect_ec2('the_key', 'the_secret') - volume = conn.create_volume(80, "us-east-1a", encrypted=True) - - snapshot = volume.create_snapshot('a test snapshot') - snapshot.update() - snapshot.status.should.equal('completed') - - new_volume = snapshot.create_volume('us-east-1a') - new_volume.size.should.equal(80) - new_volume.snapshot_id.should.equal(snapshot.id) - new_volume.encrypted.should.be(True) - - -@mock_ec2_deprecated -def test_modify_attribute_blockDeviceMapping(): - """ - Reproduces the missing feature explained at [0], where we want to mock a - call to modify an instance attribute of type: blockDeviceMapping. - - [0] https://github.com/spulec/moto/issues/160 - """ - conn = boto.ec2.connect_to_region("us-east-1") - - reservation = conn.run_instances('ami-1234abcd') - - instance = reservation.instances[0] - - with assert_raises(EC2ResponseError) as ex: - instance.modify_attribute('blockDeviceMapping', { - '/dev/sda1': True}, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ModifyInstanceAttribute operation: Request would have succeeded, but DryRun flag is set') - - instance.modify_attribute('blockDeviceMapping', {'/dev/sda1': True}) - - instance = ec2_backends[conn.region.name].get_instance(instance.id) - instance.block_device_mapping.should.have.key('/dev/sda1') - instance.block_device_mapping[ - '/dev/sda1'].delete_on_termination.should.be(True) - - -@mock_ec2_deprecated -def test_volume_tag_escaping(): - conn = boto.connect_ec2('the_key', 'the_secret') - vol = conn.create_volume(10, 'us-east-1a') - snapshot = conn.create_snapshot(vol.id, 'Desc') - - with assert_raises(EC2ResponseError) as ex: - snapshot.add_tags({'key': ''}, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') - snaps = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] - dict(snaps[0].tags).should_not.be.equal( - {'key': ''}) - - snapshot.add_tags({'key': ''}) - - snaps = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] - dict(snaps[0].tags).should.equal({'key': ''}) - - -@freeze_time -@mock_ec2 -def test_copy_snapshot(): - ec2_client = boto3.client('ec2', region_name='eu-west-1') - dest_ec2_client = boto3.client('ec2', region_name='eu-west-2') - - volume_response = ec2_client.create_volume( - AvailabilityZone='eu-west-1a', Size=10 - ) - - create_snapshot_response = ec2_client.create_snapshot( - 
VolumeId=volume_response['VolumeId'] - ) - - copy_snapshot_response = dest_ec2_client.copy_snapshot( - SourceSnapshotId=create_snapshot_response['SnapshotId'], - SourceRegion="eu-west-1" - ) - - ec2 = boto3.resource('ec2', region_name='eu-west-1') - dest_ec2 = boto3.resource('ec2', region_name='eu-west-2') - - source = ec2.Snapshot(create_snapshot_response['SnapshotId']) - dest = dest_ec2.Snapshot(copy_snapshot_response['SnapshotId']) - - attribs = ['data_encryption_key_id', 'encrypted', - 'kms_key_id', 'owner_alias', 'owner_id', - 'progress', 'state', 'state_message', - 'tags', 'volume_id', 'volume_size'] - - for attrib in attribs: - getattr(source, attrib).should.equal(getattr(dest, attrib)) - - # Copy from non-existent source ID. - with assert_raises(ClientError) as cm: - create_snapshot_error = ec2_client.create_snapshot( - VolumeId='vol-abcd1234' - ) - cm.exception.response['Error']['Code'].should.equal('InvalidVolume.NotFound') - cm.exception.response['Error']['Message'].should.equal("The volume 'vol-abcd1234' does not exist.") - cm.exception.response['ResponseMetadata']['RequestId'].should_not.be.none - cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) - - # Copy from non-existent source region. - with assert_raises(ClientError) as cm: - copy_snapshot_response = dest_ec2_client.copy_snapshot( - SourceSnapshotId=create_snapshot_response['SnapshotId'], - SourceRegion="eu-west-2" - ) - cm.exception.response['Error']['Code'].should.equal('InvalidSnapshot.NotFound') - cm.exception.response['Error']['Message'].should.be.none - cm.exception.response['ResponseMetadata']['RequestId'].should_not.be.none - cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) - -@mock_ec2 -def test_search_for_many_snapshots(): - ec2_client = boto3.client('ec2', region_name='eu-west-1') - - volume_response = ec2_client.create_volume( - AvailabilityZone='eu-west-1a', Size=10 - ) - - snapshot_ids = [] - for i in range(1, 20): - create_snapshot_response = ec2_client.create_snapshot( - VolumeId=volume_response['VolumeId'] - ) - snapshot_ids.append(create_snapshot_response['SnapshotId']) - - snapshots_response = ec2_client.describe_snapshots( - SnapshotIds=snapshot_ids - ) - - assert len(snapshots_response['Snapshots']) == len(snapshot_ids) +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises + +from moto.ec2 import ec2_backends +import boto +import boto3 +from botocore.exceptions import ClientError +from boto.exception import EC2ResponseError +from freezegun import freeze_time +import sure # noqa + +from moto import mock_ec2_deprecated, mock_ec2 + + +@mock_ec2_deprecated +def test_create_and_delete_volume(): + conn = boto.connect_ec2('the_key', 'the_secret') + volume = conn.create_volume(80, "us-east-1a") + + all_volumes = conn.get_all_volumes() + + current_volume = [item for item in all_volumes if item.id == volume.id] + current_volume.should.have.length_of(1) + current_volume[0].size.should.equal(80) + current_volume[0].zone.should.equal("us-east-1a") + current_volume[0].encrypted.should.be(False) + + volume = current_volume[0] + + with assert_raises(EC2ResponseError) as ex: + volume.delete(dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteVolume operation: Request would have 
succeeded, but DryRun flag is set') + + volume.delete() + + all_volumes = conn.get_all_volumes() + my_volume = [item for item in all_volumes if item.id == volume.id] + my_volume.should.have.length_of(0) + + # Deleting something that was already deleted should throw an error + with assert_raises(EC2ResponseError) as cm: + volume.delete() + cm.exception.code.should.equal('InvalidVolume.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_create_encrypted_volume_dryrun(): + conn = boto.connect_ec2('the_key', 'the_secret') + with assert_raises(EC2ResponseError) as ex: + conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') + + +@mock_ec2_deprecated +def test_create_encrypted_volume(): + conn = boto.connect_ec2('the_key', 'the_secret') + volume = conn.create_volume(80, "us-east-1a", encrypted=True) + + with assert_raises(EC2ResponseError) as ex: + conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') + + all_volumes = [vol for vol in conn.get_all_volumes() if vol.id == volume.id] + all_volumes[0].encrypted.should.be(True) + + +@mock_ec2_deprecated +def test_filter_volume_by_id(): + conn = boto.connect_ec2('the_key', 'the_secret') + volume1 = conn.create_volume(80, "us-east-1a") + volume2 = conn.create_volume(36, "us-east-1b") + volume3 = conn.create_volume(20, "us-east-1c") + vol1 = conn.get_all_volumes(volume_ids=volume3.id) + vol1.should.have.length_of(1) + vol1[0].size.should.equal(20) + vol1[0].zone.should.equal('us-east-1c') + vol2 = conn.get_all_volumes(volume_ids=[volume1.id, volume2.id]) + vol2.should.have.length_of(2) + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_volumes(volume_ids=['vol-does_not_exist']) + cm.exception.code.should.equal('InvalidVolume.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_volume_filters(): + conn = boto.connect_ec2('the_key', 'the_secret') + + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + instance.update() + + volume1 = conn.create_volume(80, "us-east-1a", encrypted=True) + volume2 = conn.create_volume(36, "us-east-1b", encrypted=False) + volume3 = conn.create_volume(20, "us-east-1c", encrypted=True) + + snapshot = volume3.create_snapshot(description='testsnap') + volume4 = conn.create_volume(25, "us-east-1a", snapshot=snapshot) + + conn.create_tags([volume1.id], {'testkey1': 'testvalue1'}) + conn.create_tags([volume2.id], {'testkey2': 'testvalue2'}) + + volume1.update() + volume2.update() + volume3.update() + volume4.update() + + block_mapping = instance.block_device_mapping['/dev/sda1'] + + volume_ids = (volume1.id, volume2.id, volume3.id, volume4.id, block_mapping.volume_id) + + volumes_by_attach_time = conn.get_all_volumes( + filters={'attachment.attach-time': block_mapping.attach_time}) + set([vol.id for vol in volumes_by_attach_time] + ).should.equal({block_mapping.volume_id}) 
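# Editor's note (not part of the patch): boto3 spells these filters as a
# list of {'Name': ..., 'Values': [...]} dicts. A minimal sketch, assuming
# moto honours them for describe_volumes as it does for boto2's
# get_all_volumes in this test; the test name is hypothetical.
import boto3
from moto import mock_ec2

@mock_ec2
def sketch_volume_filters():
    client = boto3.client('ec2', region_name='us-east-1')
    client.create_volume(AvailabilityZone='us-east-1a', Size=36,
                         Encrypted=True)
    resp = client.describe_volumes(
        Filters=[{'Name': 'encrypted', 'Values': ['true']},
                 {'Name': 'size', 'Values': ['36']}])
    assert len(resp['Volumes']) == 1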
+ + volumes_by_attach_device = conn.get_all_volumes( + filters={'attachment.device': '/dev/sda1'}) + set([vol.id for vol in volumes_by_attach_device] + ).should.equal({block_mapping.volume_id}) + + volumes_by_attach_instance_id = conn.get_all_volumes( + filters={'attachment.instance-id': instance.id}) + set([vol.id for vol in volumes_by_attach_instance_id] + ).should.equal({block_mapping.volume_id}) + + volumes_by_attach_status = conn.get_all_volumes( + filters={'attachment.status': 'attached'}) + set([vol.id for vol in volumes_by_attach_status] + ).should.equal({block_mapping.volume_id}) + + volumes_by_create_time = conn.get_all_volumes( + filters={'create-time': volume4.create_time}) + set([vol.create_time for vol in volumes_by_create_time] + ).should.equal({volume4.create_time}) + + volumes_by_size = conn.get_all_volumes(filters={'size': volume2.size}) + set([vol.id for vol in volumes_by_size]).should.equal({volume2.id}) + + volumes_by_snapshot_id = conn.get_all_volumes( + filters={'snapshot-id': snapshot.id}) + set([vol.id for vol in volumes_by_snapshot_id] + ).should.equal({volume4.id}) + + volumes_by_status = conn.get_all_volumes(filters={'status': 'in-use'}) + set([vol.id for vol in volumes_by_status]).should.equal( + {block_mapping.volume_id}) + + volumes_by_id = conn.get_all_volumes(filters={'volume-id': volume1.id}) + set([vol.id for vol in volumes_by_id]).should.equal({volume1.id}) + + volumes_by_tag_key = conn.get_all_volumes(filters={'tag-key': 'testkey1'}) + set([vol.id for vol in volumes_by_tag_key]).should.equal({volume1.id}) + + volumes_by_tag_value = conn.get_all_volumes( + filters={'tag-value': 'testvalue1'}) + set([vol.id for vol in volumes_by_tag_value] + ).should.equal({volume1.id}) + + volumes_by_tag = conn.get_all_volumes( + filters={'tag:testkey1': 'testvalue1'}) + set([vol.id for vol in volumes_by_tag]).should.equal({volume1.id}) + + volumes_by_unencrypted = conn.get_all_volumes( + filters={'encrypted': 'false'}) + set([vol.id for vol in volumes_by_unencrypted if vol.id in volume_ids]).should.equal( + {block_mapping.volume_id, volume2.id} + ) + + volumes_by_encrypted = conn.get_all_volumes(filters={'encrypted': 'true'}) + set([vol.id for vol in volumes_by_encrypted if vol.id in volume_ids]).should.equal( + {volume1.id, volume3.id, volume4.id} + ) + + volumes_by_availability_zone = conn.get_all_volumes(filters={'availability-zone': 'us-east-1b'}) + set([vol.id for vol in volumes_by_availability_zone if vol.id in volume_ids]).should.equal( + {volume2.id} + ) + + +@mock_ec2_deprecated +def test_volume_attach_and_detach(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + volume = conn.create_volume(80, "us-east-1a") + + volume.update() + volume.volume_state().should.equal('available') + + with assert_raises(EC2ResponseError) as ex: + volume.attach(instance.id, "/dev/sdh", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AttachVolume operation: Request would have succeeded, but DryRun flag is set') + + volume.attach(instance.id, "/dev/sdh") + + volume.update() + volume.volume_state().should.equal('in-use') + volume.attachment_state().should.equal('attached') + + volume.attach_data.instance_id.should.equal(instance.id) + + with assert_raises(EC2ResponseError) as ex: + volume.detach(dry_run=True) + 
ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DetachVolume operation: Request would have succeeded, but DryRun flag is set') + + volume.detach() + + volume.update() + volume.volume_state().should.equal('available') + + with assert_raises(EC2ResponseError) as cm1: + volume.attach('i-1234abcd', "/dev/sdh") + cm1.exception.code.should.equal('InvalidInstanceID.NotFound') + cm1.exception.status.should.equal(400) + cm1.exception.request_id.should_not.be.none + + with assert_raises(EC2ResponseError) as cm2: + conn.detach_volume(volume.id, instance.id, "/dev/sdh") + cm2.exception.code.should.equal('InvalidAttachment.NotFound') + cm2.exception.status.should.equal(400) + cm2.exception.request_id.should_not.be.none + + with assert_raises(EC2ResponseError) as cm3: + conn.detach_volume(volume.id, 'i-1234abcd', "/dev/sdh") + cm3.exception.code.should.equal('InvalidInstanceID.NotFound') + cm3.exception.status.should.equal(400) + cm3.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_create_snapshot(): + conn = boto.connect_ec2('the_key', 'the_secret') + volume = conn.create_volume(80, "us-east-1a") + + with assert_raises(EC2ResponseError) as ex: + snapshot = volume.create_snapshot('a dryrun snapshot', dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set') + + snapshot = volume.create_snapshot('a test snapshot') + snapshot.update() + snapshot.status.should.equal('completed') + + snapshots = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] + snapshots.should.have.length_of(1) + snapshots[0].description.should.equal('a test snapshot') + snapshots[0].start_time.should_not.be.none + snapshots[0].encrypted.should.be(False) + + # Create snapshot without description + num_snapshots = len(conn.get_all_snapshots()) + + snapshot = volume.create_snapshot() + conn.get_all_snapshots().should.have.length_of(num_snapshots + 1) + + snapshot.delete() + conn.get_all_snapshots().should.have.length_of(num_snapshots) + + # Deleting something that was already deleted should throw an error + with assert_raises(EC2ResponseError) as cm: + snapshot.delete() + cm.exception.code.should.equal('InvalidSnapshot.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_create_encrypted_snapshot(): + conn = boto.connect_ec2('the_key', 'the_secret') + volume = conn.create_volume(80, "us-east-1a", encrypted=True) + snapshot = volume.create_snapshot('a test snapshot') + snapshot.update() + snapshot.status.should.equal('completed') + + snapshots = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] + snapshots.should.have.length_of(1) + snapshots[0].description.should.equal('a test snapshot') + snapshots[0].start_time.should_not.be.none + snapshots[0].encrypted.should.be(True) + + +@mock_ec2_deprecated +def test_filter_snapshot_by_id(): + conn = boto.connect_ec2('the_key', 'the_secret') + volume1 = conn.create_volume(36, "us-east-1a") + snap1 = volume1.create_snapshot('a test snapshot 1') + volume2 = conn.create_volume(42, 'us-east-1a') + snap2 = volume2.create_snapshot('a test snapshot 2') + volume3 = conn.create_volume(84, 'us-east-1a') 
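# Editor's note (not part of the patch): the InvalidSnapshot.NotFound case
# asserted later in this test looks like this through botocore. A minimal
# sketch, assuming @mock_ec2; the test name is hypothetical.
import boto3
from botocore.exceptions import ClientError
from moto import mock_ec2

@mock_ec2
def sketch_describe_missing_snapshot():
    client = boto3.client('ec2', region_name='us-east-1')
    try:
        client.describe_snapshots(SnapshotIds=['snap-does-not-exist'])
    except ClientError as err:
        assert err.response['Error']['Code'] == 'InvalidSnapshot.NotFound'
        assert err.response['ResponseMetadata']['HTTPStatusCode'] == 400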
+ snap3 = volume3.create_snapshot('a test snapshot 3') + snapshots1 = conn.get_all_snapshots(snapshot_ids=snap2.id) + snapshots1.should.have.length_of(1) + snapshots1[0].volume_id.should.equal(volume2.id) + snapshots1[0].region.name.should.equal(conn.region.name) + snapshots2 = conn.get_all_snapshots(snapshot_ids=[snap2.id, snap3.id]) + snapshots2.should.have.length_of(2) + for s in snapshots2: + s.start_time.should_not.be.none + s.volume_id.should.be.within([volume2.id, volume3.id]) + s.region.name.should.equal(conn.region.name) + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_snapshots(snapshot_ids=['snap-does_not_exist']) + cm.exception.code.should.equal('InvalidSnapshot.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_snapshot_filters(): + conn = boto.connect_ec2('the_key', 'the_secret') + volume1 = conn.create_volume(20, "us-east-1a", encrypted=False) + volume2 = conn.create_volume(25, "us-east-1a", encrypted=True) + + snapshot1 = volume1.create_snapshot(description='testsnapshot1') + snapshot2 = volume1.create_snapshot(description='testsnapshot2') + snapshot3 = volume2.create_snapshot(description='testsnapshot3') + + conn.create_tags([snapshot1.id], {'testkey1': 'testvalue1'}) + conn.create_tags([snapshot2.id], {'testkey2': 'testvalue2'}) + + snapshots_by_description = conn.get_all_snapshots( + filters={'description': 'testsnapshot1'}) + set([snap.id for snap in snapshots_by_description] + ).should.equal({snapshot1.id}) + + snapshots_by_id = conn.get_all_snapshots( + filters={'snapshot-id': snapshot1.id}) + set([snap.id for snap in snapshots_by_id] + ).should.equal({snapshot1.id}) + + snapshots_by_start_time = conn.get_all_snapshots( + filters={'start-time': snapshot1.start_time}) + set([snap.start_time for snap in snapshots_by_start_time] + ).should.equal({snapshot1.start_time}) + + snapshots_by_volume_id = conn.get_all_snapshots( + filters={'volume-id': volume1.id}) + set([snap.id for snap in snapshots_by_volume_id] + ).should.equal({snapshot1.id, snapshot2.id}) + + snapshots_by_status = conn.get_all_snapshots( + filters={'status': 'completed'}) + ({snapshot1.id, snapshot2.id, snapshot3.id} - + {snap.id for snap in snapshots_by_status}).should.have.length_of(0) + + snapshots_by_volume_size = conn.get_all_snapshots( + filters={'volume-size': volume1.size}) + set([snap.id for snap in snapshots_by_volume_size] + ).should.equal({snapshot1.id, snapshot2.id}) + + snapshots_by_tag_key = conn.get_all_snapshots( + filters={'tag-key': 'testkey1'}) + set([snap.id for snap in snapshots_by_tag_key] + ).should.equal({snapshot1.id}) + + snapshots_by_tag_value = conn.get_all_snapshots( + filters={'tag-value': 'testvalue1'}) + set([snap.id for snap in snapshots_by_tag_value] + ).should.equal({snapshot1.id}) + + snapshots_by_tag = conn.get_all_snapshots( + filters={'tag:testkey1': 'testvalue1'}) + set([snap.id for snap in snapshots_by_tag] + ).should.equal({snapshot1.id}) + + snapshots_by_encrypted = conn.get_all_snapshots( + filters={'encrypted': 'true'}) + set([snap.id for snap in snapshots_by_encrypted] + ).should.equal({snapshot3.id}) + + +@mock_ec2_deprecated +def test_snapshot_attribute(): + import copy + + conn = boto.connect_ec2('the_key', 'the_secret') + volume = conn.create_volume(80, "us-east-1a") + snapshot = volume.create_snapshot() + + # Baseline + attributes = conn.get_snapshot_attribute( + snapshot.id, attribute='createVolumePermission') + 
attributes.name.should.equal('create_volume_permission') + attributes.attrs.should.have.length_of(0) + + ADD_GROUP_ARGS = {'snapshot_id': snapshot.id, + 'attribute': 'createVolumePermission', + 'operation': 'add', + 'groups': 'all'} + + REMOVE_GROUP_ARGS = {'snapshot_id': snapshot.id, + 'attribute': 'createVolumePermission', + 'operation': 'remove', + 'groups': 'all'} + + # Add 'all' group and confirm + + with assert_raises(EC2ResponseError) as ex: + conn.modify_snapshot_attribute( + **dict(ADD_GROUP_ARGS, **{'dry_run': True})) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set') + + conn.modify_snapshot_attribute(**ADD_GROUP_ARGS) + + attributes = conn.get_snapshot_attribute( + snapshot.id, attribute='createVolumePermission') + attributes.attrs['groups'].should.have.length_of(1) + attributes.attrs['groups'].should.equal(['all']) + + # Add is idempotent + conn.modify_snapshot_attribute.when.called_with( + **ADD_GROUP_ARGS).should_not.throw(EC2ResponseError) + + # Remove 'all' group and confirm + with assert_raises(EC2ResponseError) as ex: + conn.modify_snapshot_attribute( + **dict(REMOVE_GROUP_ARGS, **{'dry_run': True})) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifySnapshotAttribute operation: Request would have succeeded, but DryRun flag is set') + + conn.modify_snapshot_attribute(**REMOVE_GROUP_ARGS) + + attributes = conn.get_snapshot_attribute( + snapshot.id, attribute='createVolumePermission') + attributes.attrs.should.have.length_of(0) + + # Remove is idempotent + conn.modify_snapshot_attribute.when.called_with( + **REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError) + + # Error: Add with group != 'all' + with assert_raises(EC2ResponseError) as cm: + conn.modify_snapshot_attribute(snapshot.id, + attribute='createVolumePermission', + operation='add', + groups='everyone') + cm.exception.code.should.equal('InvalidAMIAttributeItemValue') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Add with invalid snapshot ID + with assert_raises(EC2ResponseError) as cm: + conn.modify_snapshot_attribute("snapshot-abcd1234", + attribute='createVolumePermission', + operation='add', + groups='all') + cm.exception.code.should.equal('InvalidSnapshot.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Remove with invalid snapshot ID + with assert_raises(EC2ResponseError) as cm: + conn.modify_snapshot_attribute("snapshot-abcd1234", + attribute='createVolumePermission', + operation='remove', + groups='all') + cm.exception.code.should.equal('InvalidSnapshot.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Add or remove with user ID instead of group + conn.modify_snapshot_attribute.when.called_with(snapshot.id, + attribute='createVolumePermission', + operation='add', + user_ids=['user']).should.throw(NotImplementedError) + conn.modify_snapshot_attribute.when.called_with(snapshot.id, + attribute='createVolumePermission', + operation='remove', + user_ids=['user']).should.throw(NotImplementedError) + + +@mock_ec2_deprecated +def test_create_volume_from_snapshot(): + conn = 
boto.connect_ec2('the_key', 'the_secret') + volume = conn.create_volume(80, "us-east-1a") + snapshot = volume.create_snapshot('a test snapshot') + + with assert_raises(EC2ResponseError) as ex: + snapshot = volume.create_snapshot('a test snapshot', dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateSnapshot operation: Request would have succeeded, but DryRun flag is set') + + snapshot = volume.create_snapshot('a test snapshot') + snapshot.update() + snapshot.status.should.equal('completed') + + new_volume = snapshot.create_volume('us-east-1a') + new_volume.size.should.equal(80) + new_volume.snapshot_id.should.equal(snapshot.id) + + +@mock_ec2_deprecated +def test_create_volume_from_encrypted_snapshot(): + conn = boto.connect_ec2('the_key', 'the_secret') + volume = conn.create_volume(80, "us-east-1a", encrypted=True) + + snapshot = volume.create_snapshot('a test snapshot') + snapshot.update() + snapshot.status.should.equal('completed') + + new_volume = snapshot.create_volume('us-east-1a') + new_volume.size.should.equal(80) + new_volume.snapshot_id.should.equal(snapshot.id) + new_volume.encrypted.should.be(True) + + +@mock_ec2_deprecated +def test_modify_attribute_blockDeviceMapping(): + """ + Reproduces the missing feature explained at [0], where we want to mock a + call to modify an instance attribute of type: blockDeviceMapping. + + [0] https://github.com/spulec/moto/issues/160 + """ + conn = boto.ec2.connect_to_region("us-east-1") + + reservation = conn.run_instances('ami-1234abcd') + + instance = reservation.instances[0] + + with assert_raises(EC2ResponseError) as ex: + instance.modify_attribute('blockDeviceMapping', { + '/dev/sda1': True}, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyInstanceAttribute operation: Request would have succeeded, but DryRun flag is set') + + instance.modify_attribute('blockDeviceMapping', {'/dev/sda1': True}) + + instance = ec2_backends[conn.region.name].get_instance(instance.id) + instance.block_device_mapping.should.have.key('/dev/sda1') + instance.block_device_mapping[ + '/dev/sda1'].delete_on_termination.should.be(True) + + +@mock_ec2_deprecated +def test_volume_tag_escaping(): + conn = boto.connect_ec2('the_key', 'the_secret') + vol = conn.create_volume(10, 'us-east-1a') + snapshot = conn.create_snapshot(vol.id, 'Desc') + + with assert_raises(EC2ResponseError) as ex: + snapshot.add_tags({'key': ''}, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + snaps = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] + dict(snaps[0].tags).should_not.be.equal( + {'key': ''}) + + snapshot.add_tags({'key': ''}) + + snaps = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] + dict(snaps[0].tags).should.equal({'key': ''}) + + +@freeze_time +@mock_ec2 +def test_copy_snapshot(): + ec2_client = boto3.client('ec2', region_name='eu-west-1') + dest_ec2_client = boto3.client('ec2', region_name='eu-west-2') + + volume_response = ec2_client.create_volume( + AvailabilityZone='eu-west-1a', 
Size=10 + ) + + create_snapshot_response = ec2_client.create_snapshot( + VolumeId=volume_response['VolumeId'] + ) + + copy_snapshot_response = dest_ec2_client.copy_snapshot( + SourceSnapshotId=create_snapshot_response['SnapshotId'], + SourceRegion="eu-west-1" + ) + + ec2 = boto3.resource('ec2', region_name='eu-west-1') + dest_ec2 = boto3.resource('ec2', region_name='eu-west-2') + + source = ec2.Snapshot(create_snapshot_response['SnapshotId']) + dest = dest_ec2.Snapshot(copy_snapshot_response['SnapshotId']) + + attribs = ['data_encryption_key_id', 'encrypted', + 'kms_key_id', 'owner_alias', 'owner_id', + 'progress', 'state', 'state_message', + 'tags', 'volume_id', 'volume_size'] + + for attrib in attribs: + getattr(source, attrib).should.equal(getattr(dest, attrib)) + + # Copy from non-existent source ID. + with assert_raises(ClientError) as cm: + create_snapshot_error = ec2_client.create_snapshot( + VolumeId='vol-abcd1234' + ) + cm.exception.response['Error']['Code'].should.equal('InvalidVolume.NotFound') + cm.exception.response['Error']['Message'].should.equal("The volume 'vol-abcd1234' does not exist.") + cm.exception.response['ResponseMetadata']['RequestId'].should_not.be.none + cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + + # Copy from non-existent source region. + with assert_raises(ClientError) as cm: + copy_snapshot_response = dest_ec2_client.copy_snapshot( + SourceSnapshotId=create_snapshot_response['SnapshotId'], + SourceRegion="eu-west-2" + ) + cm.exception.response['Error']['Code'].should.equal('InvalidSnapshot.NotFound') + cm.exception.response['Error']['Message'].should.be.none + cm.exception.response['ResponseMetadata']['RequestId'].should_not.be.none + cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + +@mock_ec2 +def test_search_for_many_snapshots(): + ec2_client = boto3.client('ec2', region_name='eu-west-1') + + volume_response = ec2_client.create_volume( + AvailabilityZone='eu-west-1a', Size=10 + ) + + snapshot_ids = [] + for i in range(1, 20): + create_snapshot_response = ec2_client.create_snapshot( + VolumeId=volume_response['VolumeId'] + ) + snapshot_ids.append(create_snapshot_response['SnapshotId']) + + snapshots_response = ec2_client.describe_snapshots( + SnapshotIds=snapshot_ids + ) + + assert len(snapshots_response['Snapshots']) == len(snapshot_ids) diff --git a/tests/test_ec2/test_elastic_ip_addresses.py b/tests/test_ec2/test_elastic_ip_addresses.py index ca6637b18..3fad7fd3c 100644 --- a/tests/test_ec2/test_elastic_ip_addresses.py +++ b/tests/test_ec2/test_elastic_ip_addresses.py @@ -1,514 +1,514 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises - -import boto -import boto3 -from boto.exception import EC2ResponseError -import six - -import sure # noqa - -from moto import mock_ec2, mock_ec2_deprecated - -import logging - - -@mock_ec2_deprecated -def test_eip_allocate_classic(): - """Allocate/release Classic EIP""" - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as ex: - standard = conn.allocate_address(dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set') - - standard = conn.allocate_address() - 
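# Editor's note (not part of the patch): the boto3 counterpart of the
# classic/VPC allocation flow below. A minimal sketch, assuming @mock_ec2;
# the test name is hypothetical.
import boto3
from moto import mock_ec2

@mock_ec2
def sketch_allocate_and_release_eip():
    client = boto3.client('ec2', region_name='us-west-1')
    classic = client.allocate_address(Domain='standard')
    vpc = client.allocate_address(Domain='vpc')
    # Classic addresses are released by public IP, VPC ones by allocation id.
    client.release_address(PublicIp=classic['PublicIp'])
    client.release_address(AllocationId=vpc['AllocationId'])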
standard.should.be.a(boto.ec2.address.Address) - standard.public_ip.should.be.a(six.text_type) - standard.instance_id.should.be.none - standard.domain.should.be.equal("standard") - - with assert_raises(EC2ResponseError) as ex: - standard.release(dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set') - - standard.release() - standard.should_not.be.within(conn.get_all_addresses()) - - -@mock_ec2_deprecated -def test_eip_allocate_vpc(): - """Allocate/release VPC EIP""" - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as ex: - vpc = conn.allocate_address(domain="vpc", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set') - - vpc = conn.allocate_address(domain="vpc") - vpc.should.be.a(boto.ec2.address.Address) - vpc.domain.should.be.equal("vpc") - logging.debug("vpc alloc_id:".format(vpc.allocation_id)) - vpc.release() - -@mock_ec2 -def test_specific_eip_allocate_vpc(): - """Allocate VPC EIP with specific address""" - service = boto3.resource('ec2', region_name='us-west-1') - client = boto3.client('ec2', region_name='us-west-1') - - vpc = client.allocate_address(Domain="vpc", Address="127.38.43.222") - vpc['Domain'].should.be.equal("vpc") - vpc['PublicIp'].should.be.equal("127.38.43.222") - logging.debug("vpc alloc_id:".format(vpc['AllocationId'])) - - -@mock_ec2_deprecated -def test_eip_allocate_invalid_domain(): - """Allocate EIP invalid domain""" - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.allocate_address(domain="bogus") - cm.exception.code.should.equal('InvalidParameterValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_eip_associate_classic(): - """Associate/Disassociate EIP to classic instance""" - conn = boto.connect_ec2('the_key', 'the_secret') - - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - eip = conn.allocate_address() - eip.instance_id.should.be.none - - with assert_raises(EC2ResponseError) as cm: - conn.associate_address(public_ip=eip.public_ip) - cm.exception.code.should.equal('MissingParameter') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - with assert_raises(EC2ResponseError) as ex: - conn.associate_address(instance_id=instance.id, - public_ip=eip.public_ip, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the AssociateAddress operation: Request would have succeeded, but DryRun flag is set') - - conn.associate_address(instance_id=instance.id, public_ip=eip.public_ip) - # no .update() on address ): - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] - eip.instance_id.should.be.equal(instance.id) - - with assert_raises(EC2ResponseError) as ex: - conn.disassociate_address(public_ip=eip.public_ip, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - 
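# Editor's note (not part of the patch): the associate/disassociate flow
# these tests walk through, in boto3 form. A minimal sketch, assuming
# @mock_ec2 and that moto accepts an arbitrary ImageId; the test name is
# hypothetical.
import boto3
from moto import mock_ec2

@mock_ec2
def sketch_associate_eip():
    client = boto3.client('ec2', region_name='us-east-1')
    instance = client.run_instances(
        ImageId='ami-1234abcd', MinCount=1, MaxCount=1)['Instances'][0]
    alloc = client.allocate_address(Domain='vpc')
    assoc = client.associate_address(InstanceId=instance['InstanceId'],
                                     AllocationId=alloc['AllocationId'])
    client.disassociate_address(AssociationId=assoc['AssociationId'])
    client.release_address(AllocationId=alloc['AllocationId'])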
ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DisAssociateAddress operation: Request would have succeeded, but DryRun flag is set') - - conn.disassociate_address(public_ip=eip.public_ip) - # no .update() on address ): - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] - eip.instance_id.should.be.equal(u'') - eip.release() - eip.should_not.be.within(conn.get_all_addresses()) - eip = None - - instance.terminate() - - -@mock_ec2_deprecated -def test_eip_associate_vpc(): - """Associate/Disassociate EIP to VPC instance""" - conn = boto.connect_ec2('the_key', 'the_secret') - - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - eip = conn.allocate_address(domain='vpc') - eip.instance_id.should.be.none - - with assert_raises(EC2ResponseError) as cm: - conn.associate_address(allocation_id=eip.allocation_id) - cm.exception.code.should.equal('MissingParameter') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - conn.associate_address(instance_id=instance.id, - allocation_id=eip.allocation_id) - # no .update() on address ): - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] - eip.instance_id.should.be.equal(instance.id) - conn.disassociate_address(association_id=eip.association_id) - # no .update() on address ): - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] - eip.instance_id.should.be.equal(u'') - eip.association_id.should.be.none - - with assert_raises(EC2ResponseError) as ex: - eip.release(dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set') - - eip.release() - eip = None - - instance.terminate() - - -@mock_ec2 -def test_eip_boto3_vpc_association(): - """Associate EIP to VPC instance in a new subnet with boto3""" - service = boto3.resource('ec2', region_name='us-west-1') - client = boto3.client('ec2', region_name='us-west-1') - vpc_res = client.create_vpc(CidrBlock='10.0.0.0/24') - subnet_res = client.create_subnet( - VpcId=vpc_res['Vpc']['VpcId'], CidrBlock='10.0.0.0/24') - instance = service.create_instances(**{ - 'InstanceType': 't2.micro', - 'ImageId': 'ami-test', - 'MinCount': 1, - 'MaxCount': 1, - 'SubnetId': subnet_res['Subnet']['SubnetId'] - })[0] - allocation_id = client.allocate_address(Domain='vpc')['AllocationId'] - address = service.VpcAddress(allocation_id) - address.load() - address.association_id.should.be.none - address.instance_id.should.be.empty - address.network_interface_id.should.be.empty - association_id = client.associate_address( - InstanceId=instance.id, - AllocationId=allocation_id, - AllowReassociation=False) - instance.load() - address.reload() - address.association_id.should_not.be.none - instance.public_ip_address.should_not.be.none - instance.public_dns_name.should_not.be.none - address.network_interface_id.should.equal(instance.network_interfaces_attribute[0].get('NetworkInterfaceId')) - address.public_ip.should.equal(instance.public_ip_address) - address.instance_id.should.equal(instance.id) - - client.disassociate_address(AssociationId=address.association_id) - instance.reload() - address.reload() - instance.public_ip_address.should.be.none - address.network_interface_id.should.be.empty - address.association_id.should.be.none - 
address.instance_id.should.be.empty - - -@mock_ec2_deprecated -def test_eip_associate_network_interface(): - """Associate/Disassociate EIP to NIC""" - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - eni = conn.create_network_interface(subnet.id) - - eip = conn.allocate_address(domain='vpc') - eip.network_interface_id.should.be.none - - with assert_raises(EC2ResponseError) as cm: - conn.associate_address(network_interface_id=eni.id) - cm.exception.code.should.equal('MissingParameter') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - conn.associate_address(network_interface_id=eni.id, - allocation_id=eip.allocation_id) - # no .update() on address ): - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] - eip.network_interface_id.should.be.equal(eni.id) - - conn.disassociate_address(association_id=eip.association_id) - # no .update() on address ): - eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] - eip.network_interface_id.should.be.equal(u'') - eip.association_id.should.be.none - eip.release() - eip = None - - -@mock_ec2_deprecated -def test_eip_reassociate(): - """reassociate EIP""" - conn = boto.connect_ec2('the_key', 'the_secret') - - reservation = conn.run_instances('ami-1234abcd', min_count=2) - instance1, instance2 = reservation.instances - - eip = conn.allocate_address() - conn.associate_address(instance_id=instance1.id, public_ip=eip.public_ip) - - # Same ID is idempotent - conn.associate_address(instance_id=instance1.id, public_ip=eip.public_ip) - - # Different ID detects resource association - with assert_raises(EC2ResponseError) as cm: - conn.associate_address( - instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=False) - cm.exception.code.should.equal('Resource.AlreadyAssociated') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - conn.associate_address.when.called_with( - instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=True).should_not.throw(EC2ResponseError) - - eip.release() - eip = None - - instance1.terminate() - instance2.terminate() - - -@mock_ec2_deprecated -def test_eip_reassociate_nic(): - """reassociate EIP""" - conn = boto.connect_vpc('the_key', 'the_secret') - - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - eni1 = conn.create_network_interface(subnet.id) - eni2 = conn.create_network_interface(subnet.id) - - eip = conn.allocate_address() - conn.associate_address(network_interface_id=eni1.id, - public_ip=eip.public_ip) - - # Same ID is idempotent - conn.associate_address(network_interface_id=eni1.id, - public_ip=eip.public_ip) - - # Different ID detects resource association - with assert_raises(EC2ResponseError) as cm: - conn.associate_address( - network_interface_id=eni2.id, public_ip=eip.public_ip) - cm.exception.code.should.equal('Resource.AlreadyAssociated') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - conn.associate_address.when.called_with( - network_interface_id=eni2.id, public_ip=eip.public_ip, allow_reassociation=True).should_not.throw(EC2ResponseError) - - eip.release() - eip = None - - -@mock_ec2_deprecated -def test_eip_associate_invalid_args(): - """Associate EIP, invalid args """ - conn = boto.connect_ec2('the_key', 'the_secret') - - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - eip = 
conn.allocate_address() - - with assert_raises(EC2ResponseError) as cm: - conn.associate_address(instance_id=instance.id) - cm.exception.code.should.equal('MissingParameter') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - instance.terminate() - - -@mock_ec2_deprecated -def test_eip_disassociate_bogus_association(): - """Disassociate bogus EIP""" - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.disassociate_address(association_id="bogus") - cm.exception.code.should.equal('InvalidAssociationID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_eip_release_bogus_eip(): - """Release bogus EIP""" - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.release_address(allocation_id="bogus") - cm.exception.code.should.equal('InvalidAllocationID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_eip_disassociate_arg_error(): - """Invalid arguments disassociate address""" - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.disassociate_address() - cm.exception.code.should.equal('MissingParameter') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_eip_release_arg_error(): - """Invalid arguments release address""" - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.release_address() - cm.exception.code.should.equal('MissingParameter') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_eip_describe(): - """Listing of allocated Elastic IP Addresses.""" - conn = boto.connect_ec2('the_key', 'the_secret') - eips = [] - number_of_classic_ips = 2 - number_of_vpc_ips = 2 - - # allocate some IPs - for _ in range(number_of_classic_ips): - eips.append(conn.allocate_address()) - for _ in range(number_of_vpc_ips): - eips.append(conn.allocate_address(domain='vpc')) - len(eips).should.be.equal(number_of_classic_ips + number_of_vpc_ips) - - # Can we find each one individually? - for eip in eips: - if eip.allocation_id: - lookup_addresses = conn.get_all_addresses( - allocation_ids=[eip.allocation_id]) - else: - lookup_addresses = conn.get_all_addresses( - addresses=[eip.public_ip]) - len(lookup_addresses).should.be.equal(1) - lookup_addresses[0].public_ip.should.be.equal(eip.public_ip) - - # Can we find first two when we search for them? 
- lookup_addresses = conn.get_all_addresses( - addresses=[eips[0].public_ip, eips[1].public_ip]) - len(lookup_addresses).should.be.equal(2) - lookup_addresses[0].public_ip.should.be.equal(eips[0].public_ip) - lookup_addresses[1].public_ip.should.be.equal(eips[1].public_ip) - - # Release all IPs - for eip in eips: - eip.release() - len(conn.get_all_addresses()).should.be.equal(0) - - -@mock_ec2_deprecated -def test_eip_describe_none(): - """Error when search for bogus IP""" - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_addresses(addresses=["256.256.256.256"]) - cm.exception.code.should.equal('InvalidAddress.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2 -def test_eip_filters(): - service = boto3.resource('ec2', region_name='us-west-1') - client = boto3.client('ec2', region_name='us-west-1') - vpc_res = client.create_vpc(CidrBlock='10.0.0.0/24') - subnet_res = client.create_subnet( - VpcId=vpc_res['Vpc']['VpcId'], CidrBlock='10.0.0.0/24') - - def create_inst_with_eip(): - instance = service.create_instances(**{ - 'InstanceType': 't2.micro', - 'ImageId': 'ami-test', - 'MinCount': 1, - 'MaxCount': 1, - 'SubnetId': subnet_res['Subnet']['SubnetId'] - })[0] - allocation_id = client.allocate_address(Domain='vpc')['AllocationId'] - _ = client.associate_address( - InstanceId=instance.id, - AllocationId=allocation_id, - AllowReassociation=False) - instance.load() - address = service.VpcAddress(allocation_id) - address.load() - return instance, address - - inst1, eip1 = create_inst_with_eip() - inst2, eip2 = create_inst_with_eip() - inst3, eip3 = create_inst_with_eip() - - # Param search by AllocationId - addresses = list(service.vpc_addresses.filter(AllocationIds=[eip2.allocation_id])) - len(addresses).should.be.equal(1) - addresses[0].public_ip.should.equal(eip2.public_ip) - inst2.public_ip_address.should.equal(addresses[0].public_ip) - - # Param search by PublicIp - addresses = list(service.vpc_addresses.filter(PublicIps=[eip3.public_ip])) - len(addresses).should.be.equal(1) - addresses[0].public_ip.should.equal(eip3.public_ip) - inst3.public_ip_address.should.equal(addresses[0].public_ip) - - # Param search by Filter - def check_vpc_filter_valid(filter_name, filter_values): - addresses = list(service.vpc_addresses.filter( - Filters=[{'Name': filter_name, - 'Values': filter_values}])) - len(addresses).should.equal(2) - ips = [addr.public_ip for addr in addresses] - set(ips).should.equal(set([eip1.public_ip, eip2.public_ip])) - ips.should.contain(inst1.public_ip_address) - - def check_vpc_filter_invalid(filter_name): - addresses = list(service.vpc_addresses.filter( - Filters=[{'Name': filter_name, - 'Values': ['dummy1', 'dummy2']}])) - len(addresses).should.equal(0) - - def check_vpc_filter(filter_name, filter_values): - check_vpc_filter_valid(filter_name, filter_values) - check_vpc_filter_invalid(filter_name) - - check_vpc_filter('allocation-id', [eip1.allocation_id, eip2.allocation_id]) - check_vpc_filter('association-id', [eip1.association_id, eip2.association_id]) - check_vpc_filter('instance-id', [inst1.id, inst2.id]) - check_vpc_filter( - 'network-interface-id', - [inst1.network_interfaces_attribute[0].get('NetworkInterfaceId'), - inst2.network_interfaces_attribute[0].get('NetworkInterfaceId')]) - check_vpc_filter( - 'private-ip-address', - [inst1.network_interfaces_attribute[0].get('PrivateIpAddress'), - 
         inst2.network_interfaces_attribute[0].get('PrivateIpAddress')])
-    check_vpc_filter('public-ip', [inst1.public_ip_address, inst2.public_ip_address])
-
-    # all the ips are in a VPC
-    addresses = list(service.vpc_addresses.filter(
-        Filters=[{'Name': 'domain', 'Values': ['vpc']}]))
-    len(addresses).should.equal(3)
+from __future__ import unicode_literals
+# Ensure 'assert_raises' context manager support for Python 2.6
+import tests.backport_assert_raises
+from nose.tools import assert_raises
+
+import boto
+import boto3
+from boto.exception import EC2ResponseError
+import six
+
+import sure  # noqa
+
+from moto import mock_ec2, mock_ec2_deprecated
+
+import logging
+
+
+@mock_ec2_deprecated
+def test_eip_allocate_classic():
+    """Allocate/release Classic EIP"""
+    conn = boto.connect_ec2('the_key', 'the_secret')
+
+    with assert_raises(EC2ResponseError) as ex:
+        standard = conn.allocate_address(dry_run=True)
+    ex.exception.error_code.should.equal('DryRunOperation')
+    ex.exception.status.should.equal(400)
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set')
+
+    standard = conn.allocate_address()
+    standard.should.be.a(boto.ec2.address.Address)
+    standard.public_ip.should.be.a(six.text_type)
+    standard.instance_id.should.be.none
+    standard.domain.should.be.equal("standard")
+
+    with assert_raises(EC2ResponseError) as ex:
+        standard.release(dry_run=True)
+    ex.exception.error_code.should.equal('DryRunOperation')
+    ex.exception.status.should.equal(400)
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set')
+
+    standard.release()
+    standard.should_not.be.within(conn.get_all_addresses())
+
+
+@mock_ec2_deprecated
+def test_eip_allocate_vpc():
+    """Allocate/release VPC EIP"""
+    conn = boto.connect_ec2('the_key', 'the_secret')
+
+    with assert_raises(EC2ResponseError) as ex:
+        vpc = conn.allocate_address(domain="vpc", dry_run=True)
+    ex.exception.error_code.should.equal('DryRunOperation')
+    ex.exception.status.should.equal(400)
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the AllocateAddress operation: Request would have succeeded, but DryRun flag is set')
+
+    vpc = conn.allocate_address(domain="vpc")
+    vpc.should.be.a(boto.ec2.address.Address)
+    vpc.domain.should.be.equal("vpc")
+    logging.debug("vpc alloc_id: {0}".format(vpc.allocation_id))
+    vpc.release()
+
+
+@mock_ec2
+def test_specific_eip_allocate_vpc():
+    """Allocate VPC EIP with specific address"""
+    service = boto3.resource('ec2', region_name='us-west-1')
+    client = boto3.client('ec2', region_name='us-west-1')
+
+    vpc = client.allocate_address(Domain="vpc", Address="127.38.43.222")
+    vpc['Domain'].should.be.equal("vpc")
+    vpc['PublicIp'].should.be.equal("127.38.43.222")
+    logging.debug("vpc alloc_id: {0}".format(vpc['AllocationId']))
+
+
+@mock_ec2_deprecated
+def test_eip_allocate_invalid_domain():
+    """Allocate EIP invalid domain"""
+    conn = boto.connect_ec2('the_key', 'the_secret')
+
+    with assert_raises(EC2ResponseError) as cm:
+        conn.allocate_address(domain="bogus")
+    cm.exception.code.should.equal('InvalidParameterValue')
+    cm.exception.status.should.equal(400)
+    cm.exception.request_id.should_not.be.none
+
+
+@mock_ec2_deprecated
+def test_eip_associate_classic():
+    """Associate/Disassociate EIP to classic instance"""
+    conn = boto.connect_ec2('the_key', 'the_secret')
+
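+    # Boot a plain EC2-Classic instance for the EIP to attach to; the test
+    # below associates and then disassociates the address against it.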
+ reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + eip = conn.allocate_address() + eip.instance_id.should.be.none + + with assert_raises(EC2ResponseError) as cm: + conn.associate_address(public_ip=eip.public_ip) + cm.exception.code.should.equal('MissingParameter') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + with assert_raises(EC2ResponseError) as ex: + conn.associate_address(instance_id=instance.id, + public_ip=eip.public_ip, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AssociateAddress operation: Request would have succeeded, but DryRun flag is set') + + conn.associate_address(instance_id=instance.id, public_ip=eip.public_ip) + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] + eip.instance_id.should.be.equal(instance.id) + + with assert_raises(EC2ResponseError) as ex: + conn.disassociate_address(public_ip=eip.public_ip, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DisAssociateAddress operation: Request would have succeeded, but DryRun flag is set') + + conn.disassociate_address(public_ip=eip.public_ip) + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] + eip.instance_id.should.be.equal(u'') + eip.release() + eip.should_not.be.within(conn.get_all_addresses()) + eip = None + + instance.terminate() + + +@mock_ec2_deprecated +def test_eip_associate_vpc(): + """Associate/Disassociate EIP to VPC instance""" + conn = boto.connect_ec2('the_key', 'the_secret') + + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + eip = conn.allocate_address(domain='vpc') + eip.instance_id.should.be.none + + with assert_raises(EC2ResponseError) as cm: + conn.associate_address(allocation_id=eip.allocation_id) + cm.exception.code.should.equal('MissingParameter') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + conn.associate_address(instance_id=instance.id, + allocation_id=eip.allocation_id) + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] + eip.instance_id.should.be.equal(instance.id) + conn.disassociate_address(association_id=eip.association_id) + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] + eip.instance_id.should.be.equal(u'') + eip.association_id.should.be.none + + with assert_raises(EC2ResponseError) as ex: + eip.release(dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ReleaseAddress operation: Request would have succeeded, but DryRun flag is set') + + eip.release() + eip = None + + instance.terminate() + + +@mock_ec2 +def test_eip_boto3_vpc_association(): + """Associate EIP to VPC instance in a new subnet with boto3""" + service = boto3.resource('ec2', region_name='us-west-1') + client = boto3.client('ec2', region_name='us-west-1') + vpc_res = client.create_vpc(CidrBlock='10.0.0.0/24') + subnet_res = client.create_subnet( + VpcId=vpc_res['Vpc']['VpcId'], CidrBlock='10.0.0.0/24') + instance = 
service.create_instances(**{ + 'InstanceType': 't2.micro', + 'ImageId': 'ami-test', + 'MinCount': 1, + 'MaxCount': 1, + 'SubnetId': subnet_res['Subnet']['SubnetId'] + })[0] + allocation_id = client.allocate_address(Domain='vpc')['AllocationId'] + address = service.VpcAddress(allocation_id) + address.load() + address.association_id.should.be.none + address.instance_id.should.be.empty + address.network_interface_id.should.be.empty + association_id = client.associate_address( + InstanceId=instance.id, + AllocationId=allocation_id, + AllowReassociation=False) + instance.load() + address.reload() + address.association_id.should_not.be.none + instance.public_ip_address.should_not.be.none + instance.public_dns_name.should_not.be.none + address.network_interface_id.should.equal(instance.network_interfaces_attribute[0].get('NetworkInterfaceId')) + address.public_ip.should.equal(instance.public_ip_address) + address.instance_id.should.equal(instance.id) + + client.disassociate_address(AssociationId=address.association_id) + instance.reload() + address.reload() + instance.public_ip_address.should.be.none + address.network_interface_id.should.be.empty + address.association_id.should.be.none + address.instance_id.should.be.empty + + +@mock_ec2_deprecated +def test_eip_associate_network_interface(): + """Associate/Disassociate EIP to NIC""" + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + eni = conn.create_network_interface(subnet.id) + + eip = conn.allocate_address(domain='vpc') + eip.network_interface_id.should.be.none + + with assert_raises(EC2ResponseError) as cm: + conn.associate_address(network_interface_id=eni.id) + cm.exception.code.should.equal('MissingParameter') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + conn.associate_address(network_interface_id=eni.id, + allocation_id=eip.allocation_id) + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] + eip.network_interface_id.should.be.equal(eni.id) + + conn.disassociate_address(association_id=eip.association_id) + # no .update() on address ): + eip = conn.get_all_addresses(addresses=[eip.public_ip])[0] + eip.network_interface_id.should.be.equal(u'') + eip.association_id.should.be.none + eip.release() + eip = None + + +@mock_ec2_deprecated +def test_eip_reassociate(): + """reassociate EIP""" + conn = boto.connect_ec2('the_key', 'the_secret') + + reservation = conn.run_instances('ami-1234abcd', min_count=2) + instance1, instance2 = reservation.instances + + eip = conn.allocate_address() + conn.associate_address(instance_id=instance1.id, public_ip=eip.public_ip) + + # Same ID is idempotent + conn.associate_address(instance_id=instance1.id, public_ip=eip.public_ip) + + # Different ID detects resource association + with assert_raises(EC2ResponseError) as cm: + conn.associate_address( + instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=False) + cm.exception.code.should.equal('Resource.AlreadyAssociated') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + conn.associate_address.when.called_with( + instance_id=instance2.id, public_ip=eip.public_ip, allow_reassociation=True).should_not.throw(EC2ResponseError) + + eip.release() + eip = None + + instance1.terminate() + instance2.terminate() + + +@mock_ec2_deprecated +def test_eip_reassociate_nic(): + """reassociate EIP""" + conn = boto.connect_vpc('the_key', 'the_secret') + 
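+    # Set up a VPC with two ENIs in one subnet: the EIP is bound to eni1,
+    # and re-binding it to eni2 must fail unless allow_reassociation is set.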
+ vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + eni1 = conn.create_network_interface(subnet.id) + eni2 = conn.create_network_interface(subnet.id) + + eip = conn.allocate_address() + conn.associate_address(network_interface_id=eni1.id, + public_ip=eip.public_ip) + + # Same ID is idempotent + conn.associate_address(network_interface_id=eni1.id, + public_ip=eip.public_ip) + + # Different ID detects resource association + with assert_raises(EC2ResponseError) as cm: + conn.associate_address( + network_interface_id=eni2.id, public_ip=eip.public_ip) + cm.exception.code.should.equal('Resource.AlreadyAssociated') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + conn.associate_address.when.called_with( + network_interface_id=eni2.id, public_ip=eip.public_ip, allow_reassociation=True).should_not.throw(EC2ResponseError) + + eip.release() + eip = None + + +@mock_ec2_deprecated +def test_eip_associate_invalid_args(): + """Associate EIP, invalid args """ + conn = boto.connect_ec2('the_key', 'the_secret') + + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + eip = conn.allocate_address() + + with assert_raises(EC2ResponseError) as cm: + conn.associate_address(instance_id=instance.id) + cm.exception.code.should.equal('MissingParameter') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + instance.terminate() + + +@mock_ec2_deprecated +def test_eip_disassociate_bogus_association(): + """Disassociate bogus EIP""" + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.disassociate_address(association_id="bogus") + cm.exception.code.should.equal('InvalidAssociationID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_eip_release_bogus_eip(): + """Release bogus EIP""" + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.release_address(allocation_id="bogus") + cm.exception.code.should.equal('InvalidAllocationID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_eip_disassociate_arg_error(): + """Invalid arguments disassociate address""" + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.disassociate_address() + cm.exception.code.should.equal('MissingParameter') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_eip_release_arg_error(): + """Invalid arguments release address""" + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.release_address() + cm.exception.code.should.equal('MissingParameter') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_eip_describe(): + """Listing of allocated Elastic IP Addresses.""" + conn = boto.connect_ec2('the_key', 'the_secret') + eips = [] + number_of_classic_ips = 2 + number_of_vpc_ips = 2 + + # allocate some IPs + for _ in range(number_of_classic_ips): + eips.append(conn.allocate_address()) + for _ in range(number_of_vpc_ips): + eips.append(conn.allocate_address(domain='vpc')) + len(eips).should.be.equal(number_of_classic_ips + number_of_vpc_ips) + + # Can we find each one individually? 
+ for eip in eips: + if eip.allocation_id: + lookup_addresses = conn.get_all_addresses( + allocation_ids=[eip.allocation_id]) + else: + lookup_addresses = conn.get_all_addresses( + addresses=[eip.public_ip]) + len(lookup_addresses).should.be.equal(1) + lookup_addresses[0].public_ip.should.be.equal(eip.public_ip) + + # Can we find first two when we search for them? + lookup_addresses = conn.get_all_addresses( + addresses=[eips[0].public_ip, eips[1].public_ip]) + len(lookup_addresses).should.be.equal(2) + lookup_addresses[0].public_ip.should.be.equal(eips[0].public_ip) + lookup_addresses[1].public_ip.should.be.equal(eips[1].public_ip) + + # Release all IPs + for eip in eips: + eip.release() + len(conn.get_all_addresses()).should.be.equal(0) + + +@mock_ec2_deprecated +def test_eip_describe_none(): + """Error when search for bogus IP""" + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_addresses(addresses=["256.256.256.256"]) + cm.exception.code.should.equal('InvalidAddress.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2 +def test_eip_filters(): + service = boto3.resource('ec2', region_name='us-west-1') + client = boto3.client('ec2', region_name='us-west-1') + vpc_res = client.create_vpc(CidrBlock='10.0.0.0/24') + subnet_res = client.create_subnet( + VpcId=vpc_res['Vpc']['VpcId'], CidrBlock='10.0.0.0/24') + + def create_inst_with_eip(): + instance = service.create_instances(**{ + 'InstanceType': 't2.micro', + 'ImageId': 'ami-test', + 'MinCount': 1, + 'MaxCount': 1, + 'SubnetId': subnet_res['Subnet']['SubnetId'] + })[0] + allocation_id = client.allocate_address(Domain='vpc')['AllocationId'] + _ = client.associate_address( + InstanceId=instance.id, + AllocationId=allocation_id, + AllowReassociation=False) + instance.load() + address = service.VpcAddress(allocation_id) + address.load() + return instance, address + + inst1, eip1 = create_inst_with_eip() + inst2, eip2 = create_inst_with_eip() + inst3, eip3 = create_inst_with_eip() + + # Param search by AllocationId + addresses = list(service.vpc_addresses.filter(AllocationIds=[eip2.allocation_id])) + len(addresses).should.be.equal(1) + addresses[0].public_ip.should.equal(eip2.public_ip) + inst2.public_ip_address.should.equal(addresses[0].public_ip) + + # Param search by PublicIp + addresses = list(service.vpc_addresses.filter(PublicIps=[eip3.public_ip])) + len(addresses).should.be.equal(1) + addresses[0].public_ip.should.equal(eip3.public_ip) + inst3.public_ip_address.should.equal(addresses[0].public_ip) + + # Param search by Filter + def check_vpc_filter_valid(filter_name, filter_values): + addresses = list(service.vpc_addresses.filter( + Filters=[{'Name': filter_name, + 'Values': filter_values}])) + len(addresses).should.equal(2) + ips = [addr.public_ip for addr in addresses] + set(ips).should.equal(set([eip1.public_ip, eip2.public_ip])) + ips.should.contain(inst1.public_ip_address) + + def check_vpc_filter_invalid(filter_name): + addresses = list(service.vpc_addresses.filter( + Filters=[{'Name': filter_name, + 'Values': ['dummy1', 'dummy2']}])) + len(addresses).should.equal(0) + + def check_vpc_filter(filter_name, filter_values): + check_vpc_filter_valid(filter_name, filter_values) + check_vpc_filter_invalid(filter_name) + + check_vpc_filter('allocation-id', [eip1.allocation_id, eip2.allocation_id]) + check_vpc_filter('association-id', [eip1.association_id, eip2.association_id]) + check_vpc_filter('instance-id', 
[inst1.id, inst2.id]) + check_vpc_filter( + 'network-interface-id', + [inst1.network_interfaces_attribute[0].get('NetworkInterfaceId'), + inst2.network_interfaces_attribute[0].get('NetworkInterfaceId')]) + check_vpc_filter( + 'private-ip-address', + [inst1.network_interfaces_attribute[0].get('PrivateIpAddress'), + inst2.network_interfaces_attribute[0].get('PrivateIpAddress')]) + check_vpc_filter('public-ip', [inst1.public_ip_address, inst2.public_ip_address]) + + # all the ips are in a VPC + addresses = list(service.vpc_addresses.filter( + Filters=[{'Name': 'domain', 'Values': ['vpc']}])) + len(addresses).should.equal(3) diff --git a/tests/test_ec2/test_elastic_network_interfaces.py b/tests/test_ec2/test_elastic_network_interfaces.py index 828f9d917..56959e484 100644 --- a/tests/test_ec2/test_elastic_network_interfaces.py +++ b/tests/test_ec2/test_elastic_network_interfaces.py @@ -1,362 +1,362 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises - -import boto3 -from botocore.exceptions import ClientError -import boto -import boto.cloudformation -import boto.ec2 -from boto.exception import EC2ResponseError -import sure # noqa - -from moto import mock_ec2, mock_cloudformation_deprecated, mock_ec2_deprecated -from tests.helpers import requires_boto_gte -from tests.test_cloudformation.fixtures import vpc_eni -import json - - -@mock_ec2_deprecated -def test_elastic_network_interfaces(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - - with assert_raises(EC2ResponseError) as ex: - eni = conn.create_network_interface(subnet.id, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateNetworkInterface operation: Request would have succeeded, but DryRun flag is set') - - eni = conn.create_network_interface(subnet.id) - - all_enis = conn.get_all_network_interfaces() - all_enis.should.have.length_of(1) - eni = all_enis[0] - eni.groups.should.have.length_of(0) - eni.private_ip_addresses.should.have.length_of(0) - - with assert_raises(EC2ResponseError) as ex: - conn.delete_network_interface(eni.id, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DeleteNetworkInterface operation: Request would have succeeded, but DryRun flag is set') - - conn.delete_network_interface(eni.id) - - all_enis = conn.get_all_network_interfaces() - all_enis.should.have.length_of(0) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_network_interface(eni.id) - cm.exception.error_code.should.equal('InvalidNetworkInterfaceID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_elastic_network_interfaces_subnet_validation(): - conn = boto.connect_vpc('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.create_network_interface("subnet-abcd1234") - cm.exception.error_code.should.equal('InvalidSubnetID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_elastic_network_interfaces_with_private_ip(): - conn = 
boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - private_ip = "54.0.0.1" - eni = conn.create_network_interface(subnet.id, private_ip) - - all_enis = conn.get_all_network_interfaces() - all_enis.should.have.length_of(1) - - eni = all_enis[0] - eni.groups.should.have.length_of(0) - - eni.private_ip_addresses.should.have.length_of(1) - eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip) - - -@mock_ec2_deprecated -def test_elastic_network_interfaces_with_groups(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - security_group1 = conn.create_security_group( - 'test security group #1', 'this is a test security group') - security_group2 = conn.create_security_group( - 'test security group #2', 'this is a test security group') - conn.create_network_interface( - subnet.id, groups=[security_group1.id, security_group2.id]) - - all_enis = conn.get_all_network_interfaces() - all_enis.should.have.length_of(1) - - eni = all_enis[0] - eni.groups.should.have.length_of(2) - set([group.id for group in eni.groups]).should.equal( - set([security_group1.id, security_group2.id])) - - -@requires_boto_gte("2.12.0") -@mock_ec2_deprecated -def test_elastic_network_interfaces_modify_attribute(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - security_group1 = conn.create_security_group( - 'test security group #1', 'this is a test security group') - security_group2 = conn.create_security_group( - 'test security group #2', 'this is a test security group') - conn.create_network_interface(subnet.id, groups=[security_group1.id]) - - all_enis = conn.get_all_network_interfaces() - all_enis.should.have.length_of(1) - - eni = all_enis[0] - eni.groups.should.have.length_of(1) - eni.groups[0].id.should.equal(security_group1.id) - - with assert_raises(EC2ResponseError) as ex: - conn.modify_network_interface_attribute( - eni.id, 'groupset', [security_group2.id], dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ModifyNetworkInterface operation: Request would have succeeded, but DryRun flag is set') - - conn.modify_network_interface_attribute( - eni.id, 'groupset', [security_group2.id]) - - all_enis = conn.get_all_network_interfaces() - all_enis.should.have.length_of(1) - - eni = all_enis[0] - eni.groups.should.have.length_of(1) - eni.groups[0].id.should.equal(security_group2.id) - - -@mock_ec2_deprecated -def test_elastic_network_interfaces_filtering(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - - security_group1 = conn.create_security_group( - 'test security group #1', 'this is a test security group') - security_group2 = conn.create_security_group( - 'test security group #2', 'this is a test security group') - - eni1 = conn.create_network_interface( - subnet.id, groups=[security_group1.id, security_group2.id]) - eni2 = conn.create_network_interface( - subnet.id, groups=[security_group1.id]) - eni3 = conn.create_network_interface(subnet.id) - - all_enis = conn.get_all_network_interfaces() - all_enis.should.have.length_of(3) - - # Filter by NetworkInterfaceId - enis_by_id = 
conn.get_all_network_interfaces([eni1.id]) - enis_by_id.should.have.length_of(1) - set([eni.id for eni in enis_by_id]).should.equal(set([eni1.id])) - - # Filter by ENI ID - enis_by_id = conn.get_all_network_interfaces( - filters={'network-interface-id': eni1.id}) - enis_by_id.should.have.length_of(1) - set([eni.id for eni in enis_by_id]).should.equal(set([eni1.id])) - - # Filter by Security Group - enis_by_group = conn.get_all_network_interfaces( - filters={'group-id': security_group1.id}) - enis_by_group.should.have.length_of(2) - set([eni.id for eni in enis_by_group]).should.equal(set([eni1.id, eni2.id])) - - # Filter by ENI ID and Security Group - enis_by_group = conn.get_all_network_interfaces( - filters={'network-interface-id': eni1.id, 'group-id': security_group1.id}) - enis_by_group.should.have.length_of(1) - set([eni.id for eni in enis_by_group]).should.equal(set([eni1.id])) - - # Unsupported filter - conn.get_all_network_interfaces.when.called_with( - filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) - - -@mock_ec2 -def test_elastic_network_interfaces_get_by_tag_name(): - ec2 = boto3.resource('ec2', region_name='us-west-2') - ec2_client = boto3.client('ec2', region_name='us-west-2') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') - - eni1 = ec2.create_network_interface( - SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') - - with assert_raises(ClientError) as ex: - eni1.create_tags(Tags=[{'Key': 'Name', 'Value': 'eni1'}], DryRun=True) - ex.exception.response['Error']['Code'].should.equal('DryRunOperation') - ex.exception.response['ResponseMetadata'][ - 'HTTPStatusCode'].should.equal(400) - ex.exception.response['Error']['Message'].should.equal( - 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') - - eni1.create_tags(Tags=[{'Key': 'Name', 'Value': 'eni1'}]) - - # The status of the new interface should be 'available' - waiter = ec2_client.get_waiter('network_interface_available') - waiter.wait(NetworkInterfaceIds=[eni1.id]) - - filters = [{'Name': 'tag:Name', 'Values': ['eni1']}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(1) - - filters = [{'Name': 'tag:Name', 'Values': ['wrong-name']}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(0) - - -@mock_ec2 -def test_elastic_network_interfaces_get_by_availability_zone(): - ec2 = boto3.resource('ec2', region_name='us-west-2') - ec2_client = boto3.client('ec2', region_name='us-west-2') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') - - subnet2 = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.1.0/24', AvailabilityZone='us-west-2b') - - eni1 = ec2.create_network_interface( - SubnetId=subnet1.id, PrivateIpAddress='10.0.0.15') - - eni2 = ec2.create_network_interface( - SubnetId=subnet2.id, PrivateIpAddress='10.0.1.15') - - # The status of the new interface should be 'available' - waiter = ec2_client.get_waiter('network_interface_available') - waiter.wait(NetworkInterfaceIds=[eni1.id, eni2.id]) - - filters = [{'Name': 'availability-zone', 'Values': ['us-west-2a']}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(1) - - filters = [{'Name': 'availability-zone', 'Values': ['us-west-2c']}] - enis = 
list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(0) - - -@mock_ec2 -def test_elastic_network_interfaces_get_by_private_ip(): - ec2 = boto3.resource('ec2', region_name='us-west-2') - ec2_client = boto3.client('ec2', region_name='us-west-2') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') - - eni1 = ec2.create_network_interface( - SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') - - # The status of the new interface should be 'available' - waiter = ec2_client.get_waiter('network_interface_available') - waiter.wait(NetworkInterfaceIds=[eni1.id]) - - filters = [{'Name': 'private-ip-address', 'Values': ['10.0.10.5']}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(1) - - filters = [{'Name': 'private-ip-address', 'Values': ['10.0.10.10']}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(0) - - filters = [{'Name': 'addresses.private-ip-address', 'Values': ['10.0.10.5']}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(1) - - filters = [{'Name': 'addresses.private-ip-address', 'Values': ['10.0.10.10']}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(0) - - -@mock_ec2 -def test_elastic_network_interfaces_get_by_vpc_id(): - ec2 = boto3.resource('ec2', region_name='us-west-2') - ec2_client = boto3.client('ec2', region_name='us-west-2') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') - - eni1 = ec2.create_network_interface( - SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') - - # The status of the new interface should be 'available' - waiter = ec2_client.get_waiter('network_interface_available') - waiter.wait(NetworkInterfaceIds=[eni1.id]) - - filters = [{'Name': 'vpc-id', 'Values': [subnet.vpc_id]}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(1) - - filters = [{'Name': 'vpc-id', 'Values': ['vpc-aaaa1111']}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(0) - - -@mock_ec2 -def test_elastic_network_interfaces_get_by_subnet_id(): - ec2 = boto3.resource('ec2', region_name='us-west-2') - ec2_client = boto3.client('ec2', region_name='us-west-2') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') - - eni1 = ec2.create_network_interface( - SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') - - # The status of the new interface should be 'available' - waiter = ec2_client.get_waiter('network_interface_available') - waiter.wait(NetworkInterfaceIds=[eni1.id]) - - filters = [{'Name': 'subnet-id', 'Values': [subnet.id]}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(1) - - filters = [{'Name': 'subnet-id', 'Values': ['subnet-aaaa1111']}] - enis = list(ec2.network_interfaces.filter(Filters=filters)) - enis.should.have.length_of(0) - - -@mock_ec2_deprecated -@mock_cloudformation_deprecated -def test_elastic_network_interfaces_cloudformation(): - template = vpc_eni.template - template_json = json.dumps(template) - conn = boto.cloudformation.connect_to_region("us-west-1") - conn.create_stack( - "test_stack", - template_body=template_json, - ) - ec2_conn = 
boto.ec2.connect_to_region("us-west-1") - eni = ec2_conn.get_all_network_interfaces()[0] - - stack = conn.describe_stacks()[0] - resources = stack.describe_resources() - cfn_eni = [resource for resource in resources if resource.resource_type == - 'AWS::EC2::NetworkInterface'][0] - cfn_eni.physical_resource_id.should.equal(eni.id) +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises + +import boto3 +from botocore.exceptions import ClientError +import boto +import boto.cloudformation +import boto.ec2 +from boto.exception import EC2ResponseError +import sure # noqa + +from moto import mock_ec2, mock_cloudformation_deprecated, mock_ec2_deprecated +from tests.helpers import requires_boto_gte +from tests.test_cloudformation.fixtures import vpc_eni +import json + + +@mock_ec2_deprecated +def test_elastic_network_interfaces(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + + with assert_raises(EC2ResponseError) as ex: + eni = conn.create_network_interface(subnet.id, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateNetworkInterface operation: Request would have succeeded, but DryRun flag is set') + + eni = conn.create_network_interface(subnet.id) + + all_enis = conn.get_all_network_interfaces() + all_enis.should.have.length_of(1) + eni = all_enis[0] + eni.groups.should.have.length_of(0) + eni.private_ip_addresses.should.have.length_of(0) + + with assert_raises(EC2ResponseError) as ex: + conn.delete_network_interface(eni.id, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteNetworkInterface operation: Request would have succeeded, but DryRun flag is set') + + conn.delete_network_interface(eni.id) + + all_enis = conn.get_all_network_interfaces() + all_enis.should.have.length_of(0) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_network_interface(eni.id) + cm.exception.error_code.should.equal('InvalidNetworkInterfaceID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_elastic_network_interfaces_subnet_validation(): + conn = boto.connect_vpc('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.create_network_interface("subnet-abcd1234") + cm.exception.error_code.should.equal('InvalidSubnetID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_elastic_network_interfaces_with_private_ip(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + private_ip = "54.0.0.1" + eni = conn.create_network_interface(subnet.id, private_ip) + + all_enis = conn.get_all_network_interfaces() + all_enis.should.have.length_of(1) + + eni = all_enis[0] + eni.groups.should.have.length_of(0) + + eni.private_ip_addresses.should.have.length_of(1) + eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip) + + +@mock_ec2_deprecated +def test_elastic_network_interfaces_with_groups(): + conn 
= boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + security_group1 = conn.create_security_group( + 'test security group #1', 'this is a test security group') + security_group2 = conn.create_security_group( + 'test security group #2', 'this is a test security group') + conn.create_network_interface( + subnet.id, groups=[security_group1.id, security_group2.id]) + + all_enis = conn.get_all_network_interfaces() + all_enis.should.have.length_of(1) + + eni = all_enis[0] + eni.groups.should.have.length_of(2) + set([group.id for group in eni.groups]).should.equal( + set([security_group1.id, security_group2.id])) + + +@requires_boto_gte("2.12.0") +@mock_ec2_deprecated +def test_elastic_network_interfaces_modify_attribute(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + security_group1 = conn.create_security_group( + 'test security group #1', 'this is a test security group') + security_group2 = conn.create_security_group( + 'test security group #2', 'this is a test security group') + conn.create_network_interface(subnet.id, groups=[security_group1.id]) + + all_enis = conn.get_all_network_interfaces() + all_enis.should.have.length_of(1) + + eni = all_enis[0] + eni.groups.should.have.length_of(1) + eni.groups[0].id.should.equal(security_group1.id) + + with assert_raises(EC2ResponseError) as ex: + conn.modify_network_interface_attribute( + eni.id, 'groupset', [security_group2.id], dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyNetworkInterface operation: Request would have succeeded, but DryRun flag is set') + + conn.modify_network_interface_attribute( + eni.id, 'groupset', [security_group2.id]) + + all_enis = conn.get_all_network_interfaces() + all_enis.should.have.length_of(1) + + eni = all_enis[0] + eni.groups.should.have.length_of(1) + eni.groups[0].id.should.equal(security_group2.id) + + +@mock_ec2_deprecated +def test_elastic_network_interfaces_filtering(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + + security_group1 = conn.create_security_group( + 'test security group #1', 'this is a test security group') + security_group2 = conn.create_security_group( + 'test security group #2', 'this is a test security group') + + eni1 = conn.create_network_interface( + subnet.id, groups=[security_group1.id, security_group2.id]) + eni2 = conn.create_network_interface( + subnet.id, groups=[security_group1.id]) + eni3 = conn.create_network_interface(subnet.id) + + all_enis = conn.get_all_network_interfaces() + all_enis.should.have.length_of(3) + + # Filter by NetworkInterfaceId + enis_by_id = conn.get_all_network_interfaces([eni1.id]) + enis_by_id.should.have.length_of(1) + set([eni.id for eni in enis_by_id]).should.equal(set([eni1.id])) + + # Filter by ENI ID + enis_by_id = conn.get_all_network_interfaces( + filters={'network-interface-id': eni1.id}) + enis_by_id.should.have.length_of(1) + set([eni.id for eni in enis_by_id]).should.equal(set([eni1.id])) + + # Filter by Security Group + enis_by_group = conn.get_all_network_interfaces( + filters={'group-id': security_group1.id}) + enis_by_group.should.have.length_of(2) + set([eni.id for eni in 
enis_by_group]).should.equal(set([eni1.id, eni2.id])) + + # Filter by ENI ID and Security Group + enis_by_group = conn.get_all_network_interfaces( + filters={'network-interface-id': eni1.id, 'group-id': security_group1.id}) + enis_by_group.should.have.length_of(1) + set([eni.id for eni in enis_by_group]).should.equal(set([eni1.id])) + + # Unsupported filter + conn.get_all_network_interfaces.when.called_with( + filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) + + +@mock_ec2 +def test_elastic_network_interfaces_get_by_tag_name(): + ec2 = boto3.resource('ec2', region_name='us-west-2') + ec2_client = boto3.client('ec2', region_name='us-west-2') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') + + eni1 = ec2.create_network_interface( + SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') + + with assert_raises(ClientError) as ex: + eni1.create_tags(Tags=[{'Key': 'Name', 'Value': 'eni1'}], DryRun=True) + ex.exception.response['Error']['Code'].should.equal('DryRunOperation') + ex.exception.response['ResponseMetadata'][ + 'HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + + eni1.create_tags(Tags=[{'Key': 'Name', 'Value': 'eni1'}]) + + # The status of the new interface should be 'available' + waiter = ec2_client.get_waiter('network_interface_available') + waiter.wait(NetworkInterfaceIds=[eni1.id]) + + filters = [{'Name': 'tag:Name', 'Values': ['eni1']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(1) + + filters = [{'Name': 'tag:Name', 'Values': ['wrong-name']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(0) + + +@mock_ec2 +def test_elastic_network_interfaces_get_by_availability_zone(): + ec2 = boto3.resource('ec2', region_name='us-west-2') + ec2_client = boto3.client('ec2', region_name='us-west-2') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') + + subnet2 = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.1.0/24', AvailabilityZone='us-west-2b') + + eni1 = ec2.create_network_interface( + SubnetId=subnet1.id, PrivateIpAddress='10.0.0.15') + + eni2 = ec2.create_network_interface( + SubnetId=subnet2.id, PrivateIpAddress='10.0.1.15') + + # The status of the new interface should be 'available' + waiter = ec2_client.get_waiter('network_interface_available') + waiter.wait(NetworkInterfaceIds=[eni1.id, eni2.id]) + + filters = [{'Name': 'availability-zone', 'Values': ['us-west-2a']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(1) + + filters = [{'Name': 'availability-zone', 'Values': ['us-west-2c']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(0) + + +@mock_ec2 +def test_elastic_network_interfaces_get_by_private_ip(): + ec2 = boto3.resource('ec2', region_name='us-west-2') + ec2_client = boto3.client('ec2', region_name='us-west-2') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') + + eni1 = ec2.create_network_interface( + SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') + + # The status of the new interface 
should be 'available' + waiter = ec2_client.get_waiter('network_interface_available') + waiter.wait(NetworkInterfaceIds=[eni1.id]) + + filters = [{'Name': 'private-ip-address', 'Values': ['10.0.10.5']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(1) + + filters = [{'Name': 'private-ip-address', 'Values': ['10.0.10.10']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(0) + + filters = [{'Name': 'addresses.private-ip-address', 'Values': ['10.0.10.5']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(1) + + filters = [{'Name': 'addresses.private-ip-address', 'Values': ['10.0.10.10']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(0) + + +@mock_ec2 +def test_elastic_network_interfaces_get_by_vpc_id(): + ec2 = boto3.resource('ec2', region_name='us-west-2') + ec2_client = boto3.client('ec2', region_name='us-west-2') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') + + eni1 = ec2.create_network_interface( + SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') + + # The status of the new interface should be 'available' + waiter = ec2_client.get_waiter('network_interface_available') + waiter.wait(NetworkInterfaceIds=[eni1.id]) + + filters = [{'Name': 'vpc-id', 'Values': [subnet.vpc_id]}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(1) + + filters = [{'Name': 'vpc-id', 'Values': ['vpc-aaaa1111']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(0) + + +@mock_ec2 +def test_elastic_network_interfaces_get_by_subnet_id(): + ec2 = boto3.resource('ec2', region_name='us-west-2') + ec2_client = boto3.client('ec2', region_name='us-west-2') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = ec2.create_subnet( + VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-2a') + + eni1 = ec2.create_network_interface( + SubnetId=subnet.id, PrivateIpAddress='10.0.10.5') + + # The status of the new interface should be 'available' + waiter = ec2_client.get_waiter('network_interface_available') + waiter.wait(NetworkInterfaceIds=[eni1.id]) + + filters = [{'Name': 'subnet-id', 'Values': [subnet.id]}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(1) + + filters = [{'Name': 'subnet-id', 'Values': ['subnet-aaaa1111']}] + enis = list(ec2.network_interfaces.filter(Filters=filters)) + enis.should.have.length_of(0) + + +@mock_ec2_deprecated +@mock_cloudformation_deprecated +def test_elastic_network_interfaces_cloudformation(): + template = vpc_eni.template + template_json = json.dumps(template) + conn = boto.cloudformation.connect_to_region("us-west-1") + conn.create_stack( + "test_stack", + template_body=template_json, + ) + ec2_conn = boto.ec2.connect_to_region("us-west-1") + eni = ec2_conn.get_all_network_interfaces()[0] + + stack = conn.describe_stacks()[0] + resources = stack.describe_resources() + cfn_eni = [resource for resource in resources if resource.resource_type == + 'AWS::EC2::NetworkInterface'][0] + cfn_eni.physical_resource_id.should.equal(eni.id) diff --git a/tests/test_ec2/test_general.py b/tests/test_ec2/test_general.py index 4c319d30d..7249af6a2 100644 --- a/tests/test_ec2/test_general.py +++ b/tests/test_ec2/test_general.py @@ -1,42 +1,42 @@ -from __future__ import unicode_literals -# 
Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises - -import boto -import boto3 -from boto.exception import EC2ResponseError -import sure # noqa - -from moto import mock_ec2_deprecated, mock_ec2 - - -@mock_ec2_deprecated -def test_console_output(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance_id = reservation.instances[0].id - output = conn.get_console_output(instance_id) - output.output.should_not.equal(None) - - -@mock_ec2_deprecated -def test_console_output_without_instance(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.get_console_output('i-1234abcd') - cm.exception.code.should.equal('InvalidInstanceID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2 -def test_console_output_boto3(): - conn = boto3.resource('ec2', 'us-east-1') - instances = conn.create_instances(ImageId='ami-1234abcd', - MinCount=1, - MaxCount=1) - - output = instances[0].console_output() - output.get('Output').should_not.equal(None) +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises + +import boto +import boto3 +from boto.exception import EC2ResponseError +import sure # noqa + +from moto import mock_ec2_deprecated, mock_ec2 + + +@mock_ec2_deprecated +def test_console_output(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance_id = reservation.instances[0].id + output = conn.get_console_output(instance_id) + output.output.should_not.equal(None) + + +@mock_ec2_deprecated +def test_console_output_without_instance(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.get_console_output('i-1234abcd') + cm.exception.code.should.equal('InvalidInstanceID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2 +def test_console_output_boto3(): + conn = boto3.resource('ec2', 'us-east-1') + instances = conn.create_instances(ImageId='ami-1234abcd', + MinCount=1, + MaxCount=1) + + output = instances[0].console_output() + output.get('Output').should_not.equal(None) diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 84b4fbd7d..109017b3c 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -1,1256 +1,1256 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises - -import base64 -import datetime -import ipaddress - -import six -import boto -import boto3 -from boto.ec2.instance import Reservation, InstanceAttribute -from boto.exception import EC2ResponseError, EC2ResponseError -from freezegun import freeze_time -import sure # noqa - -from moto import mock_ec2_deprecated, mock_ec2 -from tests.helpers import requires_boto_gte - - -################ Test Readme ############### -def add_servers(ami_id, count): - conn = boto.connect_ec2() - for index in range(count): - conn.run_instances(ami_id) - - -@mock_ec2_deprecated -def test_add_servers(): - add_servers('ami-1234abcd', 2) - - conn = boto.connect_ec2() - reservations = conn.get_all_instances() - assert len(reservations) == 
2 - instance1 = reservations[0].instances[0] - assert instance1.image_id == 'ami-1234abcd' - -############################################ - - -@freeze_time("2014-01-01 05:00:00") -@mock_ec2_deprecated -def test_instance_launch_and_terminate(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as ex: - reservation = conn.run_instances('ami-1234abcd', dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the RunInstance operation: Request would have succeeded, but DryRun flag is set') - - reservation = conn.run_instances('ami-1234abcd') - reservation.should.be.a(Reservation) - reservation.instances.should.have.length_of(1) - instance = reservation.instances[0] - instance.state.should.equal('pending') - - reservations = conn.get_all_instances() - reservations.should.have.length_of(1) - reservations[0].id.should.equal(reservation.id) - instances = reservations[0].instances - instances.should.have.length_of(1) - instance = instances[0] - instance.id.should.equal(instance.id) - instance.state.should.equal('running') - instance.launch_time.should.equal("2014-01-01T05:00:00.000Z") - instance.vpc_id.should.equal(None) - instance.placement.should.equal('us-east-1a') - - root_device_name = instance.root_device_name - instance.block_device_mapping[ - root_device_name].status.should.equal('in-use') - volume_id = instance.block_device_mapping[root_device_name].volume_id - volume_id.should.match(r'vol-\w+') - - volume = conn.get_all_volumes(volume_ids=[volume_id])[0] - volume.attach_data.instance_id.should.equal(instance.id) - volume.status.should.equal('in-use') - - with assert_raises(EC2ResponseError) as ex: - conn.terminate_instances([instance.id], dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the TerminateInstance operation: Request would have succeeded, but DryRun flag is set') - - conn.terminate_instances([instance.id]) - - reservations = conn.get_all_instances() - instance = reservations[0].instances[0] - instance.state.should.equal('terminated') - - -@mock_ec2_deprecated -def test_terminate_empty_instances(): - conn = boto.connect_ec2('the_key', 'the_secret') - conn.terminate_instances.when.called_with( - []).should.throw(EC2ResponseError) - - -@freeze_time("2014-01-01 05:00:00") -@mock_ec2_deprecated -def test_instance_attach_volume(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - vol1 = conn.create_volume(size=36, zone=conn.region.name) - vol1.attach(instance.id, "/dev/sda1") - vol1.update() - vol2 = conn.create_volume(size=65, zone=conn.region.name) - vol2.attach(instance.id, "/dev/sdb1") - vol2.update() - vol3 = conn.create_volume(size=130, zone=conn.region.name) - vol3.attach(instance.id, "/dev/sdc1") - vol3.update() - - reservations = conn.get_all_instances() - instance = reservations[0].instances[0] - - instance.block_device_mapping.should.have.length_of(3) - - for v in conn.get_all_volumes(volume_ids=[instance.block_device_mapping['/dev/sdc1'].volume_id]): - v.attach_data.instance_id.should.equal(instance.id) - # can do due to freeze_time decorator. - v.attach_data.attach_time.should.equal(instance.launch_time) - # can do due to freeze_time decorator. 
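- # freeze_time pins the mock clock at 2014-01-01 05:00:00, so the
- # launch, create and attach timestamps that moto generates all
- # coincide and can safely be compared for equality.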
- v.create_time.should.equal(instance.launch_time) - v.region.name.should.equal(instance.region.name) - v.status.should.equal('in-use') - - -@mock_ec2_deprecated -def test_get_instances_by_id(): - conn = boto.connect_ec2() - reservation = conn.run_instances('ami-1234abcd', min_count=2) - instance1, instance2 = reservation.instances - - reservations = conn.get_all_instances(instance_ids=[instance1.id]) - reservations.should.have.length_of(1) - reservation = reservations[0] - reservation.instances.should.have.length_of(1) - reservation.instances[0].id.should.equal(instance1.id) - - reservations = conn.get_all_instances( - instance_ids=[instance1.id, instance2.id]) - reservations.should.have.length_of(1) - reservation = reservations[0] - reservation.instances.should.have.length_of(2) - instance_ids = [instance.id for instance in reservation.instances] - instance_ids.should.equal([instance1.id, instance2.id]) - - # Call get_all_instances with a bad id should raise an error - with assert_raises(EC2ResponseError) as cm: - conn.get_all_instances(instance_ids=[instance1.id, "i-1234abcd"]) - cm.exception.code.should.equal('InvalidInstanceID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2 -def test_get_paginated_instances(): - image_id = 'ami-1234abcd' - client = boto3.client('ec2', region_name='us-east-1') - conn = boto3.resource('ec2', 'us-east-1') - for i in range(100): - conn.create_instances(ImageId=image_id, - MinCount=1, - MaxCount=1) - resp = client.describe_instances(MaxResults=50) - reservations = resp['Reservations'] - reservations.should.have.length_of(50) - next_token = resp['NextToken'] - next_token.should_not.be.none - resp2 = client.describe_instances(NextToken=next_token) - reservations.extend(resp2['Reservations']) - reservations.should.have.length_of(100) - assert 'NextToken' not in resp2.keys() - - -@mock_ec2 -def test_create_with_tags(): - ec2 = boto3.client('ec2', region_name='us-west-2') - instances = ec2.run_instances( - ImageId='ami-123', - MinCount=1, - MaxCount=1, - InstanceType='t2.micro', - TagSpecifications=[ - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG1', - 'Value': 'MY_VALUE1', - }, - { - 'Key': 'MY_TAG2', - 'Value': 'MY_VALUE2', - }, - ], - }, - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG3', - 'Value': 'MY_VALUE3', - }, - ] - }, - ], - ) - assert 'Tags' in instances['Instances'][0] - len(instances['Instances'][0]['Tags']).should.equal(3) - - -@mock_ec2_deprecated -def test_get_instances_filtering_by_state(): - conn = boto.connect_ec2() - reservation = conn.run_instances('ami-1234abcd', min_count=3) - instance1, instance2, instance3 = reservation.instances - - conn.terminate_instances([instance1.id]) - - reservations = conn.get_all_instances( - filters={'instance-state-name': 'running'}) - reservations.should.have.length_of(1) - # Since we terminated instance1, only instance2 and instance3 should be - # returned - instance_ids = [instance.id for instance in reservations[0].instances] - set(instance_ids).should.equal(set([instance2.id, instance3.id])) - - reservations = conn.get_all_instances( - [instance2.id], filters={'instance-state-name': 'running'}) - reservations.should.have.length_of(1) - instance_ids = [instance.id for instance in reservations[0].instances] - instance_ids.should.equal([instance2.id]) - - reservations = conn.get_all_instances( - [instance2.id], filters={'instance-state-name': 'terminated'}) - list(reservations).should.equal([]) - - # 
get_all_instances should still return all 3 - reservations = conn.get_all_instances() - reservations[0].instances.should.have.length_of(3) - - conn.get_all_instances.when.called_with( - filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) - - -@mock_ec2_deprecated -def test_get_instances_filtering_by_instance_id(): - conn = boto.connect_ec2() - reservation = conn.run_instances('ami-1234abcd', min_count=3) - instance1, instance2, instance3 = reservation.instances - - reservations = conn.get_all_instances( - filters={'instance-id': instance1.id}) - # get_all_instances should return just instance1 - reservations[0].instances.should.have.length_of(1) - reservations[0].instances[0].id.should.equal(instance1.id) - - reservations = conn.get_all_instances( - filters={'instance-id': [instance1.id, instance2.id]}) - # get_all_instances should return two - reservations[0].instances.should.have.length_of(2) - - reservations = conn.get_all_instances( - filters={'instance-id': 'non-existing-id'}) - reservations.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_get_instances_filtering_by_instance_type(): - conn = boto.connect_ec2() - reservation1 = conn.run_instances('ami-1234abcd', instance_type='m1.small') - instance1 = reservation1.instances[0] - reservation2 = conn.run_instances('ami-1234abcd', instance_type='m1.small') - instance2 = reservation2.instances[0] - reservation3 = conn.run_instances('ami-1234abcd', instance_type='t1.micro') - instance3 = reservation3.instances[0] - - reservations = conn.get_all_instances( - filters={'instance-type': 'm1.small'}) - # get_all_instances should return instance1,2 - reservations.should.have.length_of(2) - reservations[0].instances.should.have.length_of(1) - reservations[1].instances.should.have.length_of(1) - instance_ids = [reservations[0].instances[0].id, - reservations[1].instances[0].id] - set(instance_ids).should.equal(set([instance1.id, instance2.id])) - - reservations = conn.get_all_instances( - filters={'instance-type': 't1.micro'}) - # get_all_instances should return one - reservations.should.have.length_of(1) - reservations[0].instances.should.have.length_of(1) - reservations[0].instances[0].id.should.equal(instance3.id) - - reservations = conn.get_all_instances( - filters={'instance-type': ['t1.micro', 'm1.small']}) - reservations.should.have.length_of(3) - reservations[0].instances.should.have.length_of(1) - reservations[1].instances.should.have.length_of(1) - reservations[2].instances.should.have.length_of(1) - instance_ids = [ - reservations[0].instances[0].id, - reservations[1].instances[0].id, - reservations[2].instances[0].id, - ] - set(instance_ids).should.equal( - set([instance1.id, instance2.id, instance3.id])) - - reservations = conn.get_all_instances(filters={'instance-type': 'bogus'}) - # bogus instance-type should return none - reservations.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_get_instances_filtering_by_reason_code(): - conn = boto.connect_ec2() - reservation = conn.run_instances('ami-1234abcd', min_count=3) - instance1, instance2, instance3 = reservation.instances - instance1.stop() - instance2.terminate() - - reservations = conn.get_all_instances( - filters={'state-reason-code': 'Client.UserInitiatedShutdown'}) - # get_all_instances should return instance1 and instance2 - reservations[0].instances.should.have.length_of(2) - set([instance1.id, instance2.id]).should.equal( - set([i.id for i in reservations[0].instances])) - - reservations = 
conn.get_all_instances(filters={'state-reason-code': ''}) - # get_all_instances should return instance 3 - reservations[0].instances.should.have.length_of(1) - reservations[0].instances[0].id.should.equal(instance3.id) - - -@mock_ec2_deprecated -def test_get_instances_filtering_by_source_dest_check(): - conn = boto.connect_ec2() - reservation = conn.run_instances('ami-1234abcd', min_count=2) - instance1, instance2 = reservation.instances - conn.modify_instance_attribute( - instance1.id, attribute='sourceDestCheck', value=False) - - source_dest_check_false = conn.get_all_instances( - filters={'source-dest-check': 'false'}) - source_dest_check_true = conn.get_all_instances( - filters={'source-dest-check': 'true'}) - - source_dest_check_false[0].instances.should.have.length_of(1) - source_dest_check_false[0].instances[0].id.should.equal(instance1.id) - - source_dest_check_true[0].instances.should.have.length_of(1) - source_dest_check_true[0].instances[0].id.should.equal(instance2.id) - - -@mock_ec2_deprecated -def test_get_instances_filtering_by_vpc_id(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc1 = conn.create_vpc("10.0.0.0/16") - subnet1 = conn.create_subnet(vpc1.id, "10.0.0.0/27") - reservation1 = conn.run_instances( - 'ami-1234abcd', min_count=1, subnet_id=subnet1.id) - instance1 = reservation1.instances[0] - - vpc2 = conn.create_vpc("10.1.0.0/16") - subnet2 = conn.create_subnet(vpc2.id, "10.1.0.0/27") - reservation2 = conn.run_instances( - 'ami-1234abcd', min_count=1, subnet_id=subnet2.id) - instance2 = reservation2.instances[0] - - reservations1 = conn.get_all_instances(filters={'vpc-id': vpc1.id}) - reservations1.should.have.length_of(1) - reservations1[0].instances.should.have.length_of(1) - reservations1[0].instances[0].id.should.equal(instance1.id) - reservations1[0].instances[0].vpc_id.should.equal(vpc1.id) - reservations1[0].instances[0].subnet_id.should.equal(subnet1.id) - - reservations2 = conn.get_all_instances(filters={'vpc-id': vpc2.id}) - reservations2.should.have.length_of(1) - reservations2[0].instances.should.have.length_of(1) - reservations2[0].instances[0].id.should.equal(instance2.id) - reservations2[0].instances[0].vpc_id.should.equal(vpc2.id) - reservations2[0].instances[0].subnet_id.should.equal(subnet2.id) - - -@mock_ec2_deprecated -def test_get_instances_filtering_by_architecture(): - conn = boto.connect_ec2() - reservation = conn.run_instances('ami-1234abcd', min_count=1) - instance = reservation.instances - - reservations = conn.get_all_instances(filters={'architecture': 'x86_64'}) - # get_all_instances should return the instance - reservations[0].instances.should.have.length_of(1) - - -@mock_ec2 -def test_get_instances_filtering_by_image_id(): - image_id = 'ami-1234abcd' - client = boto3.client('ec2', region_name='us-east-1') - conn = boto3.resource('ec2', 'us-east-1') - conn.create_instances(ImageId=image_id, - MinCount=1, - MaxCount=1) - - reservations = client.describe_instances(Filters=[{'Name': 'image-id', - 'Values': [image_id]}])['Reservations'] - reservations[0]['Instances'].should.have.length_of(1) - - -@mock_ec2 -def test_get_instances_filtering_by_private_dns(): - image_id = 'ami-1234abcd' - client = boto3.client('ec2', region_name='us-east-1') - conn = boto3.resource('ec2', 'us-east-1') - conn.create_instances(ImageId=image_id, - MinCount=1, - MaxCount=1, - PrivateIpAddress='10.0.0.1') - reservations = client.describe_instances(Filters=[ - {'Name': 'private-dns-name', 'Values': ['ip-10-0-0-1.ec2.internal']} - ])['Reservations'] - 
reservations[0]['Instances'].should.have.length_of(1) - - -@mock_ec2 -def test_get_instances_filtering_by_ni_private_dns(): - image_id = 'ami-1234abcd' - client = boto3.client('ec2', region_name='us-west-2') - conn = boto3.resource('ec2', 'us-west-2') - conn.create_instances(ImageId=image_id, - MinCount=1, - MaxCount=1, - PrivateIpAddress='10.0.0.1') - reservations = client.describe_instances(Filters=[ - {'Name': 'network-interface.private-dns-name', 'Values': ['ip-10-0-0-1.us-west-2.compute.internal']} - ])['Reservations'] - reservations[0]['Instances'].should.have.length_of(1) - - -@mock_ec2 -def test_get_instances_filtering_by_instance_group_name(): - image_id = 'ami-1234abcd' - client = boto3.client('ec2', region_name='us-east-1') - client.create_security_group( - Description='test', - GroupName='test_sg' - ) - client.run_instances(ImageId=image_id, - MinCount=1, - MaxCount=1, - SecurityGroups=['test_sg']) - reservations = client.describe_instances(Filters=[ - {'Name': 'instance.group-name', 'Values': ['test_sg']} - ])['Reservations'] - reservations[0]['Instances'].should.have.length_of(1) - - -@mock_ec2 -def test_get_instances_filtering_by_instance_group_id(): - image_id = 'ami-1234abcd' - client = boto3.client('ec2', region_name='us-east-1') - create_sg = client.create_security_group( - Description='test', - GroupName='test_sg' - ) - group_id = create_sg['GroupId'] - client.run_instances(ImageId=image_id, - MinCount=1, - MaxCount=1, - SecurityGroups=['test_sg']) - reservations = client.describe_instances(Filters=[ - {'Name': 'instance.group-id', 'Values': [group_id]} - ])['Reservations'] - reservations[0]['Instances'].should.have.length_of(1) - - -@mock_ec2_deprecated -def test_get_instances_filtering_by_tag(): - conn = boto.connect_ec2() - reservation = conn.run_instances('ami-1234abcd', min_count=3) - instance1, instance2, instance3 = reservation.instances - instance1.add_tag('tag1', 'value1') - instance1.add_tag('tag2', 'value2') - instance2.add_tag('tag1', 'value1') - instance2.add_tag('tag2', 'wrong value') - instance3.add_tag('tag2', 'value2') - - reservations = conn.get_all_instances(filters={'tag:tag0': 'value0'}) - # get_all_instances should return no instances - reservations.should.have.length_of(0) - - reservations = conn.get_all_instances(filters={'tag:tag1': 'value1'}) - # get_all_instances should return both instances with this tag value - reservations.should.have.length_of(1) - reservations[0].instances.should.have.length_of(2) - reservations[0].instances[0].id.should.equal(instance1.id) - reservations[0].instances[1].id.should.equal(instance2.id) - - reservations = conn.get_all_instances( - filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'}) - # get_all_instances should return the instance with both tag values - reservations.should.have.length_of(1) - reservations[0].instances.should.have.length_of(1) - reservations[0].instances[0].id.should.equal(instance1.id) - - reservations = conn.get_all_instances( - filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'}) - # get_all_instances should return the instance with both tag values - reservations.should.have.length_of(1) - reservations[0].instances.should.have.length_of(1) - reservations[0].instances[0].id.should.equal(instance1.id) - - reservations = conn.get_all_instances( - filters={'tag:tag2': ['value2', 'bogus']}) - # get_all_instances should return both instances with one of the - # acceptable tag values - reservations.should.have.length_of(1) - reservations[0].instances.should.have.length_of(2) - 
reservations[0].instances[0].id.should.equal(instance1.id) - reservations[0].instances[1].id.should.equal(instance3.id) - - -@mock_ec2_deprecated -def test_get_instances_filtering_by_tag_value(): - conn = boto.connect_ec2() - reservation = conn.run_instances('ami-1234abcd', min_count=3) - instance1, instance2, instance3 = reservation.instances - instance1.add_tag('tag1', 'value1') - instance1.add_tag('tag2', 'value2') - instance2.add_tag('tag1', 'value1') - instance2.add_tag('tag2', 'wrong value') - instance3.add_tag('tag2', 'value2') - - reservations = conn.get_all_instances(filters={'tag-value': 'value0'}) - # get_all_instances should return no instances - reservations.should.have.length_of(0) - - reservations = conn.get_all_instances(filters={'tag-value': 'value1'}) - # get_all_instances should return both instances with this tag value - reservations.should.have.length_of(1) - reservations[0].instances.should.have.length_of(2) - reservations[0].instances[0].id.should.equal(instance1.id) - reservations[0].instances[1].id.should.equal(instance2.id) - - reservations = conn.get_all_instances( - filters={'tag-value': ['value2', 'value1']}) - # get_all_instances should return both instances with one of the - # acceptable tag values - reservations.should.have.length_of(1) - reservations[0].instances.should.have.length_of(3) - reservations[0].instances[0].id.should.equal(instance1.id) - reservations[0].instances[1].id.should.equal(instance2.id) - reservations[0].instances[2].id.should.equal(instance3.id) - - reservations = conn.get_all_instances( - filters={'tag-value': ['value2', 'bogus']}) - # get_all_instances should return both instances with one of the - # acceptable tag values - reservations.should.have.length_of(1) - reservations[0].instances.should.have.length_of(2) - reservations[0].instances[0].id.should.equal(instance1.id) - reservations[0].instances[1].id.should.equal(instance3.id) - - -@mock_ec2_deprecated -def test_get_instances_filtering_by_tag_name(): - conn = boto.connect_ec2() - reservation = conn.run_instances('ami-1234abcd', min_count=3) - instance1, instance2, instance3 = reservation.instances - instance1.add_tag('tag1') - instance1.add_tag('tag2') - instance2.add_tag('tag1') - instance2.add_tag('tag2X') - instance3.add_tag('tag3') - - reservations = conn.get_all_instances(filters={'tag-key': 'tagX'}) - # get_all_instances should return no instances - reservations.should.have.length_of(0) - - reservations = conn.get_all_instances(filters={'tag-key': 'tag1'}) - # get_all_instances should return both instances with this tag value - reservations.should.have.length_of(1) - reservations[0].instances.should.have.length_of(2) - reservations[0].instances[0].id.should.equal(instance1.id) - reservations[0].instances[1].id.should.equal(instance2.id) - - reservations = conn.get_all_instances( - filters={'tag-key': ['tag1', 'tag3']}) - # get_all_instances should return both instances with one of the - # acceptable tag values - reservations.should.have.length_of(1) - reservations[0].instances.should.have.length_of(3) - reservations[0].instances[0].id.should.equal(instance1.id) - reservations[0].instances[1].id.should.equal(instance2.id) - reservations[0].instances[2].id.should.equal(instance3.id) - - -@mock_ec2_deprecated -def test_instance_start_and_stop(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd', min_count=2) - instances = reservation.instances - instances.should.have.length_of(2) - - instance_ids = [instance.id for instance 
in instances] - - with assert_raises(EC2ResponseError) as ex: - stopped_instances = conn.stop_instances(instance_ids, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the StopInstance operation: Request would have succeeded, but DryRun flag is set') - - stopped_instances = conn.stop_instances(instance_ids) - - for instance in stopped_instances: - instance.state.should.equal('stopping') - - with assert_raises(EC2ResponseError) as ex: - started_instances = conn.start_instances( - [instances[0].id], dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the StartInstance operation: Request would have succeeded, but DryRun flag is set') - - started_instances = conn.start_instances([instances[0].id]) - started_instances[0].state.should.equal('pending') - - -@mock_ec2_deprecated -def test_instance_reboot(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - with assert_raises(EC2ResponseError) as ex: - instance.reboot(dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the RebootInstance operation: Request would have succeeded, but DryRun flag is set') - - instance.reboot() - instance.state.should.equal('pending') - - -@mock_ec2_deprecated -def test_instance_attribute_instance_type(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - with assert_raises(EC2ResponseError) as ex: - instance.modify_attribute("instanceType", "m1.small", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ModifyInstanceType operation: Request would have succeeded, but DryRun flag is set') - - instance.modify_attribute("instanceType", "m1.small") - - instance_attribute = instance.get_attribute("instanceType") - instance_attribute.should.be.a(InstanceAttribute) - instance_attribute.get('instanceType').should.equal("m1.small") - - -@mock_ec2_deprecated -def test_modify_instance_attribute_security_groups(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - sg_id = 'sg-1234abcd' - sg_id2 = 'sg-abcd4321' - - with assert_raises(EC2ResponseError) as ex: - instance.modify_attribute("groupSet", [sg_id, sg_id2], dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set') - - instance.modify_attribute("groupSet", [sg_id, sg_id2]) - - instance_attribute = instance.get_attribute("groupSet") - instance_attribute.should.be.a(InstanceAttribute) - group_list = instance_attribute.get('groupSet') - any(g.id == sg_id for g in group_list).should.be.ok - any(g.id == sg_id2 for g in group_list).should.be.ok - - -@mock_ec2_deprecated -def 
test_instance_attribute_user_data(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - with assert_raises(EC2ResponseError) as ex: - instance.modify_attribute( - "userData", "this is my user data", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ModifyUserData operation: Request would have succeeded, but DryRun flag is set') - - instance.modify_attribute("userData", "this is my user data") - - instance_attribute = instance.get_attribute("userData") - instance_attribute.should.be.a(InstanceAttribute) - instance_attribute.get("userData").should.equal("this is my user data") - - -@mock_ec2_deprecated -def test_instance_attribute_source_dest_check(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - # Default value is true - instance.sourceDestCheck.should.equal('true') - - instance_attribute = instance.get_attribute("sourceDestCheck") - instance_attribute.should.be.a(InstanceAttribute) - instance_attribute.get("sourceDestCheck").should.equal(True) - - # Set to false (note: Boto converts bool to string, eg 'false') - - with assert_raises(EC2ResponseError) as ex: - instance.modify_attribute("sourceDestCheck", False, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ModifySourceDestCheck operation: Request would have succeeded, but DryRun flag is set') - - instance.modify_attribute("sourceDestCheck", False) - - instance.update() - instance.sourceDestCheck.should.equal('false') - - instance_attribute = instance.get_attribute("sourceDestCheck") - instance_attribute.should.be.a(InstanceAttribute) - instance_attribute.get("sourceDestCheck").should.equal(False) - - # Set back to true - instance.modify_attribute("sourceDestCheck", True) - - instance.update() - instance.sourceDestCheck.should.equal('true') - - instance_attribute = instance.get_attribute("sourceDestCheck") - instance_attribute.should.be.a(InstanceAttribute) - instance_attribute.get("sourceDestCheck").should.equal(True) - - -@mock_ec2_deprecated -def test_user_data_with_run_instance(): - user_data = b"some user data" - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd', user_data=user_data) - instance = reservation.instances[0] - - instance_attribute = instance.get_attribute("userData") - instance_attribute.should.be.a(InstanceAttribute) - retrieved_user_data = instance_attribute.get("userData").encode('utf-8') - decoded_user_data = base64.decodestring(retrieved_user_data) - decoded_user_data.should.equal(b"some user data") - - -@mock_ec2_deprecated -def test_run_instance_with_security_group_name(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as ex: - group = conn.create_security_group( - 'group1', "some description", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set') - - group = conn.create_security_group('group1', 
"some description") - - reservation = conn.run_instances('ami-1234abcd', - security_groups=['group1']) - instance = reservation.instances[0] - - instance.groups[0].id.should.equal(group.id) - instance.groups[0].name.should.equal("group1") - - -@mock_ec2_deprecated -def test_run_instance_with_security_group_id(): - conn = boto.connect_ec2('the_key', 'the_secret') - group = conn.create_security_group('group1', "some description") - reservation = conn.run_instances('ami-1234abcd', - security_group_ids=[group.id]) - instance = reservation.instances[0] - - instance.groups[0].id.should.equal(group.id) - instance.groups[0].name.should.equal("group1") - - -@mock_ec2_deprecated -def test_run_instance_with_instance_type(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd', instance_type="t1.micro") - instance = reservation.instances[0] - - instance.instance_type.should.equal("t1.micro") - - -@mock_ec2_deprecated -def test_run_instance_with_default_placement(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - instance.placement.should.equal("us-east-1a") - - -@mock_ec2_deprecated -def test_run_instance_with_placement(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd', placement="us-east-1b") - instance = reservation.instances[0] - - instance.placement.should.equal("us-east-1b") - - -@mock_ec2 -def test_run_instance_with_subnet_boto3(): - client = boto3.client('ec2', region_name='eu-central-1') - - ip_networks = [ - (ipaddress.ip_network('10.0.0.0/16'), ipaddress.ip_network('10.0.99.0/24')), - (ipaddress.ip_network('192.168.42.0/24'), ipaddress.ip_network('192.168.42.0/25')) - ] - - # Tests instances are created with the correct IPs - for vpc_cidr, subnet_cidr in ip_networks: - resp = client.create_vpc( - CidrBlock=str(vpc_cidr), - AmazonProvidedIpv6CidrBlock=False, - DryRun=False, - InstanceTenancy='default' - ) - vpc_id = resp['Vpc']['VpcId'] - - resp = client.create_subnet( - CidrBlock=str(subnet_cidr), - VpcId=vpc_id - ) - subnet_id = resp['Subnet']['SubnetId'] - - resp = client.run_instances( - ImageId='ami-1234abcd', - MaxCount=1, - MinCount=1, - SubnetId=subnet_id - ) - instance = resp['Instances'][0] - instance['SubnetId'].should.equal(subnet_id) - - priv_ipv4 = ipaddress.ip_address(six.text_type(instance['PrivateIpAddress'])) - subnet_cidr.should.contain(priv_ipv4) - - -@mock_ec2 -def test_run_instance_with_specified_private_ipv4(): - client = boto3.client('ec2', region_name='eu-central-1') - - vpc_cidr = ipaddress.ip_network('192.168.42.0/24') - subnet_cidr = ipaddress.ip_network('192.168.42.0/25') - - resp = client.create_vpc( - CidrBlock=str(vpc_cidr), - AmazonProvidedIpv6CidrBlock=False, - DryRun=False, - InstanceTenancy='default' - ) - vpc_id = resp['Vpc']['VpcId'] - - resp = client.create_subnet( - CidrBlock=str(subnet_cidr), - VpcId=vpc_id - ) - subnet_id = resp['Subnet']['SubnetId'] - - resp = client.run_instances( - ImageId='ami-1234abcd', - MaxCount=1, - MinCount=1, - SubnetId=subnet_id, - PrivateIpAddress='192.168.42.5' - ) - instance = resp['Instances'][0] - instance['SubnetId'].should.equal(subnet_id) - instance['PrivateIpAddress'].should.equal('192.168.42.5') - - -@mock_ec2 -def test_run_instance_mapped_public_ipv4(): - client = boto3.client('ec2', region_name='eu-central-1') - - vpc_cidr = ipaddress.ip_network('192.168.42.0/24') - subnet_cidr = 
ipaddress.ip_network('192.168.42.0/25') - - resp = client.create_vpc( - CidrBlock=str(vpc_cidr), - AmazonProvidedIpv6CidrBlock=False, - DryRun=False, - InstanceTenancy='default' - ) - vpc_id = resp['Vpc']['VpcId'] - - resp = client.create_subnet( - CidrBlock=str(subnet_cidr), - VpcId=vpc_id - ) - subnet_id = resp['Subnet']['SubnetId'] - client.modify_subnet_attribute( - SubnetId=subnet_id, - MapPublicIpOnLaunch={'Value': True} - ) - - resp = client.run_instances( - ImageId='ami-1234abcd', - MaxCount=1, - MinCount=1, - SubnetId=subnet_id - ) - instance = resp['Instances'][0] - instance.should.contain('PublicDnsName') - instance.should.contain('PublicIpAddress') - len(instance['PublicDnsName']).should.be.greater_than(0) - len(instance['PublicIpAddress']).should.be.greater_than(0) - - -@mock_ec2_deprecated -def test_run_instance_with_nic_autocreated(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - security_group1 = conn.create_security_group( - 'test security group #1', 'this is a test security group') - security_group2 = conn.create_security_group( - 'test security group #2', 'this is a test security group') - private_ip = "10.0.0.1" - - reservation = conn.run_instances('ami-1234abcd', subnet_id=subnet.id, - security_groups=[security_group1.name], - security_group_ids=[security_group2.id], - private_ip_address=private_ip) - instance = reservation.instances[0] - - all_enis = conn.get_all_network_interfaces() - all_enis.should.have.length_of(1) - eni = all_enis[0] - - instance.interfaces.should.have.length_of(1) - instance.interfaces[0].id.should.equal(eni.id) - - instance.subnet_id.should.equal(subnet.id) - instance.groups.should.have.length_of(2) - set([group.id for group in instance.groups]).should.equal( - set([security_group1.id, security_group2.id])) - - eni.subnet_id.should.equal(subnet.id) - eni.groups.should.have.length_of(2) - set([group.id for group in eni.groups]).should.equal( - set([security_group1.id, security_group2.id])) - eni.private_ip_addresses.should.have.length_of(1) - eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip) - - -@mock_ec2_deprecated -def test_run_instance_with_nic_preexisting(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - security_group1 = conn.create_security_group( - 'test security group #1', 'this is a test security group') - security_group2 = conn.create_security_group( - 'test security group #2', 'this is a test security group') - private_ip = "54.0.0.1" - eni = conn.create_network_interface( - subnet.id, private_ip, groups=[security_group1.id]) - - # Boto requires NetworkInterfaceCollection of NetworkInterfaceSpecifications... - # annoying, but generates the desired querystring. 
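- # The NetworkInterfaceSpecification pins the pre-created ENI to device
- # index 0, and the NetworkInterfaceCollection wrapper is what boto
- # serializes into the NetworkInterface.N.* request parameters that
- # RunInstances expects.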
- from boto.ec2.networkinterface import NetworkInterfaceSpecification, NetworkInterfaceCollection - interface = NetworkInterfaceSpecification( - network_interface_id=eni.id, device_index=0) - interfaces = NetworkInterfaceCollection(interface) - # end Boto objects - - reservation = conn.run_instances('ami-1234abcd', network_interfaces=interfaces, - security_group_ids=[security_group2.id]) - instance = reservation.instances[0] - - instance.subnet_id.should.equal(subnet.id) - - all_enis = conn.get_all_network_interfaces() - all_enis.should.have.length_of(1) - - instance.interfaces.should.have.length_of(1) - instance_eni = instance.interfaces[0] - instance_eni.id.should.equal(eni.id) - - instance_eni.subnet_id.should.equal(subnet.id) - instance_eni.groups.should.have.length_of(2) - set([group.id for group in instance_eni.groups]).should.equal( - set([security_group1.id, security_group2.id])) - instance_eni.private_ip_addresses.should.have.length_of(1) - instance_eni.private_ip_addresses[ - 0].private_ip_address.should.equal(private_ip) - - -@requires_boto_gte("2.32.0") -@mock_ec2_deprecated -def test_instance_with_nic_attach_detach(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - - security_group1 = conn.create_security_group( - 'test security group #1', 'this is a test security group') - security_group2 = conn.create_security_group( - 'test security group #2', 'this is a test security group') - - reservation = conn.run_instances( - 'ami-1234abcd', security_group_ids=[security_group1.id]) - instance = reservation.instances[0] - - eni = conn.create_network_interface(subnet.id, groups=[security_group2.id]) - - # Check initial instance and ENI data - instance.interfaces.should.have.length_of(1) - - eni.groups.should.have.length_of(1) - set([group.id for group in eni.groups]).should.equal( - set([security_group2.id])) - - # Attach - with assert_raises(EC2ResponseError) as ex: - conn.attach_network_interface( - eni.id, instance.id, device_index=1, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the AttachNetworkInterface operation: Request would have succeeded, but DryRun flag is set') - - conn.attach_network_interface(eni.id, instance.id, device_index=1) - - # Check attached instance and ENI data - instance.update() - instance.interfaces.should.have.length_of(2) - instance_eni = instance.interfaces[1] - instance_eni.id.should.equal(eni.id) - instance_eni.groups.should.have.length_of(2) - set([group.id for group in instance_eni.groups]).should.equal( - set([security_group1.id, security_group2.id])) - - eni = conn.get_all_network_interfaces( - filters={'network-interface-id': eni.id})[0] - eni.groups.should.have.length_of(2) - set([group.id for group in eni.groups]).should.equal( - set([security_group1.id, security_group2.id])) - - # Detach - with assert_raises(EC2ResponseError) as ex: - conn.detach_network_interface(instance_eni.attachment.id, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DetachNetworkInterface operation: Request would have succeeded, but DryRun flag is set') - - conn.detach_network_interface(instance_eni.attachment.id) - - # Check detached instance and ENI data - 
instance.update() - instance.interfaces.should.have.length_of(1) - - eni = conn.get_all_network_interfaces( - filters={'network-interface-id': eni.id})[0] - eni.groups.should.have.length_of(1) - set([group.id for group in eni.groups]).should.equal( - set([security_group2.id])) - - # Detach with invalid attachment ID - with assert_raises(EC2ResponseError) as cm: - conn.detach_network_interface('eni-attach-1234abcd') - cm.exception.code.should.equal('InvalidAttachmentID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_ec2_classic_has_public_ip_address(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd', key_name="keypair_name") - instance = reservation.instances[0] - instance.ip_address.should_not.equal(None) - instance.public_dns_name.should.contain(instance.ip_address.replace('.', '-')) - instance.private_ip_address.should_not.equal(None) - instance.private_dns_name.should.contain(instance.private_ip_address.replace('.', '-')) - - -@mock_ec2_deprecated -def test_run_instance_with_keypair(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd', key_name="keypair_name") - instance = reservation.instances[0] - - instance.key_name.should.equal("keypair_name") - - -@mock_ec2_deprecated -def test_describe_instance_status_no_instances(): - conn = boto.connect_ec2('the_key', 'the_secret') - all_status = conn.get_all_instance_status() - len(all_status).should.equal(0) - - -@mock_ec2_deprecated -def test_describe_instance_status_with_instances(): - conn = boto.connect_ec2('the_key', 'the_secret') - conn.run_instances('ami-1234abcd', key_name="keypair_name") - - all_status = conn.get_all_instance_status() - len(all_status).should.equal(1) - all_status[0].instance_status.status.should.equal('ok') - all_status[0].system_status.status.should.equal('ok') - - -@mock_ec2_deprecated -def test_describe_instance_status_with_instance_filter(): - conn = boto.connect_ec2('the_key', 'the_secret') - - # We want to filter based on this one - reservation = conn.run_instances('ami-1234abcd', key_name="keypair_name") - instance = reservation.instances[0] - - # This is just to setup the test - conn.run_instances('ami-1234abcd', key_name="keypair_name") - - all_status = conn.get_all_instance_status(instance_ids=[instance.id]) - len(all_status).should.equal(1) - all_status[0].id.should.equal(instance.id) - - # Call get_all_instance_status with a bad id should raise an error - with assert_raises(EC2ResponseError) as cm: - conn.get_all_instance_status(instance_ids=[instance.id, "i-1234abcd"]) - cm.exception.code.should.equal('InvalidInstanceID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@requires_boto_gte("2.32.0") -@mock_ec2_deprecated -def test_describe_instance_status_with_non_running_instances(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd', min_count=3) - instance1, instance2, instance3 = reservation.instances - instance1.stop() - instance2.terminate() - - all_running_status = conn.get_all_instance_status() - all_running_status.should.have.length_of(1) - all_running_status[0].id.should.equal(instance3.id) - all_running_status[0].state_name.should.equal('running') - - all_status = conn.get_all_instance_status(include_all_instances=True) - all_status.should.have.length_of(3) - - status1 = next((s for s in all_status if s.id == 
instance1.id), None)
- status1.state_name.should.equal('stopped')
-
- status2 = next((s for s in all_status if s.id == instance2.id), None)
- status2.state_name.should.equal('terminated')
-
- status3 = next((s for s in all_status if s.id == instance3.id), None)
- status3.state_name.should.equal('running')
-
-
-@mock_ec2_deprecated
-def test_get_instance_by_security_group():
- conn = boto.connect_ec2('the_key', 'the_secret')
-
- conn.run_instances('ami-1234abcd')
- instance = conn.get_only_instances()[0]
-
- security_group = conn.create_security_group('test', 'test')
-
- with assert_raises(EC2ResponseError) as ex:
- conn.modify_instance_attribute(instance.id, "groupSet", [
- security_group.id], dry_run=True)
- ex.exception.error_code.should.equal('DryRunOperation')
- ex.exception.status.should.equal(400)
- ex.exception.message.should.equal(
- 'An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set')
-
- conn.modify_instance_attribute(
- instance.id, "groupSet", [security_group.id])
-
- security_group_instances = security_group.instances()
-
- assert len(security_group_instances) == 1
- assert security_group_instances[0].id == instance.id
-
-
-@mock_ec2
-def test_modify_delete_on_termination():
- ec2_client = boto3.resource('ec2', region_name='us-west-1')
- result = ec2_client.create_instances(ImageId='ami-12345678', MinCount=1, MaxCount=1)
- instance = result[0]
- instance.load()
- instance.block_device_mappings[0]['Ebs']['DeleteOnTermination'].should.be(False)
- instance.modify_attribute(
- BlockDeviceMappings=[{
- 'DeviceName': '/dev/sda1',
- 'Ebs': {'DeleteOnTermination': True}
- }]
- )
- instance.load()
- instance.block_device_mappings[0]['Ebs']['DeleteOnTermination'].should.be(True)
-
-@mock_ec2
-def test_create_instance_ebs_optimized():
- ec2_resource = boto3.resource('ec2', region_name='eu-west-1')
-
- instance = ec2_resource.create_instances(
- ImageId = 'ami-12345678',
- MaxCount = 1,
- MinCount = 1,
- EbsOptimized = True,
- )[0]
- instance.load()
- instance.ebs_optimized.should.be(True)
-
- instance.modify_attribute(
- EbsOptimized={
- 'Value': False
- }
- )
- instance.load()
- instance.ebs_optimized.should.be(False)
+from __future__ import unicode_literals
+# Ensure 'assert_raises' context manager support for Python 2.6
+import tests.backport_assert_raises
+from nose.tools import assert_raises

+import base64
+import datetime
+import ipaddress
+
+import six
+import boto
+import boto3
+from boto.ec2.instance import Reservation, InstanceAttribute
+from boto.exception import EC2ResponseError
+from freezegun import freeze_time
+import sure # noqa
+
+from moto import mock_ec2_deprecated, mock_ec2
+from tests.helpers import requires_boto_gte
+
+
+################ Test Readme ###############
+def add_servers(ami_id, count):
+ conn = boto.connect_ec2()
+ for index in range(count):
+ conn.run_instances(ami_id)
+
+
+@mock_ec2_deprecated
+def test_add_servers():
+ add_servers('ami-1234abcd', 2)
+
+ conn = boto.connect_ec2()
+ reservations = conn.get_all_instances()
+ assert len(reservations) == 2
+ instance1 = reservations[0].instances[0]
+ assert instance1.image_id == 'ami-1234abcd'
+
+############################################
+
+
+@freeze_time("2014-01-01 05:00:00")
+@mock_ec2_deprecated
+def test_instance_launch_and_terminate():
+ conn = boto.connect_ec2('the_key', 'the_secret')
+
+ with assert_raises(EC2ResponseError) as ex:
+ reservation = conn.run_instances('ami-1234abcd', dry_run=True)
+ ex.exception.error_code.should.equal('DryRunOperation')
+ ex.exception.status.should.equal(400)
+ ex.exception.message.should.equal(
+ 'An error occurred (DryRunOperation) when calling the RunInstance operation: Request would have succeeded, but DryRun flag is set')
+
+ reservation = conn.run_instances('ami-1234abcd')
+ reservation.should.be.a(Reservation)
+ reservation.instances.should.have.length_of(1)
+ instance = reservation.instances[0]
+ instance.state.should.equal('pending')
+
+ reservations = conn.get_all_instances()
+ reservations.should.have.length_of(1)
+ reservations[0].id.should.equal(reservation.id)
+ instances = reservations[0].instances
+ instances.should.have.length_of(1)
+ instance = instances[0]
+ instance.id.should.equal(reservation.instances[0].id)
+ instance.state.should.equal('running')
+ instance.launch_time.should.equal("2014-01-01T05:00:00.000Z")
+ instance.vpc_id.should.equal(None)
+ instance.placement.should.equal('us-east-1a')
+
+ root_device_name = instance.root_device_name
+ instance.block_device_mapping[
+ root_device_name].status.should.equal('in-use')
+ volume_id = instance.block_device_mapping[root_device_name].volume_id
+ volume_id.should.match(r'vol-\w+')
+
+ volume = conn.get_all_volumes(volume_ids=[volume_id])[0]
+ volume.attach_data.instance_id.should.equal(instance.id)
+ volume.status.should.equal('in-use')
+
+ with assert_raises(EC2ResponseError) as ex:
+ conn.terminate_instances([instance.id], dry_run=True)
+ ex.exception.error_code.should.equal('DryRunOperation')
+ ex.exception.status.should.equal(400)
+ ex.exception.message.should.equal(
+ 'An error occurred (DryRunOperation) when calling the TerminateInstance operation: Request would have succeeded, but DryRun flag is set')
+
+ conn.terminate_instances([instance.id])
+
+ reservations = conn.get_all_instances()
+ instance = reservations[0].instances[0]
+ instance.state.should.equal('terminated')
+
+
+@mock_ec2_deprecated
+def test_terminate_empty_instances():
+ conn = boto.connect_ec2('the_key', 'the_secret')
+ conn.terminate_instances.when.called_with(
+ []).should.throw(EC2ResponseError)
+
+
+@freeze_time("2014-01-01 05:00:00")
+@mock_ec2_deprecated
+def test_instance_attach_volume():
+ conn = boto.connect_ec2('the_key', 'the_secret')
+ reservation = conn.run_instances('ami-1234abcd')
+ instance = reservation.instances[0]
+
+ vol1 = conn.create_volume(size=36, zone=conn.region.name)
+ vol1.attach(instance.id, "/dev/sda1")
+ vol1.update()
+ vol2 = conn.create_volume(size=65, zone=conn.region.name)
+ vol2.attach(instance.id, "/dev/sdb1")
+ vol2.update()
+ vol3 = conn.create_volume(size=130, zone=conn.region.name)
+ vol3.attach(instance.id, "/dev/sdc1")
+ vol3.update()
+
+ reservations = conn.get_all_instances()
+ instance = reservations[0].instances[0]
+
+ instance.block_device_mapping.should.have.length_of(3)
+
+ for v in conn.get_all_volumes(volume_ids=[instance.block_device_mapping['/dev/sdc1'].volume_id]):
+ v.attach_data.instance_id.should.equal(instance.id)
+ # can do due to freeze_time decorator.
+ v.attach_data.attach_time.should.equal(instance.launch_time)
+ # can do due to freeze_time decorator.
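+ # freeze_time pins the mock clock at 2014-01-01 05:00:00, so the
+ # launch, create and attach timestamps that moto generates all
+ # coincide and can safely be compared for equality.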
+ v.create_time.should.equal(instance.launch_time) + v.region.name.should.equal(instance.region.name) + v.status.should.equal('in-use') + + +@mock_ec2_deprecated +def test_get_instances_by_id(): + conn = boto.connect_ec2() + reservation = conn.run_instances('ami-1234abcd', min_count=2) + instance1, instance2 = reservation.instances + + reservations = conn.get_all_instances(instance_ids=[instance1.id]) + reservations.should.have.length_of(1) + reservation = reservations[0] + reservation.instances.should.have.length_of(1) + reservation.instances[0].id.should.equal(instance1.id) + + reservations = conn.get_all_instances( + instance_ids=[instance1.id, instance2.id]) + reservations.should.have.length_of(1) + reservation = reservations[0] + reservation.instances.should.have.length_of(2) + instance_ids = [instance.id for instance in reservation.instances] + instance_ids.should.equal([instance1.id, instance2.id]) + + # Call get_all_instances with a bad id should raise an error + with assert_raises(EC2ResponseError) as cm: + conn.get_all_instances(instance_ids=[instance1.id, "i-1234abcd"]) + cm.exception.code.should.equal('InvalidInstanceID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2 +def test_get_paginated_instances(): + image_id = 'ami-1234abcd' + client = boto3.client('ec2', region_name='us-east-1') + conn = boto3.resource('ec2', 'us-east-1') + for i in range(100): + conn.create_instances(ImageId=image_id, + MinCount=1, + MaxCount=1) + resp = client.describe_instances(MaxResults=50) + reservations = resp['Reservations'] + reservations.should.have.length_of(50) + next_token = resp['NextToken'] + next_token.should_not.be.none + resp2 = client.describe_instances(NextToken=next_token) + reservations.extend(resp2['Reservations']) + reservations.should.have.length_of(100) + assert 'NextToken' not in resp2.keys() + + +@mock_ec2 +def test_create_with_tags(): + ec2 = boto3.client('ec2', region_name='us-west-2') + instances = ec2.run_instances( + ImageId='ami-123', + MinCount=1, + MaxCount=1, + InstanceType='t2.micro', + TagSpecifications=[ + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG1', + 'Value': 'MY_VALUE1', + }, + { + 'Key': 'MY_TAG2', + 'Value': 'MY_VALUE2', + }, + ], + }, + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG3', + 'Value': 'MY_VALUE3', + }, + ] + }, + ], + ) + assert 'Tags' in instances['Instances'][0] + len(instances['Instances'][0]['Tags']).should.equal(3) + + +@mock_ec2_deprecated +def test_get_instances_filtering_by_state(): + conn = boto.connect_ec2() + reservation = conn.run_instances('ami-1234abcd', min_count=3) + instance1, instance2, instance3 = reservation.instances + + conn.terminate_instances([instance1.id]) + + reservations = conn.get_all_instances( + filters={'instance-state-name': 'running'}) + reservations.should.have.length_of(1) + # Since we terminated instance1, only instance2 and instance3 should be + # returned + instance_ids = [instance.id for instance in reservations[0].instances] + set(instance_ids).should.equal(set([instance2.id, instance3.id])) + + reservations = conn.get_all_instances( + [instance2.id], filters={'instance-state-name': 'running'}) + reservations.should.have.length_of(1) + instance_ids = [instance.id for instance in reservations[0].instances] + instance_ids.should.equal([instance2.id]) + + reservations = conn.get_all_instances( + [instance2.id], filters={'instance-state-name': 'terminated'}) + list(reservations).should.equal([]) + + # 
get_all_instances should still return all 3 + reservations = conn.get_all_instances() + reservations[0].instances.should.have.length_of(3) + + conn.get_all_instances.when.called_with( + filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) + + +@mock_ec2_deprecated +def test_get_instances_filtering_by_instance_id(): + conn = boto.connect_ec2() + reservation = conn.run_instances('ami-1234abcd', min_count=3) + instance1, instance2, instance3 = reservation.instances + + reservations = conn.get_all_instances( + filters={'instance-id': instance1.id}) + # get_all_instances should return just instance1 + reservations[0].instances.should.have.length_of(1) + reservations[0].instances[0].id.should.equal(instance1.id) + + reservations = conn.get_all_instances( + filters={'instance-id': [instance1.id, instance2.id]}) + # get_all_instances should return two + reservations[0].instances.should.have.length_of(2) + + reservations = conn.get_all_instances( + filters={'instance-id': 'non-existing-id'}) + reservations.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_get_instances_filtering_by_instance_type(): + conn = boto.connect_ec2() + reservation1 = conn.run_instances('ami-1234abcd', instance_type='m1.small') + instance1 = reservation1.instances[0] + reservation2 = conn.run_instances('ami-1234abcd', instance_type='m1.small') + instance2 = reservation2.instances[0] + reservation3 = conn.run_instances('ami-1234abcd', instance_type='t1.micro') + instance3 = reservation3.instances[0] + + reservations = conn.get_all_instances( + filters={'instance-type': 'm1.small'}) + # get_all_instances should return instance1,2 + reservations.should.have.length_of(2) + reservations[0].instances.should.have.length_of(1) + reservations[1].instances.should.have.length_of(1) + instance_ids = [reservations[0].instances[0].id, + reservations[1].instances[0].id] + set(instance_ids).should.equal(set([instance1.id, instance2.id])) + + reservations = conn.get_all_instances( + filters={'instance-type': 't1.micro'}) + # get_all_instances should return one + reservations.should.have.length_of(1) + reservations[0].instances.should.have.length_of(1) + reservations[0].instances[0].id.should.equal(instance3.id) + + reservations = conn.get_all_instances( + filters={'instance-type': ['t1.micro', 'm1.small']}) + reservations.should.have.length_of(3) + reservations[0].instances.should.have.length_of(1) + reservations[1].instances.should.have.length_of(1) + reservations[2].instances.should.have.length_of(1) + instance_ids = [ + reservations[0].instances[0].id, + reservations[1].instances[0].id, + reservations[2].instances[0].id, + ] + set(instance_ids).should.equal( + set([instance1.id, instance2.id, instance3.id])) + + reservations = conn.get_all_instances(filters={'instance-type': 'bogus'}) + # bogus instance-type should return none + reservations.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_get_instances_filtering_by_reason_code(): + conn = boto.connect_ec2() + reservation = conn.run_instances('ami-1234abcd', min_count=3) + instance1, instance2, instance3 = reservation.instances + instance1.stop() + instance2.terminate() + + reservations = conn.get_all_instances( + filters={'state-reason-code': 'Client.UserInitiatedShutdown'}) + # get_all_instances should return instance1 and instance2 + reservations[0].instances.should.have.length_of(2) + set([instance1.id, instance2.id]).should.equal( + set([i.id for i in reservations[0].instances])) + + reservations = 
conn.get_all_instances(filters={'state-reason-code': ''}) + # get_all_instances should return instance 3 + reservations[0].instances.should.have.length_of(1) + reservations[0].instances[0].id.should.equal(instance3.id) + + +@mock_ec2_deprecated +def test_get_instances_filtering_by_source_dest_check(): + conn = boto.connect_ec2() + reservation = conn.run_instances('ami-1234abcd', min_count=2) + instance1, instance2 = reservation.instances + conn.modify_instance_attribute( + instance1.id, attribute='sourceDestCheck', value=False) + + source_dest_check_false = conn.get_all_instances( + filters={'source-dest-check': 'false'}) + source_dest_check_true = conn.get_all_instances( + filters={'source-dest-check': 'true'}) + + source_dest_check_false[0].instances.should.have.length_of(1) + source_dest_check_false[0].instances[0].id.should.equal(instance1.id) + + source_dest_check_true[0].instances.should.have.length_of(1) + source_dest_check_true[0].instances[0].id.should.equal(instance2.id) + + +@mock_ec2_deprecated +def test_get_instances_filtering_by_vpc_id(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc1 = conn.create_vpc("10.0.0.0/16") + subnet1 = conn.create_subnet(vpc1.id, "10.0.0.0/27") + reservation1 = conn.run_instances( + 'ami-1234abcd', min_count=1, subnet_id=subnet1.id) + instance1 = reservation1.instances[0] + + vpc2 = conn.create_vpc("10.1.0.0/16") + subnet2 = conn.create_subnet(vpc2.id, "10.1.0.0/27") + reservation2 = conn.run_instances( + 'ami-1234abcd', min_count=1, subnet_id=subnet2.id) + instance2 = reservation2.instances[0] + + reservations1 = conn.get_all_instances(filters={'vpc-id': vpc1.id}) + reservations1.should.have.length_of(1) + reservations1[0].instances.should.have.length_of(1) + reservations1[0].instances[0].id.should.equal(instance1.id) + reservations1[0].instances[0].vpc_id.should.equal(vpc1.id) + reservations1[0].instances[0].subnet_id.should.equal(subnet1.id) + + reservations2 = conn.get_all_instances(filters={'vpc-id': vpc2.id}) + reservations2.should.have.length_of(1) + reservations2[0].instances.should.have.length_of(1) + reservations2[0].instances[0].id.should.equal(instance2.id) + reservations2[0].instances[0].vpc_id.should.equal(vpc2.id) + reservations2[0].instances[0].subnet_id.should.equal(subnet2.id) + + +@mock_ec2_deprecated +def test_get_instances_filtering_by_architecture(): + conn = boto.connect_ec2() + reservation = conn.run_instances('ami-1234abcd', min_count=1) + instance = reservation.instances + + reservations = conn.get_all_instances(filters={'architecture': 'x86_64'}) + # get_all_instances should return the instance + reservations[0].instances.should.have.length_of(1) + + +@mock_ec2 +def test_get_instances_filtering_by_image_id(): + image_id = 'ami-1234abcd' + client = boto3.client('ec2', region_name='us-east-1') + conn = boto3.resource('ec2', 'us-east-1') + conn.create_instances(ImageId=image_id, + MinCount=1, + MaxCount=1) + + reservations = client.describe_instances(Filters=[{'Name': 'image-id', + 'Values': [image_id]}])['Reservations'] + reservations[0]['Instances'].should.have.length_of(1) + + +@mock_ec2 +def test_get_instances_filtering_by_private_dns(): + image_id = 'ami-1234abcd' + client = boto3.client('ec2', region_name='us-east-1') + conn = boto3.resource('ec2', 'us-east-1') + conn.create_instances(ImageId=image_id, + MinCount=1, + MaxCount=1, + PrivateIpAddress='10.0.0.1') + reservations = client.describe_instances(Filters=[ + {'Name': 'private-dns-name', 'Values': ['ip-10-0-0-1.ec2.internal']} + ])['Reservations'] + 
reservations[0]['Instances'].should.have.length_of(1) + + +@mock_ec2 +def test_get_instances_filtering_by_ni_private_dns(): + image_id = 'ami-1234abcd' + client = boto3.client('ec2', region_name='us-west-2') + conn = boto3.resource('ec2', 'us-west-2') + conn.create_instances(ImageId=image_id, + MinCount=1, + MaxCount=1, + PrivateIpAddress='10.0.0.1') + reservations = client.describe_instances(Filters=[ + {'Name': 'network-interface.private-dns-name', 'Values': ['ip-10-0-0-1.us-west-2.compute.internal']} + ])['Reservations'] + reservations[0]['Instances'].should.have.length_of(1) + + +@mock_ec2 +def test_get_instances_filtering_by_instance_group_name(): + image_id = 'ami-1234abcd' + client = boto3.client('ec2', region_name='us-east-1') + client.create_security_group( + Description='test', + GroupName='test_sg' + ) + client.run_instances(ImageId=image_id, + MinCount=1, + MaxCount=1, + SecurityGroups=['test_sg']) + reservations = client.describe_instances(Filters=[ + {'Name': 'instance.group-name', 'Values': ['test_sg']} + ])['Reservations'] + reservations[0]['Instances'].should.have.length_of(1) + + +@mock_ec2 +def test_get_instances_filtering_by_instance_group_id(): + image_id = 'ami-1234abcd' + client = boto3.client('ec2', region_name='us-east-1') + create_sg = client.create_security_group( + Description='test', + GroupName='test_sg' + ) + group_id = create_sg['GroupId'] + client.run_instances(ImageId=image_id, + MinCount=1, + MaxCount=1, + SecurityGroups=['test_sg']) + reservations = client.describe_instances(Filters=[ + {'Name': 'instance.group-id', 'Values': [group_id]} + ])['Reservations'] + reservations[0]['Instances'].should.have.length_of(1) + + +@mock_ec2_deprecated +def test_get_instances_filtering_by_tag(): + conn = boto.connect_ec2() + reservation = conn.run_instances('ami-1234abcd', min_count=3) + instance1, instance2, instance3 = reservation.instances + instance1.add_tag('tag1', 'value1') + instance1.add_tag('tag2', 'value2') + instance2.add_tag('tag1', 'value1') + instance2.add_tag('tag2', 'wrong value') + instance3.add_tag('tag2', 'value2') + + reservations = conn.get_all_instances(filters={'tag:tag0': 'value0'}) + # get_all_instances should return no instances + reservations.should.have.length_of(0) + + reservations = conn.get_all_instances(filters={'tag:tag1': 'value1'}) + # get_all_instances should return both instances with this tag value + reservations.should.have.length_of(1) + reservations[0].instances.should.have.length_of(2) + reservations[0].instances[0].id.should.equal(instance1.id) + reservations[0].instances[1].id.should.equal(instance2.id) + + reservations = conn.get_all_instances( + filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'}) + # get_all_instances should return the instance with both tag values + reservations.should.have.length_of(1) + reservations[0].instances.should.have.length_of(1) + reservations[0].instances[0].id.should.equal(instance1.id) + + reservations = conn.get_all_instances( + filters={'tag:tag2': ['value2', 'bogus']}) + # get_all_instances should return both instances with one of the + # acceptable tag values + reservations.should.have.length_of(1) + reservations[0].instances.should.have.length_of(2) + 
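# instance1 and instance3 both carry tag2=value2; instance2's 'wrong value' + # matches neither acceptable value +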
reservations[0].instances[0].id.should.equal(instance1.id) + reservations[0].instances[1].id.should.equal(instance3.id) + + +@mock_ec2_deprecated +def test_get_instances_filtering_by_tag_value(): + conn = boto.connect_ec2() + reservation = conn.run_instances('ami-1234abcd', min_count=3) + instance1, instance2, instance3 = reservation.instances + instance1.add_tag('tag1', 'value1') + instance1.add_tag('tag2', 'value2') + instance2.add_tag('tag1', 'value1') + instance2.add_tag('tag2', 'wrong value') + instance3.add_tag('tag2', 'value2') + + reservations = conn.get_all_instances(filters={'tag-value': 'value0'}) + # get_all_instances should return no instances + reservations.should.have.length_of(0) + + reservations = conn.get_all_instances(filters={'tag-value': 'value1'}) + # get_all_instances should return both instances with this tag value + reservations.should.have.length_of(1) + reservations[0].instances.should.have.length_of(2) + reservations[0].instances[0].id.should.equal(instance1.id) + reservations[0].instances[1].id.should.equal(instance2.id) + + reservations = conn.get_all_instances( + filters={'tag-value': ['value2', 'value1']}) + # get_all_instances should return all three instances, each with one of + # the acceptable tag values + reservations.should.have.length_of(1) + reservations[0].instances.should.have.length_of(3) + reservations[0].instances[0].id.should.equal(instance1.id) + reservations[0].instances[1].id.should.equal(instance2.id) + reservations[0].instances[2].id.should.equal(instance3.id) + + reservations = conn.get_all_instances( + filters={'tag-value': ['value2', 'bogus']}) + # get_all_instances should return both instances with one of the + # acceptable tag values + reservations.should.have.length_of(1) + reservations[0].instances.should.have.length_of(2) + reservations[0].instances[0].id.should.equal(instance1.id) + reservations[0].instances[1].id.should.equal(instance3.id) + + +@mock_ec2_deprecated +def test_get_instances_filtering_by_tag_name(): + conn = boto.connect_ec2() + reservation = conn.run_instances('ami-1234abcd', min_count=3) + instance1, instance2, instance3 = reservation.instances + instance1.add_tag('tag1') + instance1.add_tag('tag2') + instance2.add_tag('tag1') + instance2.add_tag('tag2X') + instance3.add_tag('tag3') + + reservations = conn.get_all_instances(filters={'tag-key': 'tagX'}) + # get_all_instances should return no instances + reservations.should.have.length_of(0) + + reservations = conn.get_all_instances(filters={'tag-key': 'tag1'}) + # get_all_instances should return both instances with this tag name + reservations.should.have.length_of(1) + reservations[0].instances.should.have.length_of(2) + reservations[0].instances[0].id.should.equal(instance1.id) + reservations[0].instances[1].id.should.equal(instance2.id) + + reservations = conn.get_all_instances( + filters={'tag-key': ['tag1', 'tag3']}) + # get_all_instances should return all three instances, each with one of + # the acceptable tag names + reservations.should.have.length_of(1) + reservations[0].instances.should.have.length_of(3) + reservations[0].instances[0].id.should.equal(instance1.id) + reservations[0].instances[1].id.should.equal(instance2.id) + reservations[0].instances[2].id.should.equal(instance3.id) + + +@mock_ec2_deprecated +def test_instance_start_and_stop(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd', min_count=2) + instances = reservation.instances + instances.should.have.length_of(2) + + instance_ids = [instance.id for instance
in instances] + + with assert_raises(EC2ResponseError) as ex: + stopped_instances = conn.stop_instances(instance_ids, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the StopInstance operation: Request would have succeeded, but DryRun flag is set') + + stopped_instances = conn.stop_instances(instance_ids) + + for instance in stopped_instances: + instance.state.should.equal('stopping') + + with assert_raises(EC2ResponseError) as ex: + started_instances = conn.start_instances( + [instances[0].id], dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the StartInstance operation: Request would have succeeded, but DryRun flag is set') + + started_instances = conn.start_instances([instances[0].id]) + started_instances[0].state.should.equal('pending') + + +@mock_ec2_deprecated +def test_instance_reboot(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + with assert_raises(EC2ResponseError) as ex: + instance.reboot(dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the RebootInstance operation: Request would have succeeded, but DryRun flag is set') + + instance.reboot() + instance.state.should.equal('pending') + + +@mock_ec2_deprecated +def test_instance_attribute_instance_type(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + with assert_raises(EC2ResponseError) as ex: + instance.modify_attribute("instanceType", "m1.small", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyInstanceType operation: Request would have succeeded, but DryRun flag is set') + + instance.modify_attribute("instanceType", "m1.small") + + instance_attribute = instance.get_attribute("instanceType") + instance_attribute.should.be.a(InstanceAttribute) + instance_attribute.get('instanceType').should.equal("m1.small") + + +@mock_ec2_deprecated +def test_modify_instance_attribute_security_groups(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + sg_id = 'sg-1234abcd' + sg_id2 = 'sg-abcd4321' + + with assert_raises(EC2ResponseError) as ex: + instance.modify_attribute("groupSet", [sg_id, sg_id2], dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set') + + instance.modify_attribute("groupSet", [sg_id, sg_id2]) + + instance_attribute = instance.get_attribute("groupSet") + instance_attribute.should.be.a(InstanceAttribute) + group_list = instance_attribute.get('groupSet') + any(g.id == sg_id for g in group_list).should.be.ok + any(g.id == sg_id2 for g in group_list).should.be.ok + + +@mock_ec2_deprecated +def 
test_instance_attribute_user_data(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + with assert_raises(EC2ResponseError) as ex: + instance.modify_attribute( + "userData", "this is my user data", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyUserData operation: Request would have succeeded, but DryRun flag is set') + + instance.modify_attribute("userData", "this is my user data") + + instance_attribute = instance.get_attribute("userData") + instance_attribute.should.be.a(InstanceAttribute) + instance_attribute.get("userData").should.equal("this is my user data") + + +@mock_ec2_deprecated +def test_instance_attribute_source_dest_check(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + # Default value is true + instance.sourceDestCheck.should.equal('true') + + instance_attribute = instance.get_attribute("sourceDestCheck") + instance_attribute.should.be.a(InstanceAttribute) + instance_attribute.get("sourceDestCheck").should.equal(True) + + # Set to false (note: Boto converts bool to string, eg 'false') + + with assert_raises(EC2ResponseError) as ex: + instance.modify_attribute("sourceDestCheck", False, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifySourceDestCheck operation: Request would have succeeded, but DryRun flag is set') + + instance.modify_attribute("sourceDestCheck", False) + + instance.update() + instance.sourceDestCheck.should.equal('false') + + instance_attribute = instance.get_attribute("sourceDestCheck") + instance_attribute.should.be.a(InstanceAttribute) + instance_attribute.get("sourceDestCheck").should.equal(False) + + # Set back to true + instance.modify_attribute("sourceDestCheck", True) + + instance.update() + instance.sourceDestCheck.should.equal('true') + + instance_attribute = instance.get_attribute("sourceDestCheck") + instance_attribute.should.be.a(InstanceAttribute) + instance_attribute.get("sourceDestCheck").should.equal(True) + + +@mock_ec2_deprecated +def test_user_data_with_run_instance(): + user_data = b"some user data" + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd', user_data=user_data) + instance = reservation.instances[0] + + instance_attribute = instance.get_attribute("userData") + instance_attribute.should.be.a(InstanceAttribute) + retrieved_user_data = instance_attribute.get("userData").encode('utf-8') + decoded_user_data = base64.decodestring(retrieved_user_data) + decoded_user_data.should.equal(b"some user data") + + +@mock_ec2_deprecated +def test_run_instance_with_security_group_name(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as ex: + group = conn.create_security_group( + 'group1', "some description", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set') + + group = conn.create_security_group('group1', 
"some description") + + reservation = conn.run_instances('ami-1234abcd', + security_groups=['group1']) + instance = reservation.instances[0] + + instance.groups[0].id.should.equal(group.id) + instance.groups[0].name.should.equal("group1") + + +@mock_ec2_deprecated +def test_run_instance_with_security_group_id(): + conn = boto.connect_ec2('the_key', 'the_secret') + group = conn.create_security_group('group1', "some description") + reservation = conn.run_instances('ami-1234abcd', + security_group_ids=[group.id]) + instance = reservation.instances[0] + + instance.groups[0].id.should.equal(group.id) + instance.groups[0].name.should.equal("group1") + + +@mock_ec2_deprecated +def test_run_instance_with_instance_type(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd', instance_type="t1.micro") + instance = reservation.instances[0] + + instance.instance_type.should.equal("t1.micro") + + +@mock_ec2_deprecated +def test_run_instance_with_default_placement(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + instance.placement.should.equal("us-east-1a") + + +@mock_ec2_deprecated +def test_run_instance_with_placement(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd', placement="us-east-1b") + instance = reservation.instances[0] + + instance.placement.should.equal("us-east-1b") + + +@mock_ec2 +def test_run_instance_with_subnet_boto3(): + client = boto3.client('ec2', region_name='eu-central-1') + + ip_networks = [ + (ipaddress.ip_network('10.0.0.0/16'), ipaddress.ip_network('10.0.99.0/24')), + (ipaddress.ip_network('192.168.42.0/24'), ipaddress.ip_network('192.168.42.0/25')) + ] + + # Tests instances are created with the correct IPs + for vpc_cidr, subnet_cidr in ip_networks: + resp = client.create_vpc( + CidrBlock=str(vpc_cidr), + AmazonProvidedIpv6CidrBlock=False, + DryRun=False, + InstanceTenancy='default' + ) + vpc_id = resp['Vpc']['VpcId'] + + resp = client.create_subnet( + CidrBlock=str(subnet_cidr), + VpcId=vpc_id + ) + subnet_id = resp['Subnet']['SubnetId'] + + resp = client.run_instances( + ImageId='ami-1234abcd', + MaxCount=1, + MinCount=1, + SubnetId=subnet_id + ) + instance = resp['Instances'][0] + instance['SubnetId'].should.equal(subnet_id) + + priv_ipv4 = ipaddress.ip_address(six.text_type(instance['PrivateIpAddress'])) + subnet_cidr.should.contain(priv_ipv4) + + +@mock_ec2 +def test_run_instance_with_specified_private_ipv4(): + client = boto3.client('ec2', region_name='eu-central-1') + + vpc_cidr = ipaddress.ip_network('192.168.42.0/24') + subnet_cidr = ipaddress.ip_network('192.168.42.0/25') + + resp = client.create_vpc( + CidrBlock=str(vpc_cidr), + AmazonProvidedIpv6CidrBlock=False, + DryRun=False, + InstanceTenancy='default' + ) + vpc_id = resp['Vpc']['VpcId'] + + resp = client.create_subnet( + CidrBlock=str(subnet_cidr), + VpcId=vpc_id + ) + subnet_id = resp['Subnet']['SubnetId'] + + resp = client.run_instances( + ImageId='ami-1234abcd', + MaxCount=1, + MinCount=1, + SubnetId=subnet_id, + PrivateIpAddress='192.168.42.5' + ) + instance = resp['Instances'][0] + instance['SubnetId'].should.equal(subnet_id) + instance['PrivateIpAddress'].should.equal('192.168.42.5') + + +@mock_ec2 +def test_run_instance_mapped_public_ipv4(): + client = boto3.client('ec2', region_name='eu-central-1') + + vpc_cidr = ipaddress.ip_network('192.168.42.0/24') + subnet_cidr = 
ipaddress.ip_network('192.168.42.0/25') + + resp = client.create_vpc( + CidrBlock=str(vpc_cidr), + AmazonProvidedIpv6CidrBlock=False, + DryRun=False, + InstanceTenancy='default' + ) + vpc_id = resp['Vpc']['VpcId'] + + resp = client.create_subnet( + CidrBlock=str(subnet_cidr), + VpcId=vpc_id + ) + subnet_id = resp['Subnet']['SubnetId'] + client.modify_subnet_attribute( + SubnetId=subnet_id, + MapPublicIpOnLaunch={'Value': True} + ) + + resp = client.run_instances( + ImageId='ami-1234abcd', + MaxCount=1, + MinCount=1, + SubnetId=subnet_id + ) + instance = resp['Instances'][0] + instance.should.contain('PublicDnsName') + instance.should.contain('PublicIpAddress') + len(instance['PublicDnsName']).should.be.greater_than(0) + len(instance['PublicIpAddress']).should.be.greater_than(0) + + +@mock_ec2_deprecated +def test_run_instance_with_nic_autocreated(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + security_group1 = conn.create_security_group( + 'test security group #1', 'this is a test security group') + security_group2 = conn.create_security_group( + 'test security group #2', 'this is a test security group') + private_ip = "10.0.0.1" + + reservation = conn.run_instances('ami-1234abcd', subnet_id=subnet.id, + security_groups=[security_group1.name], + security_group_ids=[security_group2.id], + private_ip_address=private_ip) + instance = reservation.instances[0] + + all_enis = conn.get_all_network_interfaces() + all_enis.should.have.length_of(1) + eni = all_enis[0] + + instance.interfaces.should.have.length_of(1) + instance.interfaces[0].id.should.equal(eni.id) + + instance.subnet_id.should.equal(subnet.id) + instance.groups.should.have.length_of(2) + set([group.id for group in instance.groups]).should.equal( + set([security_group1.id, security_group2.id])) + + eni.subnet_id.should.equal(subnet.id) + eni.groups.should.have.length_of(2) + set([group.id for group in eni.groups]).should.equal( + set([security_group1.id, security_group2.id])) + eni.private_ip_addresses.should.have.length_of(1) + eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip) + + +@mock_ec2_deprecated +def test_run_instance_with_nic_preexisting(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + security_group1 = conn.create_security_group( + 'test security group #1', 'this is a test security group') + security_group2 = conn.create_security_group( + 'test security group #2', 'this is a test security group') + private_ip = "54.0.0.1" + eni = conn.create_network_interface( + subnet.id, private_ip, groups=[security_group1.id]) + + # Boto requires NetworkInterfaceCollection of NetworkInterfaceSpecifications... + # annoying, but generates the desired querystring. 
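+ # (run_instances serializes each spec in the collection into the + # NetworkInterface.N.* query parameters that the EC2 API expects)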
+ from boto.ec2.networkinterface import NetworkInterfaceSpecification, NetworkInterfaceCollection + interface = NetworkInterfaceSpecification( + network_interface_id=eni.id, device_index=0) + interfaces = NetworkInterfaceCollection(interface) + # end Boto objects + + reservation = conn.run_instances('ami-1234abcd', network_interfaces=interfaces, + security_group_ids=[security_group2.id]) + instance = reservation.instances[0] + + instance.subnet_id.should.equal(subnet.id) + + all_enis = conn.get_all_network_interfaces() + all_enis.should.have.length_of(1) + + instance.interfaces.should.have.length_of(1) + instance_eni = instance.interfaces[0] + instance_eni.id.should.equal(eni.id) + + instance_eni.subnet_id.should.equal(subnet.id) + instance_eni.groups.should.have.length_of(2) + set([group.id for group in instance_eni.groups]).should.equal( + set([security_group1.id, security_group2.id])) + instance_eni.private_ip_addresses.should.have.length_of(1) + instance_eni.private_ip_addresses[ + 0].private_ip_address.should.equal(private_ip) + + +@requires_boto_gte("2.32.0") +@mock_ec2_deprecated +def test_instance_with_nic_attach_detach(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + + security_group1 = conn.create_security_group( + 'test security group #1', 'this is a test security group') + security_group2 = conn.create_security_group( + 'test security group #2', 'this is a test security group') + + reservation = conn.run_instances( + 'ami-1234abcd', security_group_ids=[security_group1.id]) + instance = reservation.instances[0] + + eni = conn.create_network_interface(subnet.id, groups=[security_group2.id]) + + # Check initial instance and ENI data + instance.interfaces.should.have.length_of(1) + + eni.groups.should.have.length_of(1) + set([group.id for group in eni.groups]).should.equal( + set([security_group2.id])) + + # Attach + with assert_raises(EC2ResponseError) as ex: + conn.attach_network_interface( + eni.id, instance.id, device_index=1, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AttachNetworkInterface operation: Request would have succeeded, but DryRun flag is set') + + conn.attach_network_interface(eni.id, instance.id, device_index=1) + + # Check attached instance and ENI data + instance.update() + instance.interfaces.should.have.length_of(2) + instance_eni = instance.interfaces[1] + instance_eni.id.should.equal(eni.id) + instance_eni.groups.should.have.length_of(2) + set([group.id for group in instance_eni.groups]).should.equal( + set([security_group1.id, security_group2.id])) + + eni = conn.get_all_network_interfaces( + filters={'network-interface-id': eni.id})[0] + eni.groups.should.have.length_of(2) + set([group.id for group in eni.groups]).should.equal( + set([security_group1.id, security_group2.id])) + + # Detach + with assert_raises(EC2ResponseError) as ex: + conn.detach_network_interface(instance_eni.attachment.id, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DetachNetworkInterface operation: Request would have succeeded, but DryRun flag is set') + + conn.detach_network_interface(instance_eni.attachment.id) + + # Check detached instance and ENI data + 
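# (update() re-fetches the instance from the mocked backend via + # DescribeInstances, so the interface list below reflects the detach) +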
instance.update() + instance.interfaces.should.have.length_of(1) + + eni = conn.get_all_network_interfaces( + filters={'network-interface-id': eni.id})[0] + eni.groups.should.have.length_of(1) + set([group.id for group in eni.groups]).should.equal( + set([security_group2.id])) + + # Detach with invalid attachment ID + with assert_raises(EC2ResponseError) as cm: + conn.detach_network_interface('eni-attach-1234abcd') + cm.exception.code.should.equal('InvalidAttachmentID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_ec2_classic_has_public_ip_address(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd', key_name="keypair_name") + instance = reservation.instances[0] + instance.ip_address.should_not.equal(None) + instance.public_dns_name.should.contain(instance.ip_address.replace('.', '-')) + instance.private_ip_address.should_not.equal(None) + instance.private_dns_name.should.contain(instance.private_ip_address.replace('.', '-')) + + +@mock_ec2_deprecated +def test_run_instance_with_keypair(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd', key_name="keypair_name") + instance = reservation.instances[0] + + instance.key_name.should.equal("keypair_name") + + +@mock_ec2_deprecated +def test_describe_instance_status_no_instances(): + conn = boto.connect_ec2('the_key', 'the_secret') + all_status = conn.get_all_instance_status() + len(all_status).should.equal(0) + + +@mock_ec2_deprecated +def test_describe_instance_status_with_instances(): + conn = boto.connect_ec2('the_key', 'the_secret') + conn.run_instances('ami-1234abcd', key_name="keypair_name") + + all_status = conn.get_all_instance_status() + len(all_status).should.equal(1) + all_status[0].instance_status.status.should.equal('ok') + all_status[0].system_status.status.should.equal('ok') + + +@mock_ec2_deprecated +def test_describe_instance_status_with_instance_filter(): + conn = boto.connect_ec2('the_key', 'the_secret') + + # We want to filter based on this one + reservation = conn.run_instances('ami-1234abcd', key_name="keypair_name") + instance = reservation.instances[0] + + # This is just to setup the test + conn.run_instances('ami-1234abcd', key_name="keypair_name") + + all_status = conn.get_all_instance_status(instance_ids=[instance.id]) + len(all_status).should.equal(1) + all_status[0].id.should.equal(instance.id) + + # Call get_all_instance_status with a bad id should raise an error + with assert_raises(EC2ResponseError) as cm: + conn.get_all_instance_status(instance_ids=[instance.id, "i-1234abcd"]) + cm.exception.code.should.equal('InvalidInstanceID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@requires_boto_gte("2.32.0") +@mock_ec2_deprecated +def test_describe_instance_status_with_non_running_instances(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd', min_count=3) + instance1, instance2, instance3 = reservation.instances + instance1.stop() + instance2.terminate() + + all_running_status = conn.get_all_instance_status() + all_running_status.should.have.length_of(1) + all_running_status[0].id.should.equal(instance3.id) + all_running_status[0].state_name.should.equal('running') + + all_status = conn.get_all_instance_status(include_all_instances=True) + all_status.should.have.length_of(3) + + status1 = next((s for s in all_status if s.id == 
instance1.id), None) + status1.state_name.should.equal('stopped') + + status2 = next((s for s in all_status if s.id == instance2.id), None) + status2.state_name.should.equal('terminated') + + status3 = next((s for s in all_status if s.id == instance3.id), None) + status3.state_name.should.equal('running') + + +@mock_ec2_deprecated +def test_get_instance_by_security_group(): + conn = boto.connect_ec2('the_key', 'the_secret') + + conn.run_instances('ami-1234abcd') + instance = conn.get_only_instances()[0] + + security_group = conn.create_security_group('test', 'test') + + with assert_raises(EC2ResponseError) as ex: + conn.modify_instance_attribute(instance.id, "groupSet", [ + security_group.id], dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ModifyInstanceSecurityGroups operation: Request would have succeeded, but DryRun flag is set') + + conn.modify_instance_attribute( + instance.id, "groupSet", [security_group.id]) + + security_group_instances = security_group.instances() + + assert len(security_group_instances) == 1 + assert security_group_instances[0].id == instance.id + + +@mock_ec2 +def test_modify_delete_on_termination(): + ec2_resource = boto3.resource('ec2', region_name='us-west-1') + result = ec2_resource.create_instances(ImageId='ami-12345678', MinCount=1, MaxCount=1) + instance = result[0] + instance.load() + instance.block_device_mappings[0]['Ebs']['DeleteOnTermination'].should.be(False) + instance.modify_attribute( + BlockDeviceMappings=[{ + 'DeviceName': '/dev/sda1', + 'Ebs': {'DeleteOnTermination': True} + }] + ) + instance.load() + instance.block_device_mappings[0]['Ebs']['DeleteOnTermination'].should.be(True) + + +@mock_ec2 +def test_create_instance_ebs_optimized(): + ec2_resource = boto3.resource('ec2', region_name='eu-west-1') + + instance = ec2_resource.create_instances( + ImageId='ami-12345678', + MaxCount=1, + MinCount=1, + EbsOptimized=True, + )[0] + instance.load() + instance.ebs_optimized.should.be(True) + + instance.modify_attribute( + EbsOptimized={ + 'Value': False + } + ) + instance.load() + instance.ebs_optimized.should.be(False) diff --git a/tests/test_ec2/test_internet_gateways.py b/tests/test_ec2/test_internet_gateways.py index 3a1d0fda9..1f010223c 100644 --- a/tests/test_ec2/test_internet_gateways.py +++ b/tests/test_ec2/test_internet_gateways.py @@ -1,269 +1,269 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises - -import re - -import boto -from boto.exception import EC2ResponseError - -import sure # noqa - -from moto import mock_ec2_deprecated - - -VPC_CIDR = "10.0.0.0/16" -BAD_VPC = "vpc-deadbeef" -BAD_IGW = "igw-deadbeef" - - -@mock_ec2_deprecated -def test_igw_create(): - """ internet gateway create """ - conn = boto.connect_vpc('the_key', 'the_secret') - - conn.get_all_internet_gateways().should.have.length_of(0) - - with assert_raises(EC2ResponseError) as ex: - igw = conn.create_internet_gateway(dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateInternetGateway operation: Request would have succeeded, but DryRun flag is set') - - igw = conn.create_internet_gateway() - 
conn.get_all_internet_gateways().should.have.length_of(1) - igw.id.should.match(r'igw-[0-9a-f]+') - - igw = conn.get_all_internet_gateways()[0] - igw.attachments.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_igw_attach(): - """ internet gateway attach """ - conn = boto.connect_vpc('the_key', 'the_secret') - igw = conn.create_internet_gateway() - vpc = conn.create_vpc(VPC_CIDR) - - with assert_raises(EC2ResponseError) as ex: - conn.attach_internet_gateway(igw.id, vpc.id, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the AttachInternetGateway operation: Request would have succeeded, but DryRun flag is set') - - conn.attach_internet_gateway(igw.id, vpc.id) - - igw = conn.get_all_internet_gateways()[0] - igw.attachments[0].vpc_id.should.be.equal(vpc.id) - - -@mock_ec2_deprecated -def test_igw_attach_bad_vpc(): - """ internet gateway fail to attach w/ bad vpc """ - conn = boto.connect_vpc('the_key', 'the_secret') - igw = conn.create_internet_gateway() - - with assert_raises(EC2ResponseError) as cm: - conn.attach_internet_gateway(igw.id, BAD_VPC) - cm.exception.code.should.equal('InvalidVpcID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_igw_attach_twice(): - """ internet gateway fail to attach twice """ - conn = boto.connect_vpc('the_key', 'the_secret') - igw = conn.create_internet_gateway() - vpc1 = conn.create_vpc(VPC_CIDR) - vpc2 = conn.create_vpc(VPC_CIDR) - conn.attach_internet_gateway(igw.id, vpc1.id) - - with assert_raises(EC2ResponseError) as cm: - conn.attach_internet_gateway(igw.id, vpc2.id) - cm.exception.code.should.equal('Resource.AlreadyAssociated') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_igw_detach(): - """ internet gateway detach""" - conn = boto.connect_vpc('the_key', 'the_secret') - igw = conn.create_internet_gateway() - vpc = conn.create_vpc(VPC_CIDR) - conn.attach_internet_gateway(igw.id, vpc.id) - - with assert_raises(EC2ResponseError) as ex: - conn.detach_internet_gateway(igw.id, vpc.id, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DetachInternetGateway operation: Request would have succeeded, but DryRun flag is set') - - conn.detach_internet_gateway(igw.id, vpc.id) - igw = conn.get_all_internet_gateways()[0] - igw.attachments.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_igw_detach_wrong_vpc(): - """ internet gateway fail to detach w/ wrong vpc """ - conn = boto.connect_vpc('the_key', 'the_secret') - igw = conn.create_internet_gateway() - vpc1 = conn.create_vpc(VPC_CIDR) - vpc2 = conn.create_vpc(VPC_CIDR) - conn.attach_internet_gateway(igw.id, vpc1.id) - - with assert_raises(EC2ResponseError) as cm: - conn.detach_internet_gateway(igw.id, vpc2.id) - cm.exception.code.should.equal('Gateway.NotAttached') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_igw_detach_invalid_vpc(): - """ internet gateway fail to detach w/ invalid vpc """ - conn = boto.connect_vpc('the_key', 'the_secret') - igw = conn.create_internet_gateway() - vpc = conn.create_vpc(VPC_CIDR) - conn.attach_internet_gateway(igw.id, vpc.id) 
- - with assert_raises(EC2ResponseError) as cm: - conn.detach_internet_gateway(igw.id, BAD_VPC) - cm.exception.code.should.equal('Gateway.NotAttached') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_igw_detach_unattached(): - """ internet gateway fail to detach unattached """ - conn = boto.connect_vpc('the_key', 'the_secret') - igw = conn.create_internet_gateway() - vpc = conn.create_vpc(VPC_CIDR) - - with assert_raises(EC2ResponseError) as cm: - conn.detach_internet_gateway(igw.id, vpc.id) - cm.exception.code.should.equal('Gateway.NotAttached') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_igw_delete(): - """ internet gateway delete""" - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc(VPC_CIDR) - conn.get_all_internet_gateways().should.have.length_of(0) - igw = conn.create_internet_gateway() - conn.get_all_internet_gateways().should.have.length_of(1) - - with assert_raises(EC2ResponseError) as ex: - conn.delete_internet_gateway(igw.id, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DeleteInternetGateway operation: Request would have succeeded, but DryRun flag is set') - - conn.delete_internet_gateway(igw.id) - conn.get_all_internet_gateways().should.have.length_of(0) - - -@mock_ec2_deprecated -def test_igw_delete_attached(): - """ internet gateway fail to delete attached """ - conn = boto.connect_vpc('the_key', 'the_secret') - igw = conn.create_internet_gateway() - vpc = conn.create_vpc(VPC_CIDR) - conn.attach_internet_gateway(igw.id, vpc.id) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_internet_gateway(igw.id) - cm.exception.code.should.equal('DependencyViolation') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_igw_desribe(): - """ internet gateway fetch by id """ - conn = boto.connect_vpc('the_key', 'the_secret') - igw = conn.create_internet_gateway() - igw_by_search = conn.get_all_internet_gateways([igw.id])[0] - igw.id.should.equal(igw_by_search.id) - - -@mock_ec2_deprecated -def test_igw_describe_bad_id(): - """ internet gateway fail to fetch by bad id """ - conn = boto.connect_vpc('the_key', 'the_secret') - with assert_raises(EC2ResponseError) as cm: - conn.get_all_internet_gateways([BAD_IGW]) - cm.exception.code.should.equal('InvalidInternetGatewayID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_igw_filter_by_vpc_id(): - """ internet gateway filter by vpc id """ - conn = boto.connect_vpc('the_key', 'the_secret') - - igw1 = conn.create_internet_gateway() - igw2 = conn.create_internet_gateway() - vpc = conn.create_vpc(VPC_CIDR) - conn.attach_internet_gateway(igw1.id, vpc.id) - - result = conn.get_all_internet_gateways( - filters={"attachment.vpc-id": vpc.id}) - result.should.have.length_of(1) - result[0].id.should.equal(igw1.id) - - -@mock_ec2_deprecated -def test_igw_filter_by_tags(): - """ internet gateway filter by vpc id """ - conn = boto.connect_vpc('the_key', 'the_secret') - - igw1 = conn.create_internet_gateway() - igw2 = conn.create_internet_gateway() - igw1.add_tag("tests", "yes") - - result = conn.get_all_internet_gateways(filters={"tag:tests": "yes"}) - 
result.should.have.length_of(1) - result[0].id.should.equal(igw1.id) - - -@mock_ec2_deprecated -def test_igw_filter_by_internet_gateway_id(): - """ internet gateway filter by internet gateway id """ - conn = boto.connect_vpc('the_key', 'the_secret') - - igw1 = conn.create_internet_gateway() - igw2 = conn.create_internet_gateway() - - result = conn.get_all_internet_gateways( - filters={"internet-gateway-id": igw1.id}) - result.should.have.length_of(1) - result[0].id.should.equal(igw1.id) - - -@mock_ec2_deprecated -def test_igw_filter_by_attachment_state(): - """ internet gateway filter by attachment state """ - conn = boto.connect_vpc('the_key', 'the_secret') - - igw1 = conn.create_internet_gateway() - igw2 = conn.create_internet_gateway() - vpc = conn.create_vpc(VPC_CIDR) - conn.attach_internet_gateway(igw1.id, vpc.id) - - result = conn.get_all_internet_gateways( - filters={"attachment.state": "available"}) - result.should.have.length_of(1) - result[0].id.should.equal(igw1.id) +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises + +import re + +import boto +from boto.exception import EC2ResponseError + +import sure # noqa + +from moto import mock_ec2_deprecated + + +VPC_CIDR = "10.0.0.0/16" +BAD_VPC = "vpc-deadbeef" +BAD_IGW = "igw-deadbeef" + + +@mock_ec2_deprecated +def test_igw_create(): + """ internet gateway create """ + conn = boto.connect_vpc('the_key', 'the_secret') + + conn.get_all_internet_gateways().should.have.length_of(0) + + with assert_raises(EC2ResponseError) as ex: + igw = conn.create_internet_gateway(dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateInternetGateway operation: Request would have succeeded, but DryRun flag is set') + + igw = conn.create_internet_gateway() + conn.get_all_internet_gateways().should.have.length_of(1) + igw.id.should.match(r'igw-[0-9a-f]+') + + igw = conn.get_all_internet_gateways()[0] + igw.attachments.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_igw_attach(): + """ internet gateway attach """ + conn = boto.connect_vpc('the_key', 'the_secret') + igw = conn.create_internet_gateway() + vpc = conn.create_vpc(VPC_CIDR) + + with assert_raises(EC2ResponseError) as ex: + conn.attach_internet_gateway(igw.id, vpc.id, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the AttachInternetGateway operation: Request would have succeeded, but DryRun flag is set') + + conn.attach_internet_gateway(igw.id, vpc.id) + + igw = conn.get_all_internet_gateways()[0] + igw.attachments[0].vpc_id.should.be.equal(vpc.id) + + +@mock_ec2_deprecated +def test_igw_attach_bad_vpc(): + """ internet gateway fail to attach w/ bad vpc """ + conn = boto.connect_vpc('the_key', 'the_secret') + igw = conn.create_internet_gateway() + + with assert_raises(EC2ResponseError) as cm: + conn.attach_internet_gateway(igw.id, BAD_VPC) + cm.exception.code.should.equal('InvalidVpcID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_igw_attach_twice(): + """ internet gateway fail to attach twice """ + conn = boto.connect_vpc('the_key', 'the_secret') + igw = 
conn.create_internet_gateway() + vpc1 = conn.create_vpc(VPC_CIDR) + vpc2 = conn.create_vpc(VPC_CIDR) + conn.attach_internet_gateway(igw.id, vpc1.id) + + with assert_raises(EC2ResponseError) as cm: + conn.attach_internet_gateway(igw.id, vpc2.id) + cm.exception.code.should.equal('Resource.AlreadyAssociated') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_igw_detach(): + """ internet gateway detach""" + conn = boto.connect_vpc('the_key', 'the_secret') + igw = conn.create_internet_gateway() + vpc = conn.create_vpc(VPC_CIDR) + conn.attach_internet_gateway(igw.id, vpc.id) + + with assert_raises(EC2ResponseError) as ex: + conn.detach_internet_gateway(igw.id, vpc.id, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DetachInternetGateway operation: Request would have succeeded, but DryRun flag is set') + + conn.detach_internet_gateway(igw.id, vpc.id) + igw = conn.get_all_internet_gateways()[0] + igw.attachments.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_igw_detach_wrong_vpc(): + """ internet gateway fail to detach w/ wrong vpc """ + conn = boto.connect_vpc('the_key', 'the_secret') + igw = conn.create_internet_gateway() + vpc1 = conn.create_vpc(VPC_CIDR) + vpc2 = conn.create_vpc(VPC_CIDR) + conn.attach_internet_gateway(igw.id, vpc1.id) + + with assert_raises(EC2ResponseError) as cm: + conn.detach_internet_gateway(igw.id, vpc2.id) + cm.exception.code.should.equal('Gateway.NotAttached') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_igw_detach_invalid_vpc(): + """ internet gateway fail to detach w/ invalid vpc """ + conn = boto.connect_vpc('the_key', 'the_secret') + igw = conn.create_internet_gateway() + vpc = conn.create_vpc(VPC_CIDR) + conn.attach_internet_gateway(igw.id, vpc.id) + + with assert_raises(EC2ResponseError) as cm: + conn.detach_internet_gateway(igw.id, BAD_VPC) + cm.exception.code.should.equal('Gateway.NotAttached') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_igw_detach_unattached(): + """ internet gateway fail to detach unattached """ + conn = boto.connect_vpc('the_key', 'the_secret') + igw = conn.create_internet_gateway() + vpc = conn.create_vpc(VPC_CIDR) + + with assert_raises(EC2ResponseError) as cm: + conn.detach_internet_gateway(igw.id, vpc.id) + cm.exception.code.should.equal('Gateway.NotAttached') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_igw_delete(): + """ internet gateway delete""" + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc(VPC_CIDR) + conn.get_all_internet_gateways().should.have.length_of(0) + igw = conn.create_internet_gateway() + conn.get_all_internet_gateways().should.have.length_of(1) + + with assert_raises(EC2ResponseError) as ex: + conn.delete_internet_gateway(igw.id, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteInternetGateway operation: Request would have succeeded, but DryRun flag is set') + + conn.delete_internet_gateway(igw.id) + conn.get_all_internet_gateways().should.have.length_of(0) + + 
+@mock_ec2_deprecated +def test_igw_delete_attached(): + """ internet gateway fail to delete attached """ + conn = boto.connect_vpc('the_key', 'the_secret') + igw = conn.create_internet_gateway() + vpc = conn.create_vpc(VPC_CIDR) + conn.attach_internet_gateway(igw.id, vpc.id) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_internet_gateway(igw.id) + cm.exception.code.should.equal('DependencyViolation') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_igw_describe(): + """ internet gateway fetch by id """ + conn = boto.connect_vpc('the_key', 'the_secret') + igw = conn.create_internet_gateway() + igw_by_search = conn.get_all_internet_gateways([igw.id])[0] + igw.id.should.equal(igw_by_search.id) + + +@mock_ec2_deprecated +def test_igw_describe_bad_id(): + """ internet gateway fail to fetch by bad id """ + conn = boto.connect_vpc('the_key', 'the_secret') + with assert_raises(EC2ResponseError) as cm: + conn.get_all_internet_gateways([BAD_IGW]) + cm.exception.code.should.equal('InvalidInternetGatewayID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_igw_filter_by_vpc_id(): + """ internet gateway filter by vpc id """ + conn = boto.connect_vpc('the_key', 'the_secret') + + igw1 = conn.create_internet_gateway() + igw2 = conn.create_internet_gateway() + vpc = conn.create_vpc(VPC_CIDR) + conn.attach_internet_gateway(igw1.id, vpc.id) + + result = conn.get_all_internet_gateways( + filters={"attachment.vpc-id": vpc.id}) + result.should.have.length_of(1) + result[0].id.should.equal(igw1.id) + + +@mock_ec2_deprecated +def test_igw_filter_by_tags(): + """ internet gateway filter by tags """ + conn = boto.connect_vpc('the_key', 'the_secret') + + igw1 = conn.create_internet_gateway() + igw2 = conn.create_internet_gateway() + igw1.add_tag("tests", "yes") + + result = conn.get_all_internet_gateways(filters={"tag:tests": "yes"}) + result.should.have.length_of(1) + result[0].id.should.equal(igw1.id) + + +@mock_ec2_deprecated +def test_igw_filter_by_internet_gateway_id(): + """ internet gateway filter by internet gateway id """ + conn = boto.connect_vpc('the_key', 'the_secret') + + igw1 = conn.create_internet_gateway() + igw2 = conn.create_internet_gateway() + + result = conn.get_all_internet_gateways( + filters={"internet-gateway-id": igw1.id}) + result.should.have.length_of(1) + result[0].id.should.equal(igw1.id) + + +@mock_ec2_deprecated +def test_igw_filter_by_attachment_state(): + """ internet gateway filter by attachment state """ + conn = boto.connect_vpc('the_key', 'the_secret') + + igw1 = conn.create_internet_gateway() + igw2 = conn.create_internet_gateway() + vpc = conn.create_vpc(VPC_CIDR) + conn.attach_internet_gateway(igw1.id, vpc.id) + + result = conn.get_all_internet_gateways( + filters={"attachment.state": "available"}) + result.should.have.length_of(1) + result[0].id.should.equal(igw1.id) diff --git a/tests/test_ec2/test_ip_addresses.py b/tests/test_ec2/test_ip_addresses.py index a8e927b00..60cf1cfc6 100644 --- a/tests/test_ec2/test_ip_addresses.py +++ b/tests/test_ec2/test_ip_addresses.py @@ -1,10 +1,10 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2 - - -@mock_ec2 -def test_ip_addresses(): - pass +from __future__ import unicode_literals +import boto +import sure # noqa + +from moto import mock_ec2 + + +@mock_ec2 +def test_ip_addresses(): + pass diff --git 
a/tests/test_ec2/test_key_pairs.py b/tests/test_ec2/test_key_pairs.py index 0a7fb9f76..75c1aa73f 100644 --- a/tests/test_ec2/test_key_pairs.py +++ b/tests/test_ec2/test_key_pairs.py @@ -1,151 +1,151 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises - -import boto -import six -import sure # noqa - -from boto.exception import EC2ResponseError -from moto import mock_ec2_deprecated - - -@mock_ec2_deprecated -def test_key_pairs_empty(): - conn = boto.connect_ec2('the_key', 'the_secret') - assert len(conn.get_all_key_pairs()) == 0 - - -@mock_ec2_deprecated -def test_key_pairs_invalid_id(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_key_pairs('foo') - cm.exception.code.should.equal('InvalidKeyPair.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_key_pairs_create(): - conn = boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as ex: - kp = conn.create_key_pair('foo', dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateKeyPair operation: Request would have succeeded, but DryRun flag is set') - - kp = conn.create_key_pair('foo') - assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') - kps = conn.get_all_key_pairs() - assert len(kps) == 1 - assert kps[0].name == 'foo' - - -@mock_ec2_deprecated -def test_key_pairs_create_two(): - conn = boto.connect_ec2('the_key', 'the_secret') - kp = conn.create_key_pair('foo') - kp = conn.create_key_pair('bar') - assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') - kps = conn.get_all_key_pairs() - kps.should.have.length_of(2) - [i.name for i in kps].should.contain('foo') - [i.name for i in kps].should.contain('bar') - kps = conn.get_all_key_pairs('foo') - kps.should.have.length_of(1) - kps[0].name.should.equal('foo') - - -@mock_ec2_deprecated -def test_key_pairs_create_exist(): - conn = boto.connect_ec2('the_key', 'the_secret') - kp = conn.create_key_pair('foo') - assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') - assert len(conn.get_all_key_pairs()) == 1 - - with assert_raises(EC2ResponseError) as cm: - conn.create_key_pair('foo') - cm.exception.code.should.equal('InvalidKeyPair.Duplicate') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_key_pairs_delete_no_exist(): - conn = boto.connect_ec2('the_key', 'the_secret') - assert len(conn.get_all_key_pairs()) == 0 - r = conn.delete_key_pair('foo') - r.should.be.ok - - -@mock_ec2_deprecated -def test_key_pairs_delete_exist(): - conn = boto.connect_ec2('the_key', 'the_secret') - conn.create_key_pair('foo') - - with assert_raises(EC2ResponseError) as ex: - r = conn.delete_key_pair('foo', dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DeleteKeyPair operation: Request would have succeeded, but DryRun flag is set') - - r = conn.delete_key_pair('foo') - r.should.be.ok - assert len(conn.get_all_key_pairs()) == 0 - - -@mock_ec2_deprecated -def test_key_pairs_import(): - conn = 
boto.connect_ec2('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as ex: - kp = conn.import_key_pair('foo', b'content', dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the ImportKeyPair operation: Request would have succeeded, but DryRun flag is set') - - kp = conn.import_key_pair('foo', b'content') - assert kp.name == 'foo' - kps = conn.get_all_key_pairs() - assert len(kps) == 1 - assert kps[0].name == 'foo' - - -@mock_ec2_deprecated -def test_key_pairs_import_exist(): - conn = boto.connect_ec2('the_key', 'the_secret') - kp = conn.import_key_pair('foo', b'content') - assert kp.name == 'foo' - assert len(conn.get_all_key_pairs()) == 1 - - with assert_raises(EC2ResponseError) as cm: - conn.create_key_pair('foo') - cm.exception.code.should.equal('InvalidKeyPair.Duplicate') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_key_pair_filters(): - conn = boto.connect_ec2('the_key', 'the_secret') - - _ = conn.create_key_pair('kpfltr1') - kp2 = conn.create_key_pair('kpfltr2') - kp3 = conn.create_key_pair('kpfltr3') - - kp_by_name = conn.get_all_key_pairs( - filters={'key-name': 'kpfltr2'}) - set([kp.name for kp in kp_by_name] - ).should.equal(set([kp2.name])) - - kp_by_name = conn.get_all_key_pairs( - filters={'fingerprint': kp3.fingerprint}) - set([kp.name for kp in kp_by_name] - ).should.equal(set([kp3.name])) +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises + +import boto +import six +import sure # noqa + +from boto.exception import EC2ResponseError +from moto import mock_ec2_deprecated + + +@mock_ec2_deprecated +def test_key_pairs_empty(): + conn = boto.connect_ec2('the_key', 'the_secret') + assert len(conn.get_all_key_pairs()) == 0 + + +@mock_ec2_deprecated +def test_key_pairs_invalid_id(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_key_pairs('foo') + cm.exception.code.should.equal('InvalidKeyPair.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_key_pairs_create(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as ex: + kp = conn.create_key_pair('foo', dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateKeyPair operation: Request would have succeeded, but DryRun flag is set') + + kp = conn.create_key_pair('foo') + assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') + kps = conn.get_all_key_pairs() + assert len(kps) == 1 + assert kps[0].name == 'foo' + + +@mock_ec2_deprecated +def test_key_pairs_create_two(): + conn = boto.connect_ec2('the_key', 'the_secret') + kp = conn.create_key_pair('foo') + kp = conn.create_key_pair('bar') + assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') + kps = conn.get_all_key_pairs() + kps.should.have.length_of(2) + [i.name for i in kps].should.contain('foo') + [i.name for i in kps].should.contain('bar') + kps = conn.get_all_key_pairs('foo') + kps.should.have.length_of(1) + kps[0].name.should.equal('foo') + + 
+@mock_ec2_deprecated +def test_key_pairs_create_exist(): + conn = boto.connect_ec2('the_key', 'the_secret') + kp = conn.create_key_pair('foo') + assert kp.material.startswith('---- BEGIN RSA PRIVATE KEY ----') + assert len(conn.get_all_key_pairs()) == 1 + + with assert_raises(EC2ResponseError) as cm: + conn.create_key_pair('foo') + cm.exception.code.should.equal('InvalidKeyPair.Duplicate') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_key_pairs_delete_no_exist(): + conn = boto.connect_ec2('the_key', 'the_secret') + assert len(conn.get_all_key_pairs()) == 0 + r = conn.delete_key_pair('foo') + r.should.be.ok + + +@mock_ec2_deprecated +def test_key_pairs_delete_exist(): + conn = boto.connect_ec2('the_key', 'the_secret') + conn.create_key_pair('foo') + + with assert_raises(EC2ResponseError) as ex: + r = conn.delete_key_pair('foo', dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteKeyPair operation: Request would have succeeded, but DryRun flag is set') + + r = conn.delete_key_pair('foo') + r.should.be.ok + assert len(conn.get_all_key_pairs()) == 0 + + +@mock_ec2_deprecated +def test_key_pairs_import(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as ex: + kp = conn.import_key_pair('foo', b'content', dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the ImportKeyPair operation: Request would have succeeded, but DryRun flag is set') + + kp = conn.import_key_pair('foo', b'content') + assert kp.name == 'foo' + kps = conn.get_all_key_pairs() + assert len(kps) == 1 + assert kps[0].name == 'foo' + + +@mock_ec2_deprecated +def test_key_pairs_import_exist(): + conn = boto.connect_ec2('the_key', 'the_secret') + kp = conn.import_key_pair('foo', b'content') + assert kp.name == 'foo' + assert len(conn.get_all_key_pairs()) == 1 + + with assert_raises(EC2ResponseError) as cm: + conn.create_key_pair('foo') + cm.exception.code.should.equal('InvalidKeyPair.Duplicate') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_key_pair_filters(): + conn = boto.connect_ec2('the_key', 'the_secret') + + _ = conn.create_key_pair('kpfltr1') + kp2 = conn.create_key_pair('kpfltr2') + kp3 = conn.create_key_pair('kpfltr3') + + kp_by_name = conn.get_all_key_pairs( + filters={'key-name': 'kpfltr2'}) + set([kp.name for kp in kp_by_name] + ).should.equal(set([kp2.name])) + + kp_by_name = conn.get_all_key_pairs( + filters={'fingerprint': kp3.fingerprint}) + set([kp.name for kp in kp_by_name] + ).should.equal(set([kp3.name])) diff --git a/tests/test_ec2/test_monitoring.py b/tests/test_ec2/test_monitoring.py index 03be93adf..95bd36e6a 100644 --- a/tests/test_ec2/test_monitoring.py +++ b/tests/test_ec2/test_monitoring.py @@ -1,10 +1,10 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2 - - -@mock_ec2 -def test_monitoring(): - pass +from __future__ import unicode_literals +import boto +import sure # noqa + +from moto import mock_ec2 + + +@mock_ec2 +def test_monitoring(): + pass diff --git a/tests/test_ec2/test_nat_gateway.py b/tests/test_ec2/test_nat_gateway.py index 27e8753be..310ae2c3a 
100644 --- a/tests/test_ec2/test_nat_gateway.py +++ b/tests/test_ec2/test_nat_gateway.py @@ -1,109 +1,109 @@ -from __future__ import unicode_literals -import boto3 -import sure # noqa -from moto import mock_ec2 - - -@mock_ec2 -def test_describe_nat_gateways(): - conn = boto3.client('ec2', 'us-east-1') - - response = conn.describe_nat_gateways() - - response['NatGateways'].should.have.length_of(0) - - -@mock_ec2 -def test_create_nat_gateway(): - conn = boto3.client('ec2', 'us-east-1') - vpc = conn.create_vpc(CidrBlock='10.0.0.0/16') - vpc_id = vpc['Vpc']['VpcId'] - subnet = conn.create_subnet( - VpcId=vpc_id, - CidrBlock='10.0.1.0/27', - AvailabilityZone='us-east-1a', - ) - allocation_id = conn.allocate_address(Domain='vpc')['AllocationId'] - subnet_id = subnet['Subnet']['SubnetId'] - - response = conn.create_nat_gateway( - SubnetId=subnet_id, - AllocationId=allocation_id, - ) - - response['NatGateway']['VpcId'].should.equal(vpc_id) - response['NatGateway']['SubnetId'].should.equal(subnet_id) - response['NatGateway']['State'].should.equal('available') - - -@mock_ec2 -def test_delete_nat_gateway(): - conn = boto3.client('ec2', 'us-east-1') - vpc = conn.create_vpc(CidrBlock='10.0.0.0/16') - vpc_id = vpc['Vpc']['VpcId'] - subnet = conn.create_subnet( - VpcId=vpc_id, - CidrBlock='10.0.1.0/27', - AvailabilityZone='us-east-1a', - ) - allocation_id = conn.allocate_address(Domain='vpc')['AllocationId'] - subnet_id = subnet['Subnet']['SubnetId'] - - nat_gateway = conn.create_nat_gateway( - SubnetId=subnet_id, - AllocationId=allocation_id, - ) - nat_gateway_id = nat_gateway['NatGateway']['NatGatewayId'] - response = conn.delete_nat_gateway(NatGatewayId=nat_gateway_id) - - # this is hard to match against, so remove it - response['ResponseMetadata'].pop('HTTPHeaders', None) - response['ResponseMetadata'].pop('RetryAttempts', None) - response.should.equal({ - 'NatGatewayId': nat_gateway_id, - 'ResponseMetadata': { - 'HTTPStatusCode': 200, - 'RequestId': '741fc8ab-6ebe-452b-b92b-example' - } - }) - - -@mock_ec2 -def test_create_and_describe_nat_gateway(): - conn = boto3.client('ec2', 'us-east-1') - vpc = conn.create_vpc(CidrBlock='10.0.0.0/16') - vpc_id = vpc['Vpc']['VpcId'] - subnet = conn.create_subnet( - VpcId=vpc_id, - CidrBlock='10.0.1.0/27', - AvailabilityZone='us-east-1a', - ) - allocation_id = conn.allocate_address(Domain='vpc')['AllocationId'] - subnet_id = subnet['Subnet']['SubnetId'] - - create_response = conn.create_nat_gateway( - SubnetId=subnet_id, - AllocationId=allocation_id, - ) - nat_gateway_id = create_response['NatGateway']['NatGatewayId'] - describe_response = conn.describe_nat_gateways() - - enis = conn.describe_network_interfaces()['NetworkInterfaces'] - eni_id = enis[0]['NetworkInterfaceId'] - public_ip = conn.describe_addresses(AllocationIds=[allocation_id])[ - 'Addresses'][0]['PublicIp'] - - describe_response['NatGateways'].should.have.length_of(1) - describe_response['NatGateways'][0][ - 'NatGatewayId'].should.equal(nat_gateway_id) - describe_response['NatGateways'][0]['State'].should.equal('available') - describe_response['NatGateways'][0]['SubnetId'].should.equal(subnet_id) - describe_response['NatGateways'][0]['VpcId'].should.equal(vpc_id) - describe_response['NatGateways'][0]['NatGatewayAddresses'][ - 0]['AllocationId'].should.equal(allocation_id) - describe_response['NatGateways'][0]['NatGatewayAddresses'][ - 0]['NetworkInterfaceId'].should.equal(eni_id) - assert describe_response['NatGateways'][0][ - 'NatGatewayAddresses'][0]['PrivateIp'].startswith('10.') - 
describe_response['NatGateways'][0]['NatGatewayAddresses'][ - 0]['PublicIp'].should.equal(public_ip) +from __future__ import unicode_literals +import boto3 +import sure # noqa +from moto import mock_ec2 + + +@mock_ec2 +def test_describe_nat_gateways(): + conn = boto3.client('ec2', 'us-east-1') + + response = conn.describe_nat_gateways() + + response['NatGateways'].should.have.length_of(0) + + +@mock_ec2 +def test_create_nat_gateway(): + conn = boto3.client('ec2', 'us-east-1') + vpc = conn.create_vpc(CidrBlock='10.0.0.0/16') + vpc_id = vpc['Vpc']['VpcId'] + subnet = conn.create_subnet( + VpcId=vpc_id, + CidrBlock='10.0.1.0/27', + AvailabilityZone='us-east-1a', + ) + allocation_id = conn.allocate_address(Domain='vpc')['AllocationId'] + subnet_id = subnet['Subnet']['SubnetId'] + + response = conn.create_nat_gateway( + SubnetId=subnet_id, + AllocationId=allocation_id, + ) + + response['NatGateway']['VpcId'].should.equal(vpc_id) + response['NatGateway']['SubnetId'].should.equal(subnet_id) + response['NatGateway']['State'].should.equal('available') + + +@mock_ec2 +def test_delete_nat_gateway(): + conn = boto3.client('ec2', 'us-east-1') + vpc = conn.create_vpc(CidrBlock='10.0.0.0/16') + vpc_id = vpc['Vpc']['VpcId'] + subnet = conn.create_subnet( + VpcId=vpc_id, + CidrBlock='10.0.1.0/27', + AvailabilityZone='us-east-1a', + ) + allocation_id = conn.allocate_address(Domain='vpc')['AllocationId'] + subnet_id = subnet['Subnet']['SubnetId'] + + nat_gateway = conn.create_nat_gateway( + SubnetId=subnet_id, + AllocationId=allocation_id, + ) + nat_gateway_id = nat_gateway['NatGateway']['NatGatewayId'] + response = conn.delete_nat_gateway(NatGatewayId=nat_gateway_id) + + # this is hard to match against, so remove it + response['ResponseMetadata'].pop('HTTPHeaders', None) + response['ResponseMetadata'].pop('RetryAttempts', None) + response.should.equal({ + 'NatGatewayId': nat_gateway_id, + 'ResponseMetadata': { + 'HTTPStatusCode': 200, + 'RequestId': '741fc8ab-6ebe-452b-b92b-example' + } + }) + + +@mock_ec2 +def test_create_and_describe_nat_gateway(): + conn = boto3.client('ec2', 'us-east-1') + vpc = conn.create_vpc(CidrBlock='10.0.0.0/16') + vpc_id = vpc['Vpc']['VpcId'] + subnet = conn.create_subnet( + VpcId=vpc_id, + CidrBlock='10.0.1.0/27', + AvailabilityZone='us-east-1a', + ) + allocation_id = conn.allocate_address(Domain='vpc')['AllocationId'] + subnet_id = subnet['Subnet']['SubnetId'] + + create_response = conn.create_nat_gateway( + SubnetId=subnet_id, + AllocationId=allocation_id, + ) + nat_gateway_id = create_response['NatGateway']['NatGatewayId'] + describe_response = conn.describe_nat_gateways() + + enis = conn.describe_network_interfaces()['NetworkInterfaces'] + eni_id = enis[0]['NetworkInterfaceId'] + public_ip = conn.describe_addresses(AllocationIds=[allocation_id])[ + 'Addresses'][0]['PublicIp'] + + describe_response['NatGateways'].should.have.length_of(1) + describe_response['NatGateways'][0][ + 'NatGatewayId'].should.equal(nat_gateway_id) + describe_response['NatGateways'][0]['State'].should.equal('available') + describe_response['NatGateways'][0]['SubnetId'].should.equal(subnet_id) + describe_response['NatGateways'][0]['VpcId'].should.equal(vpc_id) + describe_response['NatGateways'][0]['NatGatewayAddresses'][ + 0]['AllocationId'].should.equal(allocation_id) + describe_response['NatGateways'][0]['NatGatewayAddresses'][ + 0]['NetworkInterfaceId'].should.equal(eni_id) + assert describe_response['NatGateways'][0][ + 'NatGatewayAddresses'][0]['PrivateIp'].startswith('10.') + 
describe_response['NatGateways'][0]['NatGatewayAddresses'][ + 0]['PublicIp'].should.equal(public_ip) diff --git a/tests/test_ec2/test_network_acls.py b/tests/test_ec2/test_network_acls.py index fd2ec105e..ad3222b8a 100644 --- a/tests/test_ec2/test_network_acls.py +++ b/tests/test_ec2/test_network_acls.py @@ -1,175 +1,175 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2_deprecated - - -@mock_ec2_deprecated -def test_default_network_acl_created_with_vpc(): - conn = boto.connect_vpc('the_key', 'the secret') - vpc = conn.create_vpc("10.0.0.0/16") - all_network_acls = conn.get_all_network_acls() - all_network_acls.should.have.length_of(2) - - -@mock_ec2_deprecated -def test_network_acls(): - conn = boto.connect_vpc('the_key', 'the secret') - vpc = conn.create_vpc("10.0.0.0/16") - network_acl = conn.create_network_acl(vpc.id) - all_network_acls = conn.get_all_network_acls() - all_network_acls.should.have.length_of(3) - - -@mock_ec2_deprecated -def test_new_subnet_associates_with_default_network_acl(): - conn = boto.connect_vpc('the_key', 'the secret') - vpc = conn.get_all_vpcs()[0] - - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - all_network_acls = conn.get_all_network_acls() - all_network_acls.should.have.length_of(1) - - acl = all_network_acls[0] - acl.associations.should.have.length_of(4) - [a.subnet_id for a in acl.associations].should.contain(subnet.id) - - -@mock_ec2_deprecated -def test_network_acl_entries(): - conn = boto.connect_vpc('the_key', 'the secret') - vpc = conn.create_vpc("10.0.0.0/16") - - network_acl = conn.create_network_acl(vpc.id) - - network_acl_entry = conn.create_network_acl_entry( - network_acl.id, 110, 6, - 'ALLOW', '0.0.0.0/0', False, - port_range_from='443', - port_range_to='443' - ) - - all_network_acls = conn.get_all_network_acls() - all_network_acls.should.have.length_of(3) - - test_network_acl = next(na for na in all_network_acls - if na.id == network_acl.id) - entries = test_network_acl.network_acl_entries - entries.should.have.length_of(1) - entries[0].rule_number.should.equal('110') - entries[0].protocol.should.equal('6') - entries[0].rule_action.should.equal('ALLOW') - - -@mock_ec2_deprecated -def test_delete_network_acl_entry(): - conn = boto.connect_vpc('the_key', 'the secret') - vpc = conn.create_vpc("10.0.0.0/16") - - network_acl = conn.create_network_acl(vpc.id) - - conn.create_network_acl_entry( - network_acl.id, 110, 6, - 'ALLOW', '0.0.0.0/0', False, - port_range_from='443', - port_range_to='443' - ) - conn.delete_network_acl_entry( - network_acl.id, 110, False - ) - - all_network_acls = conn.get_all_network_acls() - - test_network_acl = next(na for na in all_network_acls - if na.id == network_acl.id) - entries = test_network_acl.network_acl_entries - entries.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_replace_network_acl_entry(): - conn = boto.connect_vpc('the_key', 'the secret') - vpc = conn.create_vpc("10.0.0.0/16") - - network_acl = conn.create_network_acl(vpc.id) - - conn.create_network_acl_entry( - network_acl.id, 110, 6, - 'ALLOW', '0.0.0.0/0', False, - port_range_from='443', - port_range_to='443' - ) - conn.replace_network_acl_entry( - network_acl.id, 110, -1, - 'DENY', '0.0.0.0/0', False, - port_range_from='22', - port_range_to='22' - ) - - all_network_acls = conn.get_all_network_acls() - - test_network_acl = next(na for na in all_network_acls - if na.id == network_acl.id) - entries = test_network_acl.network_acl_entries - entries.should.have.length_of(1) - 
entries[0].rule_number.should.equal('110') - entries[0].protocol.should.equal('-1') - entries[0].rule_action.should.equal('DENY') - -@mock_ec2_deprecated -def test_associate_new_network_acl_with_subnet(): - conn = boto.connect_vpc('the_key', 'the secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - network_acl = conn.create_network_acl(vpc.id) - - conn.associate_network_acl(network_acl.id, subnet.id) - - all_network_acls = conn.get_all_network_acls() - all_network_acls.should.have.length_of(3) - - test_network_acl = next(na for na in all_network_acls - if na.id == network_acl.id) - - test_network_acl.associations.should.have.length_of(1) - test_network_acl.associations[0].subnet_id.should.equal(subnet.id) - - -@mock_ec2_deprecated -def test_delete_network_acl(): - conn = boto.connect_vpc('the_key', 'the secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - network_acl = conn.create_network_acl(vpc.id) - - all_network_acls = conn.get_all_network_acls() - all_network_acls.should.have.length_of(3) - - any(acl.id == network_acl.id for acl in all_network_acls).should.be.ok - - conn.delete_network_acl(network_acl.id) - - updated_network_acls = conn.get_all_network_acls() - updated_network_acls.should.have.length_of(2) - - any(acl.id == network_acl.id for acl in updated_network_acls).shouldnt.be.ok - - -@mock_ec2_deprecated -def test_network_acl_tagging(): - conn = boto.connect_vpc('the_key', 'the secret') - vpc = conn.create_vpc("10.0.0.0/16") - network_acl = conn.create_network_acl(vpc.id) - - network_acl.add_tag("a key", "some value") - - tag = conn.get_all_tags()[0] - tag.name.should.equal("a key") - tag.value.should.equal("some value") - - all_network_acls = conn.get_all_network_acls() - test_network_acl = next(na for na in all_network_acls - if na.id == network_acl.id) - test_network_acl.tags.should.have.length_of(1) - test_network_acl.tags["a key"].should.equal("some value") +from __future__ import unicode_literals +import boto +import sure # noqa + +from moto import mock_ec2_deprecated + + +@mock_ec2_deprecated +def test_default_network_acl_created_with_vpc(): + conn = boto.connect_vpc('the_key', 'the secret') + vpc = conn.create_vpc("10.0.0.0/16") + all_network_acls = conn.get_all_network_acls() + all_network_acls.should.have.length_of(2) + + +@mock_ec2_deprecated +def test_network_acls(): + conn = boto.connect_vpc('the_key', 'the secret') + vpc = conn.create_vpc("10.0.0.0/16") + network_acl = conn.create_network_acl(vpc.id) + all_network_acls = conn.get_all_network_acls() + all_network_acls.should.have.length_of(3) + + +@mock_ec2_deprecated +def test_new_subnet_associates_with_default_network_acl(): + conn = boto.connect_vpc('the_key', 'the secret') + vpc = conn.get_all_vpcs()[0] + + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + all_network_acls = conn.get_all_network_acls() + all_network_acls.should.have.length_of(1) + + acl = all_network_acls[0] + acl.associations.should.have.length_of(4) + [a.subnet_id for a in acl.associations].should.contain(subnet.id) + + +@mock_ec2_deprecated +def test_network_acl_entries(): + conn = boto.connect_vpc('the_key', 'the secret') + vpc = conn.create_vpc("10.0.0.0/16") + + network_acl = conn.create_network_acl(vpc.id) + + network_acl_entry = conn.create_network_acl_entry( + network_acl.id, 110, 6, + 'ALLOW', '0.0.0.0/0', False, + port_range_from='443', + port_range_to='443' + ) + + all_network_acls = conn.get_all_network_acls() + 
all_network_acls.should.have.length_of(3) + + test_network_acl = next(na for na in all_network_acls + if na.id == network_acl.id) + entries = test_network_acl.network_acl_entries + entries.should.have.length_of(1) + entries[0].rule_number.should.equal('110') + entries[0].protocol.should.equal('6') + entries[0].rule_action.should.equal('ALLOW') + + +@mock_ec2_deprecated +def test_delete_network_acl_entry(): + conn = boto.connect_vpc('the_key', 'the secret') + vpc = conn.create_vpc("10.0.0.0/16") + + network_acl = conn.create_network_acl(vpc.id) + + conn.create_network_acl_entry( + network_acl.id, 110, 6, + 'ALLOW', '0.0.0.0/0', False, + port_range_from='443', + port_range_to='443' + ) + conn.delete_network_acl_entry( + network_acl.id, 110, False + ) + + all_network_acls = conn.get_all_network_acls() + + test_network_acl = next(na for na in all_network_acls + if na.id == network_acl.id) + entries = test_network_acl.network_acl_entries + entries.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_replace_network_acl_entry(): + conn = boto.connect_vpc('the_key', 'the secret') + vpc = conn.create_vpc("10.0.0.0/16") + + network_acl = conn.create_network_acl(vpc.id) + + conn.create_network_acl_entry( + network_acl.id, 110, 6, + 'ALLOW', '0.0.0.0/0', False, + port_range_from='443', + port_range_to='443' + ) + conn.replace_network_acl_entry( + network_acl.id, 110, -1, + 'DENY', '0.0.0.0/0', False, + port_range_from='22', + port_range_to='22' + ) + + all_network_acls = conn.get_all_network_acls() + + test_network_acl = next(na for na in all_network_acls + if na.id == network_acl.id) + entries = test_network_acl.network_acl_entries + entries.should.have.length_of(1) + entries[0].rule_number.should.equal('110') + entries[0].protocol.should.equal('-1') + entries[0].rule_action.should.equal('DENY') + +@mock_ec2_deprecated +def test_associate_new_network_acl_with_subnet(): + conn = boto.connect_vpc('the_key', 'the secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + network_acl = conn.create_network_acl(vpc.id) + + conn.associate_network_acl(network_acl.id, subnet.id) + + all_network_acls = conn.get_all_network_acls() + all_network_acls.should.have.length_of(3) + + test_network_acl = next(na for na in all_network_acls + if na.id == network_acl.id) + + test_network_acl.associations.should.have.length_of(1) + test_network_acl.associations[0].subnet_id.should.equal(subnet.id) + + +@mock_ec2_deprecated +def test_delete_network_acl(): + conn = boto.connect_vpc('the_key', 'the secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + network_acl = conn.create_network_acl(vpc.id) + + all_network_acls = conn.get_all_network_acls() + all_network_acls.should.have.length_of(3) + + any(acl.id == network_acl.id for acl in all_network_acls).should.be.ok + + conn.delete_network_acl(network_acl.id) + + updated_network_acls = conn.get_all_network_acls() + updated_network_acls.should.have.length_of(2) + + any(acl.id == network_acl.id for acl in updated_network_acls).shouldnt.be.ok + + +@mock_ec2_deprecated +def test_network_acl_tagging(): + conn = boto.connect_vpc('the_key', 'the secret') + vpc = conn.create_vpc("10.0.0.0/16") + network_acl = conn.create_network_acl(vpc.id) + + network_acl.add_tag("a key", "some value") + + tag = conn.get_all_tags()[0] + tag.name.should.equal("a key") + tag.value.should.equal("some value") + + all_network_acls = conn.get_all_network_acls() + test_network_acl = next(na for na in 
all_network_acls + if na.id == network_acl.id) + test_network_acl.tags.should.have.length_of(1) + test_network_acl.tags["a key"].should.equal("some value") diff --git a/tests/test_ec2/test_placement_groups.py b/tests/test_ec2/test_placement_groups.py index c7494228a..bc389488b 100644 --- a/tests/test_ec2/test_placement_groups.py +++ b/tests/test_ec2/test_placement_groups.py @@ -1,10 +1,10 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2 - - -@mock_ec2 -def test_placement_groups(): - pass +from __future__ import unicode_literals +import boto +import sure # noqa + +from moto import mock_ec2 + + +@mock_ec2 +def test_placement_groups(): + pass diff --git a/tests/test_ec2/test_regions.py b/tests/test_ec2/test_regions.py index 1e87b253c..7f0ea2f18 100644 --- a/tests/test_ec2/test_regions.py +++ b/tests/test_ec2/test_regions.py @@ -1,148 +1,148 @@ -from __future__ import unicode_literals -import boto.ec2 -import boto.ec2.autoscale -import boto.ec2.elb -import sure -from moto import mock_ec2_deprecated, mock_autoscaling_deprecated, mock_elb_deprecated - -from moto.ec2 import ec2_backends - -def test_use_boto_regions(): - boto_regions = {r.name for r in boto.ec2.regions()} - moto_regions = set(ec2_backends) - - moto_regions.should.equal(boto_regions) - -def add_servers_to_region(ami_id, count, region): - conn = boto.ec2.connect_to_region(region) - for index in range(count): - conn.run_instances(ami_id) - -@mock_ec2_deprecated -def test_add_servers_to_a_single_region(): - region = 'ap-northeast-1' - add_servers_to_region('ami-1234abcd', 1, region) - add_servers_to_region('ami-5678efgh', 1, region) - - conn = boto.ec2.connect_to_region(region) - reservations = conn.get_all_instances() - len(reservations).should.equal(2) - reservations.sort(key=lambda x: x.instances[0].image_id) - - reservations[0].instances[0].image_id.should.equal('ami-1234abcd') - reservations[1].instances[0].image_id.should.equal('ami-5678efgh') - - -@mock_ec2_deprecated -def test_add_servers_to_multiple_regions(): - region1 = 'us-east-1' - region2 = 'ap-northeast-1' - add_servers_to_region('ami-1234abcd', 1, region1) - add_servers_to_region('ami-5678efgh', 1, region2) - - us_conn = boto.ec2.connect_to_region(region1) - ap_conn = boto.ec2.connect_to_region(region2) - us_reservations = us_conn.get_all_instances() - ap_reservations = ap_conn.get_all_instances() - - len(us_reservations).should.equal(1) - len(ap_reservations).should.equal(1) - - us_reservations[0].instances[0].image_id.should.equal('ami-1234abcd') - ap_reservations[0].instances[0].image_id.should.equal('ami-5678efgh') - - -@mock_autoscaling_deprecated -@mock_elb_deprecated -def test_create_autoscaling_group(): - elb_conn = boto.ec2.elb.connect_to_region('us-east-1') - elb_conn.create_load_balancer( - 'us_test_lb', zones=[], listeners=[(80, 8080, 'http')]) - elb_conn = boto.ec2.elb.connect_to_region('ap-northeast-1') - elb_conn.create_load_balancer( - 'ap_test_lb', zones=[], listeners=[(80, 8080, 'http')]) - - us_conn = boto.ec2.autoscale.connect_to_region('us-east-1') - config = boto.ec2.autoscale.LaunchConfiguration( - name='us_tester', - image_id='ami-abcd1234', - instance_type='m1.small', - ) - us_conn.create_launch_configuration(config) - - group = boto.ec2.autoscale.AutoScalingGroup( - name='us_tester_group', - availability_zones=['us-east-1c'], - default_cooldown=60, - desired_capacity=2, - health_check_period=100, - health_check_type="EC2", - max_size=2, - min_size=2, - launch_config=config, - 
load_balancers=["us_test_lb"], - placement_group="us_test_placement", - vpc_zone_identifier='subnet-1234abcd', - termination_policies=["OldestInstance", "NewestInstance"], - ) - us_conn.create_auto_scaling_group(group) - - ap_conn = boto.ec2.autoscale.connect_to_region('ap-northeast-1') - config = boto.ec2.autoscale.LaunchConfiguration( - name='ap_tester', - image_id='ami-efgh5678', - instance_type='m1.small', - ) - ap_conn.create_launch_configuration(config) - - group = boto.ec2.autoscale.AutoScalingGroup( - name='ap_tester_group', - availability_zones=['ap-northeast-1a'], - default_cooldown=60, - desired_capacity=2, - health_check_period=100, - health_check_type="EC2", - max_size=2, - min_size=2, - launch_config=config, - load_balancers=["ap_test_lb"], - placement_group="ap_test_placement", - vpc_zone_identifier='subnet-5678efgh', - termination_policies=["OldestInstance", "NewestInstance"], - ) - ap_conn.create_auto_scaling_group(group) - - len(us_conn.get_all_groups()).should.equal(1) - len(ap_conn.get_all_groups()).should.equal(1) - - us_group = us_conn.get_all_groups()[0] - us_group.name.should.equal('us_tester_group') - list(us_group.availability_zones).should.equal(['us-east-1c']) - us_group.desired_capacity.should.equal(2) - us_group.max_size.should.equal(2) - us_group.min_size.should.equal(2) - us_group.vpc_zone_identifier.should.equal('subnet-1234abcd') - us_group.launch_config_name.should.equal('us_tester') - us_group.default_cooldown.should.equal(60) - us_group.health_check_period.should.equal(100) - us_group.health_check_type.should.equal("EC2") - list(us_group.load_balancers).should.equal(["us_test_lb"]) - us_group.placement_group.should.equal("us_test_placement") - list(us_group.termination_policies).should.equal( - ["OldestInstance", "NewestInstance"]) - - ap_group = ap_conn.get_all_groups()[0] - ap_group.name.should.equal('ap_tester_group') - list(ap_group.availability_zones).should.equal(['ap-northeast-1a']) - ap_group.desired_capacity.should.equal(2) - ap_group.max_size.should.equal(2) - ap_group.min_size.should.equal(2) - ap_group.vpc_zone_identifier.should.equal('subnet-5678efgh') - ap_group.launch_config_name.should.equal('ap_tester') - ap_group.default_cooldown.should.equal(60) - ap_group.health_check_period.should.equal(100) - ap_group.health_check_type.should.equal("EC2") - list(ap_group.load_balancers).should.equal(["ap_test_lb"]) - ap_group.placement_group.should.equal("ap_test_placement") - list(ap_group.termination_policies).should.equal( - ["OldestInstance", "NewestInstance"]) +from __future__ import unicode_literals +import boto.ec2 +import boto.ec2.autoscale +import boto.ec2.elb +import sure +from moto import mock_ec2_deprecated, mock_autoscaling_deprecated, mock_elb_deprecated + +from moto.ec2 import ec2_backends + +def test_use_boto_regions(): + boto_regions = {r.name for r in boto.ec2.regions()} + moto_regions = set(ec2_backends) + + moto_regions.should.equal(boto_regions) + +def add_servers_to_region(ami_id, count, region): + conn = boto.ec2.connect_to_region(region) + for index in range(count): + conn.run_instances(ami_id) + +@mock_ec2_deprecated +def test_add_servers_to_a_single_region(): + region = 'ap-northeast-1' + add_servers_to_region('ami-1234abcd', 1, region) + add_servers_to_region('ami-5678efgh', 1, region) + + conn = boto.ec2.connect_to_region(region) + reservations = conn.get_all_instances() + len(reservations).should.equal(2) + reservations.sort(key=lambda x: x.instances[0].image_id) + + 
reservations[0].instances[0].image_id.should.equal('ami-1234abcd') + reservations[1].instances[0].image_id.should.equal('ami-5678efgh') + + +@mock_ec2_deprecated +def test_add_servers_to_multiple_regions(): + region1 = 'us-east-1' + region2 = 'ap-northeast-1' + add_servers_to_region('ami-1234abcd', 1, region1) + add_servers_to_region('ami-5678efgh', 1, region2) + + us_conn = boto.ec2.connect_to_region(region1) + ap_conn = boto.ec2.connect_to_region(region2) + us_reservations = us_conn.get_all_instances() + ap_reservations = ap_conn.get_all_instances() + + len(us_reservations).should.equal(1) + len(ap_reservations).should.equal(1) + + us_reservations[0].instances[0].image_id.should.equal('ami-1234abcd') + ap_reservations[0].instances[0].image_id.should.equal('ami-5678efgh') + + +@mock_autoscaling_deprecated +@mock_elb_deprecated +def test_create_autoscaling_group(): + elb_conn = boto.ec2.elb.connect_to_region('us-east-1') + elb_conn.create_load_balancer( + 'us_test_lb', zones=[], listeners=[(80, 8080, 'http')]) + elb_conn = boto.ec2.elb.connect_to_region('ap-northeast-1') + elb_conn.create_load_balancer( + 'ap_test_lb', zones=[], listeners=[(80, 8080, 'http')]) + + us_conn = boto.ec2.autoscale.connect_to_region('us-east-1') + config = boto.ec2.autoscale.LaunchConfiguration( + name='us_tester', + image_id='ami-abcd1234', + instance_type='m1.small', + ) + us_conn.create_launch_configuration(config) + + group = boto.ec2.autoscale.AutoScalingGroup( + name='us_tester_group', + availability_zones=['us-east-1c'], + default_cooldown=60, + desired_capacity=2, + health_check_period=100, + health_check_type="EC2", + max_size=2, + min_size=2, + launch_config=config, + load_balancers=["us_test_lb"], + placement_group="us_test_placement", + vpc_zone_identifier='subnet-1234abcd', + termination_policies=["OldestInstance", "NewestInstance"], + ) + us_conn.create_auto_scaling_group(group) + + ap_conn = boto.ec2.autoscale.connect_to_region('ap-northeast-1') + config = boto.ec2.autoscale.LaunchConfiguration( + name='ap_tester', + image_id='ami-efgh5678', + instance_type='m1.small', + ) + ap_conn.create_launch_configuration(config) + + group = boto.ec2.autoscale.AutoScalingGroup( + name='ap_tester_group', + availability_zones=['ap-northeast-1a'], + default_cooldown=60, + desired_capacity=2, + health_check_period=100, + health_check_type="EC2", + max_size=2, + min_size=2, + launch_config=config, + load_balancers=["ap_test_lb"], + placement_group="ap_test_placement", + vpc_zone_identifier='subnet-5678efgh', + termination_policies=["OldestInstance", "NewestInstance"], + ) + ap_conn.create_auto_scaling_group(group) + + len(us_conn.get_all_groups()).should.equal(1) + len(ap_conn.get_all_groups()).should.equal(1) + + us_group = us_conn.get_all_groups()[0] + us_group.name.should.equal('us_tester_group') + list(us_group.availability_zones).should.equal(['us-east-1c']) + us_group.desired_capacity.should.equal(2) + us_group.max_size.should.equal(2) + us_group.min_size.should.equal(2) + us_group.vpc_zone_identifier.should.equal('subnet-1234abcd') + us_group.launch_config_name.should.equal('us_tester') + us_group.default_cooldown.should.equal(60) + us_group.health_check_period.should.equal(100) + us_group.health_check_type.should.equal("EC2") + list(us_group.load_balancers).should.equal(["us_test_lb"]) + us_group.placement_group.should.equal("us_test_placement") + list(us_group.termination_policies).should.equal( + ["OldestInstance", "NewestInstance"]) + + ap_group = ap_conn.get_all_groups()[0] + 
ap_group.name.should.equal('ap_tester_group') + list(ap_group.availability_zones).should.equal(['ap-northeast-1a']) + ap_group.desired_capacity.should.equal(2) + ap_group.max_size.should.equal(2) + ap_group.min_size.should.equal(2) + ap_group.vpc_zone_identifier.should.equal('subnet-5678efgh') + ap_group.launch_config_name.should.equal('ap_tester') + ap_group.default_cooldown.should.equal(60) + ap_group.health_check_period.should.equal(100) + ap_group.health_check_type.should.equal("EC2") + list(ap_group.load_balancers).should.equal(["ap_test_lb"]) + ap_group.placement_group.should.equal("ap_test_placement") + list(ap_group.termination_policies).should.equal( + ["OldestInstance", "NewestInstance"]) diff --git a/tests/test_ec2/test_reserved_instances.py b/tests/test_ec2/test_reserved_instances.py index 437d3a95b..47456bc03 100644 --- a/tests/test_ec2/test_reserved_instances.py +++ b/tests/test_ec2/test_reserved_instances.py @@ -1,10 +1,10 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2 - - -@mock_ec2 -def test_reserved_instances(): - pass +from __future__ import unicode_literals +import boto +import sure # noqa + +from moto import mock_ec2 + + +@mock_ec2 +def test_reserved_instances(): + pass diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index b27484468..e6f767a0a 100644 --- a/tests/test_ec2/test_route_tables.py +++ b/tests/test_ec2/test_route_tables.py @@ -1,530 +1,530 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises -from nose.tools import assert_raises - -import boto -import boto3 -from boto.exception import EC2ResponseError -import sure # noqa - -from moto import mock_ec2, mock_ec2_deprecated -from tests.helpers import requires_boto_gte - - -@mock_ec2_deprecated -def test_route_tables_defaults(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - - all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) - all_route_tables.should.have.length_of(1) - - main_route_table = all_route_tables[0] - main_route_table.vpc_id.should.equal(vpc.id) - - routes = main_route_table.routes - routes.should.have.length_of(1) - - local_route = routes[0] - local_route.gateway_id.should.equal('local') - local_route.state.should.equal('active') - local_route.destination_cidr_block.should.equal(vpc.cidr_block) - - vpc.delete() - - all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) - all_route_tables.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_route_tables_additional(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - route_table = conn.create_route_table(vpc.id) - - all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) - all_route_tables.should.have.length_of(2) - all_route_tables[0].vpc_id.should.equal(vpc.id) - all_route_tables[1].vpc_id.should.equal(vpc.id) - - all_route_table_ids = [route_table.id for route_table in all_route_tables] - all_route_table_ids.should.contain(route_table.id) - - routes = route_table.routes - routes.should.have.length_of(1) - - local_route = routes[0] - local_route.gateway_id.should.equal('local') - local_route.state.should.equal('active') - local_route.destination_cidr_block.should.equal(vpc.cidr_block) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_vpc(vpc.id) - cm.exception.code.should.equal('DependencyViolation') - 
cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - conn.delete_route_table(route_table.id) - - all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) - all_route_tables.should.have.length_of(1) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_route_table("rtb-1234abcd") - cm.exception.code.should.equal('InvalidRouteTableID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_route_tables_filters_standard(): - conn = boto.connect_vpc('the_key', 'the_secret') - - vpc1 = conn.create_vpc("10.0.0.0/16") - route_table1 = conn.create_route_table(vpc1.id) - - vpc2 = conn.create_vpc("10.0.0.0/16") - route_table2 = conn.create_route_table(vpc2.id) - - all_route_tables = conn.get_all_route_tables() - all_route_tables.should.have.length_of(5) - - # Filter by main route table - main_route_tables = conn.get_all_route_tables( - filters={'association.main': 'true'}) - main_route_tables.should.have.length_of(3) - main_route_table_ids = [ - route_table.id for route_table in main_route_tables] - main_route_table_ids.should_not.contain(route_table1.id) - main_route_table_ids.should_not.contain(route_table2.id) - - # Filter by VPC - vpc1_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc1.id}) - vpc1_route_tables.should.have.length_of(2) - vpc1_route_table_ids = [ - route_table.id for route_table in vpc1_route_tables] - vpc1_route_table_ids.should.contain(route_table1.id) - vpc1_route_table_ids.should_not.contain(route_table2.id) - - # Filter by VPC and main route table - vpc2_main_route_tables = conn.get_all_route_tables( - filters={'association.main': 'true', 'vpc-id': vpc2.id}) - vpc2_main_route_tables.should.have.length_of(1) - vpc2_main_route_table_ids = [ - route_table.id for route_table in vpc2_main_route_tables] - vpc2_main_route_table_ids.should_not.contain(route_table1.id) - vpc2_main_route_table_ids.should_not.contain(route_table2.id) - - # Unsupported filter - conn.get_all_route_tables.when.called_with( - filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) - - -@mock_ec2_deprecated -def test_route_tables_filters_associations(): - conn = boto.connect_vpc('the_key', 'the_secret') - - vpc = conn.create_vpc("10.0.0.0/16") - subnet1 = conn.create_subnet(vpc.id, "10.0.0.0/24") - subnet2 = conn.create_subnet(vpc.id, "10.0.1.0/24") - subnet3 = conn.create_subnet(vpc.id, "10.0.2.0/24") - route_table1 = conn.create_route_table(vpc.id) - route_table2 = conn.create_route_table(vpc.id) - - association_id1 = conn.associate_route_table(route_table1.id, subnet1.id) - association_id2 = conn.associate_route_table(route_table1.id, subnet2.id) - association_id3 = conn.associate_route_table(route_table2.id, subnet3.id) - - all_route_tables = conn.get_all_route_tables() - all_route_tables.should.have.length_of(4) - - # Filter by association ID - association1_route_tables = conn.get_all_route_tables( - filters={'association.route-table-association-id': association_id1}) - association1_route_tables.should.have.length_of(1) - association1_route_tables[0].id.should.equal(route_table1.id) - association1_route_tables[0].associations.should.have.length_of(2) - - # Filter by route table ID - route_table2_route_tables = conn.get_all_route_tables( - filters={'association.route-table-id': route_table2.id}) - route_table2_route_tables.should.have.length_of(1) - route_table2_route_tables[0].id.should.equal(route_table2.id) - 
route_table2_route_tables[0].associations.should.have.length_of(1) - - # Filter by subnet ID - subnet_route_tables = conn.get_all_route_tables( - filters={'association.subnet-id': subnet1.id}) - subnet_route_tables.should.have.length_of(1) - subnet_route_tables[0].id.should.equal(route_table1.id) - association1_route_tables[0].associations.should.have.length_of(2) - - -@mock_ec2_deprecated -def test_route_table_associations(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - route_table = conn.create_route_table(vpc.id) - - all_route_tables = conn.get_all_route_tables() - all_route_tables.should.have.length_of(3) - - # Refresh - route_table = conn.get_all_route_tables(route_table.id)[0] - route_table.associations.should.have.length_of(0) - - # Associate - association_id = conn.associate_route_table(route_table.id, subnet.id) - - # Refresh - route_table = conn.get_all_route_tables(route_table.id)[0] - route_table.associations.should.have.length_of(1) - - route_table.associations[0].id.should.equal(association_id) - route_table.associations[0].main.should.equal(False) - route_table.associations[0].route_table_id.should.equal(route_table.id) - route_table.associations[0].subnet_id.should.equal(subnet.id) - - # Associate is idempotent - association_id_idempotent = conn.associate_route_table( - route_table.id, subnet.id) - association_id_idempotent.should.equal(association_id) - - # Error: Attempt delete associated route table. - with assert_raises(EC2ResponseError) as cm: - conn.delete_route_table(route_table.id) - cm.exception.code.should.equal('DependencyViolation') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Disassociate - conn.disassociate_route_table(association_id) - - # Refresh - route_table = conn.get_all_route_tables(route_table.id)[0] - route_table.associations.should.have.length_of(0) - - # Error: Disassociate with invalid association ID - with assert_raises(EC2ResponseError) as cm: - conn.disassociate_route_table(association_id) - cm.exception.code.should.equal('InvalidAssociationID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Associate with invalid subnet ID - with assert_raises(EC2ResponseError) as cm: - conn.associate_route_table(route_table.id, "subnet-1234abcd") - cm.exception.code.should.equal('InvalidSubnetID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Associate with invalid route table ID - with assert_raises(EC2ResponseError) as cm: - conn.associate_route_table("rtb-1234abcd", subnet.id) - cm.exception.code.should.equal('InvalidRouteTableID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@requires_boto_gte("2.16.0") -@mock_ec2_deprecated -def test_route_table_replace_route_table_association(): - """ - Note: Boto has deprecated replace_route_table_assocation (which returns status) - and now uses replace_route_table_assocation_with_assoc (which returns association ID). 
- """ - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - route_table1 = conn.create_route_table(vpc.id) - route_table2 = conn.create_route_table(vpc.id) - - all_route_tables = conn.get_all_route_tables() - all_route_tables.should.have.length_of(4) - - # Refresh - route_table1 = conn.get_all_route_tables(route_table1.id)[0] - route_table1.associations.should.have.length_of(0) - - # Associate - association_id1 = conn.associate_route_table(route_table1.id, subnet.id) - - # Refresh - route_table1 = conn.get_all_route_tables(route_table1.id)[0] - route_table2 = conn.get_all_route_tables(route_table2.id)[0] - - # Validate - route_table1.associations.should.have.length_of(1) - route_table2.associations.should.have.length_of(0) - - route_table1.associations[0].id.should.equal(association_id1) - route_table1.associations[0].main.should.equal(False) - route_table1.associations[0].route_table_id.should.equal(route_table1.id) - route_table1.associations[0].subnet_id.should.equal(subnet.id) - - # Replace Association - association_id2 = conn.replace_route_table_association_with_assoc( - association_id1, route_table2.id) - - # Refresh - route_table1 = conn.get_all_route_tables(route_table1.id)[0] - route_table2 = conn.get_all_route_tables(route_table2.id)[0] - - # Validate - route_table1.associations.should.have.length_of(0) - route_table2.associations.should.have.length_of(1) - - route_table2.associations[0].id.should.equal(association_id2) - route_table2.associations[0].main.should.equal(False) - route_table2.associations[0].route_table_id.should.equal(route_table2.id) - route_table2.associations[0].subnet_id.should.equal(subnet.id) - - # Replace Association is idempotent - association_id_idempotent = conn.replace_route_table_association_with_assoc( - association_id2, route_table2.id) - association_id_idempotent.should.equal(association_id2) - - # Error: Replace association with invalid association ID - with assert_raises(EC2ResponseError) as cm: - conn.replace_route_table_association_with_assoc( - "rtbassoc-1234abcd", route_table1.id) - cm.exception.code.should.equal('InvalidAssociationID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Error: Replace association with invalid route table ID - with assert_raises(EC2ResponseError) as cm: - conn.replace_route_table_association_with_assoc( - association_id2, "rtb-1234abcd") - cm.exception.code.should.equal('InvalidRouteTableID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_route_table_get_by_tag(): - conn = boto.connect_vpc('the_key', 'the_secret') - - vpc = conn.create_vpc('10.0.0.0/16') - - route_table = conn.create_route_table(vpc.id) - route_table.add_tag('Name', 'TestRouteTable') - - route_tables = conn.get_all_route_tables( - filters={'tag:Name': 'TestRouteTable'}) - - route_tables.should.have.length_of(1) - route_tables[0].vpc_id.should.equal(vpc.id) - route_tables[0].id.should.equal(route_table.id) - route_tables[0].tags.should.have.length_of(1) - route_tables[0].tags['Name'].should.equal('TestRouteTable') - - -@mock_ec2 -def test_route_table_get_by_tag_boto3(): - ec2 = boto3.resource('ec2', region_name='eu-central-1') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - - route_table = ec2.create_route_table(VpcId=vpc.id) - route_table.create_tags(Tags=[{'Key': 'Name', 'Value': 'TestRouteTable'}]) - - filters = 
[{'Name': 'tag:Name', 'Values': ['TestRouteTable']}] - route_tables = list(ec2.route_tables.filter(Filters=filters)) - - route_tables.should.have.length_of(1) - route_tables[0].vpc_id.should.equal(vpc.id) - route_tables[0].id.should.equal(route_table.id) - route_tables[0].tags.should.have.length_of(1) - route_tables[0].tags[0].should.equal( - {'Key': 'Name', 'Value': 'TestRouteTable'}) - - -@mock_ec2_deprecated -def test_routes_additional(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - main_route_table = conn.get_all_route_tables(filters={'vpc-id': vpc.id})[0] - local_route = main_route_table.routes[0] - igw = conn.create_internet_gateway() - ROUTE_CIDR = "10.0.0.4/24" - - conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) - - main_route_table = conn.get_all_route_tables( - filters={'vpc-id': vpc.id})[0] # Refresh route table - - main_route_table.routes.should.have.length_of(2) - new_routes = [ - route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] - new_routes.should.have.length_of(1) - - new_route = new_routes[0] - new_route.gateway_id.should.equal(igw.id) - new_route.instance_id.should.be.none - new_route.state.should.equal('active') - new_route.destination_cidr_block.should.equal(ROUTE_CIDR) - - conn.delete_route(main_route_table.id, ROUTE_CIDR) - - main_route_table = conn.get_all_route_tables( - filters={'vpc-id': vpc.id})[0] # Refresh route table - - main_route_table.routes.should.have.length_of(1) - new_routes = [ - route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] - new_routes.should.have.length_of(0) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_route(main_route_table.id, ROUTE_CIDR) - cm.exception.code.should.equal('InvalidRoute.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_routes_replace(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - main_route_table = conn.get_all_route_tables( - filters={'association.main': 'true', 'vpc-id': vpc.id})[0] - local_route = main_route_table.routes[0] - ROUTE_CIDR = "10.0.0.4/24" - - # Various route targets - igw = conn.create_internet_gateway() - - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - # Create initial route - conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) - - # Replace... 
- def get_target_route(): - route_table = conn.get_all_route_tables(main_route_table.id)[0] - routes = [ - route for route in route_table.routes if route.destination_cidr_block != vpc.cidr_block] - routes.should.have.length_of(1) - return routes[0] - - conn.replace_route(main_route_table.id, ROUTE_CIDR, - instance_id=instance.id) - - target_route = get_target_route() - target_route.gateway_id.should.be.none - target_route.instance_id.should.equal(instance.id) - target_route.state.should.equal('active') - target_route.destination_cidr_block.should.equal(ROUTE_CIDR) - - conn.replace_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) - - target_route = get_target_route() - target_route.gateway_id.should.equal(igw.id) - target_route.instance_id.should.be.none - target_route.state.should.equal('active') - target_route.destination_cidr_block.should.equal(ROUTE_CIDR) - - with assert_raises(EC2ResponseError) as cm: - conn.replace_route('rtb-1234abcd', ROUTE_CIDR, gateway_id=igw.id) - cm.exception.code.should.equal('InvalidRouteTableID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@requires_boto_gte("2.19.0") -@mock_ec2_deprecated -def test_routes_not_supported(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - main_route_table = conn.get_all_route_tables()[0] - local_route = main_route_table.routes[0] - igw = conn.create_internet_gateway() - ROUTE_CIDR = "10.0.0.4/24" - - # Create - conn.create_route.when.called_with( - main_route_table.id, ROUTE_CIDR, interface_id='eni-1234abcd').should.throw(NotImplementedError) - - # Replace - igw = conn.create_internet_gateway() - conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) - conn.replace_route.when.called_with( - main_route_table.id, ROUTE_CIDR, interface_id='eni-1234abcd').should.throw(NotImplementedError) - - -@requires_boto_gte("2.34.0") -@mock_ec2_deprecated -def test_routes_vpc_peering_connection(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - main_route_table = conn.get_all_route_tables( - filters={'association.main': 'true', 'vpc-id': vpc.id})[0] - local_route = main_route_table.routes[0] - ROUTE_CIDR = "10.0.0.4/24" - - peer_vpc = conn.create_vpc("11.0.0.0/16") - vpc_pcx = conn.create_vpc_peering_connection(vpc.id, peer_vpc.id) - - conn.create_route(main_route_table.id, ROUTE_CIDR, - vpc_peering_connection_id=vpc_pcx.id) - - # Refresh route table - main_route_table = conn.get_all_route_tables(main_route_table.id)[0] - new_routes = [ - route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] - new_routes.should.have.length_of(1) - - new_route = new_routes[0] - new_route.gateway_id.should.be.none - new_route.instance_id.should.be.none - new_route.vpc_peering_connection_id.should.equal(vpc_pcx.id) - new_route.state.should.equal('blackhole') - new_route.destination_cidr_block.should.equal(ROUTE_CIDR) - - -@requires_boto_gte("2.34.0") -@mock_ec2_deprecated -def test_routes_vpn_gateway(): - - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - main_route_table = conn.get_all_route_tables( - filters={'association.main': 'true', 'vpc-id': vpc.id})[0] - ROUTE_CIDR = "10.0.0.4/24" - - vpn_gw = conn.create_vpn_gateway(type="ipsec.1") - - conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=vpn_gw.id) - - main_route_table = conn.get_all_route_tables(main_route_table.id)[0] - new_routes = [ - route for 
route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] - new_routes.should.have.length_of(1) - - new_route = new_routes[0] - new_route.gateway_id.should.equal(vpn_gw.id) - new_route.instance_id.should.be.none - new_route.vpc_peering_connection_id.should.be.none - - -@mock_ec2_deprecated -def test_network_acl_tagging(): - - conn = boto.connect_vpc('the_key', 'the secret') - vpc = conn.create_vpc("10.0.0.0/16") - - route_table = conn.create_route_table(vpc.id) - route_table.add_tag("a key", "some value") - - tag = conn.get_all_tags()[0] - tag.name.should.equal("a key") - tag.value.should.equal("some value") - - all_route_tables = conn.get_all_route_tables() - test_route_table = next(na for na in all_route_tables - if na.id == route_table.id) - test_route_table.tags.should.have.length_of(1) - test_route_table.tags["a key"].should.equal("some value") +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises + +import boto +import boto3 +from boto.exception import EC2ResponseError +import sure # noqa + +from moto import mock_ec2, mock_ec2_deprecated +from tests.helpers import requires_boto_gte + + +@mock_ec2_deprecated +def test_route_tables_defaults(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + + all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) + all_route_tables.should.have.length_of(1) + + main_route_table = all_route_tables[0] + main_route_table.vpc_id.should.equal(vpc.id) + + routes = main_route_table.routes + routes.should.have.length_of(1) + + local_route = routes[0] + local_route.gateway_id.should.equal('local') + local_route.state.should.equal('active') + local_route.destination_cidr_block.should.equal(vpc.cidr_block) + + vpc.delete() + + all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) + all_route_tables.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_route_tables_additional(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + route_table = conn.create_route_table(vpc.id) + + all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) + all_route_tables.should.have.length_of(2) + all_route_tables[0].vpc_id.should.equal(vpc.id) + all_route_tables[1].vpc_id.should.equal(vpc.id) + + all_route_table_ids = [route_table.id for route_table in all_route_tables] + all_route_table_ids.should.contain(route_table.id) + + routes = route_table.routes + routes.should.have.length_of(1) + + local_route = routes[0] + local_route.gateway_id.should.equal('local') + local_route.state.should.equal('active') + local_route.destination_cidr_block.should.equal(vpc.cidr_block) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_vpc(vpc.id) + cm.exception.code.should.equal('DependencyViolation') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + conn.delete_route_table(route_table.id) + + all_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc.id}) + all_route_tables.should.have.length_of(1) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_route_table("rtb-1234abcd") + cm.exception.code.should.equal('InvalidRouteTableID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_route_tables_filters_standard(): + conn = boto.connect_vpc('the_key', 
'the_secret') + + vpc1 = conn.create_vpc("10.0.0.0/16") + route_table1 = conn.create_route_table(vpc1.id) + + vpc2 = conn.create_vpc("10.0.0.0/16") + route_table2 = conn.create_route_table(vpc2.id) + + all_route_tables = conn.get_all_route_tables() + all_route_tables.should.have.length_of(5) + + # Filter by main route table + main_route_tables = conn.get_all_route_tables( + filters={'association.main': 'true'}) + main_route_tables.should.have.length_of(3) + main_route_table_ids = [ + route_table.id for route_table in main_route_tables] + main_route_table_ids.should_not.contain(route_table1.id) + main_route_table_ids.should_not.contain(route_table2.id) + + # Filter by VPC + vpc1_route_tables = conn.get_all_route_tables(filters={'vpc-id': vpc1.id}) + vpc1_route_tables.should.have.length_of(2) + vpc1_route_table_ids = [ + route_table.id for route_table in vpc1_route_tables] + vpc1_route_table_ids.should.contain(route_table1.id) + vpc1_route_table_ids.should_not.contain(route_table2.id) + + # Filter by VPC and main route table + vpc2_main_route_tables = conn.get_all_route_tables( + filters={'association.main': 'true', 'vpc-id': vpc2.id}) + vpc2_main_route_tables.should.have.length_of(1) + vpc2_main_route_table_ids = [ + route_table.id for route_table in vpc2_main_route_tables] + vpc2_main_route_table_ids.should_not.contain(route_table1.id) + vpc2_main_route_table_ids.should_not.contain(route_table2.id) + + # Unsupported filter + conn.get_all_route_tables.when.called_with( + filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) + + +@mock_ec2_deprecated +def test_route_tables_filters_associations(): + conn = boto.connect_vpc('the_key', 'the_secret') + + vpc = conn.create_vpc("10.0.0.0/16") + subnet1 = conn.create_subnet(vpc.id, "10.0.0.0/24") + subnet2 = conn.create_subnet(vpc.id, "10.0.1.0/24") + subnet3 = conn.create_subnet(vpc.id, "10.0.2.0/24") + route_table1 = conn.create_route_table(vpc.id) + route_table2 = conn.create_route_table(vpc.id) + + association_id1 = conn.associate_route_table(route_table1.id, subnet1.id) + association_id2 = conn.associate_route_table(route_table1.id, subnet2.id) + association_id3 = conn.associate_route_table(route_table2.id, subnet3.id) + + all_route_tables = conn.get_all_route_tables() + all_route_tables.should.have.length_of(4) + + # Filter by association ID + association1_route_tables = conn.get_all_route_tables( + filters={'association.route-table-association-id': association_id1}) + association1_route_tables.should.have.length_of(1) + association1_route_tables[0].id.should.equal(route_table1.id) + association1_route_tables[0].associations.should.have.length_of(2) + + # Filter by route table ID + route_table2_route_tables = conn.get_all_route_tables( + filters={'association.route-table-id': route_table2.id}) + route_table2_route_tables.should.have.length_of(1) + route_table2_route_tables[0].id.should.equal(route_table2.id) + route_table2_route_tables[0].associations.should.have.length_of(1) + + # Filter by subnet ID + subnet_route_tables = conn.get_all_route_tables( + filters={'association.subnet-id': subnet1.id}) + subnet_route_tables.should.have.length_of(1) + subnet_route_tables[0].id.should.equal(route_table1.id) + association1_route_tables[0].associations.should.have.length_of(2) + + +@mock_ec2_deprecated +def test_route_table_associations(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + route_table = 
conn.create_route_table(vpc.id) + + all_route_tables = conn.get_all_route_tables() + all_route_tables.should.have.length_of(3) + + # Refresh + route_table = conn.get_all_route_tables(route_table.id)[0] + route_table.associations.should.have.length_of(0) + + # Associate + association_id = conn.associate_route_table(route_table.id, subnet.id) + + # Refresh + route_table = conn.get_all_route_tables(route_table.id)[0] + route_table.associations.should.have.length_of(1) + + route_table.associations[0].id.should.equal(association_id) + route_table.associations[0].main.should.equal(False) + route_table.associations[0].route_table_id.should.equal(route_table.id) + route_table.associations[0].subnet_id.should.equal(subnet.id) + + # Associate is idempotent + association_id_idempotent = conn.associate_route_table( + route_table.id, subnet.id) + association_id_idempotent.should.equal(association_id) + + # Error: Attempt delete associated route table. + with assert_raises(EC2ResponseError) as cm: + conn.delete_route_table(route_table.id) + cm.exception.code.should.equal('DependencyViolation') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Disassociate + conn.disassociate_route_table(association_id) + + # Refresh + route_table = conn.get_all_route_tables(route_table.id)[0] + route_table.associations.should.have.length_of(0) + + # Error: Disassociate with invalid association ID + with assert_raises(EC2ResponseError) as cm: + conn.disassociate_route_table(association_id) + cm.exception.code.should.equal('InvalidAssociationID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Associate with invalid subnet ID + with assert_raises(EC2ResponseError) as cm: + conn.associate_route_table(route_table.id, "subnet-1234abcd") + cm.exception.code.should.equal('InvalidSubnetID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Associate with invalid route table ID + with assert_raises(EC2ResponseError) as cm: + conn.associate_route_table("rtb-1234abcd", subnet.id) + cm.exception.code.should.equal('InvalidRouteTableID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@requires_boto_gte("2.16.0") +@mock_ec2_deprecated +def test_route_table_replace_route_table_association(): + """ + Note: Boto has deprecated replace_route_table_assocation (which returns status) + and now uses replace_route_table_association_with_assoc (which returns association ID).
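+ The assertions below exercise only the newer call, including its idempotency and error paths.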
+ """ + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") + route_table1 = conn.create_route_table(vpc.id) + route_table2 = conn.create_route_table(vpc.id) + + all_route_tables = conn.get_all_route_tables() + all_route_tables.should.have.length_of(4) + + # Refresh + route_table1 = conn.get_all_route_tables(route_table1.id)[0] + route_table1.associations.should.have.length_of(0) + + # Associate + association_id1 = conn.associate_route_table(route_table1.id, subnet.id) + + # Refresh + route_table1 = conn.get_all_route_tables(route_table1.id)[0] + route_table2 = conn.get_all_route_tables(route_table2.id)[0] + + # Validate + route_table1.associations.should.have.length_of(1) + route_table2.associations.should.have.length_of(0) + + route_table1.associations[0].id.should.equal(association_id1) + route_table1.associations[0].main.should.equal(False) + route_table1.associations[0].route_table_id.should.equal(route_table1.id) + route_table1.associations[0].subnet_id.should.equal(subnet.id) + + # Replace Association + association_id2 = conn.replace_route_table_association_with_assoc( + association_id1, route_table2.id) + + # Refresh + route_table1 = conn.get_all_route_tables(route_table1.id)[0] + route_table2 = conn.get_all_route_tables(route_table2.id)[0] + + # Validate + route_table1.associations.should.have.length_of(0) + route_table2.associations.should.have.length_of(1) + + route_table2.associations[0].id.should.equal(association_id2) + route_table2.associations[0].main.should.equal(False) + route_table2.associations[0].route_table_id.should.equal(route_table2.id) + route_table2.associations[0].subnet_id.should.equal(subnet.id) + + # Replace Association is idempotent + association_id_idempotent = conn.replace_route_table_association_with_assoc( + association_id2, route_table2.id) + association_id_idempotent.should.equal(association_id2) + + # Error: Replace association with invalid association ID + with assert_raises(EC2ResponseError) as cm: + conn.replace_route_table_association_with_assoc( + "rtbassoc-1234abcd", route_table1.id) + cm.exception.code.should.equal('InvalidAssociationID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Error: Replace association with invalid route table ID + with assert_raises(EC2ResponseError) as cm: + conn.replace_route_table_association_with_assoc( + association_id2, "rtb-1234abcd") + cm.exception.code.should.equal('InvalidRouteTableID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_route_table_get_by_tag(): + conn = boto.connect_vpc('the_key', 'the_secret') + + vpc = conn.create_vpc('10.0.0.0/16') + + route_table = conn.create_route_table(vpc.id) + route_table.add_tag('Name', 'TestRouteTable') + + route_tables = conn.get_all_route_tables( + filters={'tag:Name': 'TestRouteTable'}) + + route_tables.should.have.length_of(1) + route_tables[0].vpc_id.should.equal(vpc.id) + route_tables[0].id.should.equal(route_table.id) + route_tables[0].tags.should.have.length_of(1) + route_tables[0].tags['Name'].should.equal('TestRouteTable') + + +@mock_ec2 +def test_route_table_get_by_tag_boto3(): + ec2 = boto3.resource('ec2', region_name='eu-central-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + + route_table = ec2.create_route_table(VpcId=vpc.id) + route_table.create_tags(Tags=[{'Key': 'Name', 'Value': 'TestRouteTable'}]) + + filters = 
[{'Name': 'tag:Name', 'Values': ['TestRouteTable']}] + route_tables = list(ec2.route_tables.filter(Filters=filters)) + + route_tables.should.have.length_of(1) + route_tables[0].vpc_id.should.equal(vpc.id) + route_tables[0].id.should.equal(route_table.id) + route_tables[0].tags.should.have.length_of(1) + route_tables[0].tags[0].should.equal( + {'Key': 'Name', 'Value': 'TestRouteTable'}) + + +@mock_ec2_deprecated +def test_routes_additional(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + main_route_table = conn.get_all_route_tables(filters={'vpc-id': vpc.id})[0] + local_route = main_route_table.routes[0] + igw = conn.create_internet_gateway() + ROUTE_CIDR = "10.0.0.4/24" + + conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) + + main_route_table = conn.get_all_route_tables( + filters={'vpc-id': vpc.id})[0] # Refresh route table + + main_route_table.routes.should.have.length_of(2) + new_routes = [ + route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] + new_routes.should.have.length_of(1) + + new_route = new_routes[0] + new_route.gateway_id.should.equal(igw.id) + new_route.instance_id.should.be.none + new_route.state.should.equal('active') + new_route.destination_cidr_block.should.equal(ROUTE_CIDR) + + conn.delete_route(main_route_table.id, ROUTE_CIDR) + + main_route_table = conn.get_all_route_tables( + filters={'vpc-id': vpc.id})[0] # Refresh route table + + main_route_table.routes.should.have.length_of(1) + new_routes = [ + route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] + new_routes.should.have.length_of(0) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_route(main_route_table.id, ROUTE_CIDR) + cm.exception.code.should.equal('InvalidRoute.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_routes_replace(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + main_route_table = conn.get_all_route_tables( + filters={'association.main': 'true', 'vpc-id': vpc.id})[0] + local_route = main_route_table.routes[0] + ROUTE_CIDR = "10.0.0.4/24" + + # Various route targets + igw = conn.create_internet_gateway() + + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + # Create initial route + conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) + + # Replace... 
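+    # replace_route keeps the destination CIDR and swaps only the target
+    # (gateway -> instance -> gateway below); the get_target_route helper
+    # re-reads the route table so each replacement is asserted against
+    # fresh state.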
+ def get_target_route(): + route_table = conn.get_all_route_tables(main_route_table.id)[0] + routes = [ + route for route in route_table.routes if route.destination_cidr_block != vpc.cidr_block] + routes.should.have.length_of(1) + return routes[0] + + conn.replace_route(main_route_table.id, ROUTE_CIDR, + instance_id=instance.id) + + target_route = get_target_route() + target_route.gateway_id.should.be.none + target_route.instance_id.should.equal(instance.id) + target_route.state.should.equal('active') + target_route.destination_cidr_block.should.equal(ROUTE_CIDR) + + conn.replace_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) + + target_route = get_target_route() + target_route.gateway_id.should.equal(igw.id) + target_route.instance_id.should.be.none + target_route.state.should.equal('active') + target_route.destination_cidr_block.should.equal(ROUTE_CIDR) + + with assert_raises(EC2ResponseError) as cm: + conn.replace_route('rtb-1234abcd', ROUTE_CIDR, gateway_id=igw.id) + cm.exception.code.should.equal('InvalidRouteTableID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@requires_boto_gte("2.19.0") +@mock_ec2_deprecated +def test_routes_not_supported(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + main_route_table = conn.get_all_route_tables()[0] + local_route = main_route_table.routes[0] + igw = conn.create_internet_gateway() + ROUTE_CIDR = "10.0.0.4/24" + + # Create + conn.create_route.when.called_with( + main_route_table.id, ROUTE_CIDR, interface_id='eni-1234abcd').should.throw(NotImplementedError) + + # Replace + igw = conn.create_internet_gateway() + conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=igw.id) + conn.replace_route.when.called_with( + main_route_table.id, ROUTE_CIDR, interface_id='eni-1234abcd').should.throw(NotImplementedError) + + +@requires_boto_gte("2.34.0") +@mock_ec2_deprecated +def test_routes_vpc_peering_connection(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + main_route_table = conn.get_all_route_tables( + filters={'association.main': 'true', 'vpc-id': vpc.id})[0] + local_route = main_route_table.routes[0] + ROUTE_CIDR = "10.0.0.4/24" + + peer_vpc = conn.create_vpc("11.0.0.0/16") + vpc_pcx = conn.create_vpc_peering_connection(vpc.id, peer_vpc.id) + + conn.create_route(main_route_table.id, ROUTE_CIDR, + vpc_peering_connection_id=vpc_pcx.id) + + # Refresh route table + main_route_table = conn.get_all_route_tables(main_route_table.id)[0] + new_routes = [ + route for route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block] + new_routes.should.have.length_of(1) + + new_route = new_routes[0] + new_route.gateway_id.should.be.none + new_route.instance_id.should.be.none + new_route.vpc_peering_connection_id.should.equal(vpc_pcx.id) + new_route.state.should.equal('blackhole') + new_route.destination_cidr_block.should.equal(ROUTE_CIDR) + + +@requires_boto_gte("2.34.0") +@mock_ec2_deprecated +def test_routes_vpn_gateway(): + + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + main_route_table = conn.get_all_route_tables( + filters={'association.main': 'true', 'vpc-id': vpc.id})[0] + ROUTE_CIDR = "10.0.0.4/24" + + vpn_gw = conn.create_vpn_gateway(type="ipsec.1") + + conn.create_route(main_route_table.id, ROUTE_CIDR, gateway_id=vpn_gw.id) + + main_route_table = conn.get_all_route_tables(main_route_table.id)[0] + new_routes = [ + route for 
route in main_route_table.routes if route.destination_cidr_block != vpc.cidr_block]
+    new_routes.should.have.length_of(1)
+
+    new_route = new_routes[0]
+    new_route.gateway_id.should.equal(vpn_gw.id)
+    new_route.instance_id.should.be.none
+    new_route.vpc_peering_connection_id.should.be.none
+
+
+@mock_ec2_deprecated
+def test_route_table_tagging():
+
+    conn = boto.connect_vpc('the_key', 'the_secret')
+    vpc = conn.create_vpc("10.0.0.0/16")
+
+    route_table = conn.create_route_table(vpc.id)
+    route_table.add_tag("a key", "some value")
+
+    tag = conn.get_all_tags()[0]
+    tag.name.should.equal("a key")
+    tag.value.should.equal("some value")
+
+    all_route_tables = conn.get_all_route_tables()
+    test_route_table = next(rt for rt in all_route_tables
+                            if rt.id == route_table.id)
+    test_route_table.tags.should.have.length_of(1)
+    test_route_table.tags["a key"].should.equal("some value")
diff --git a/tests/test_ec2/test_security_groups.py b/tests/test_ec2/test_security_groups.py
index d843087a6..15be94fbe 100644
--- a/tests/test_ec2/test_security_groups.py
+++ b/tests/test_ec2/test_security_groups.py
@@ -1,737 +1,737 @@
-from __future__ import unicode_literals
-
-import copy
-
-# Ensure 'assert_raises' context manager support for Python 2.6
-import tests.backport_assert_raises  # noqa
-from nose.tools import assert_raises
-
-import boto3
-import boto
-from botocore.exceptions import ClientError
-from boto.exception import EC2ResponseError
-import sure  # noqa
-
-from moto import mock_ec2, mock_ec2_deprecated
-
-
-@mock_ec2_deprecated
-def test_create_and_describe_security_group():
-    conn = boto.connect_ec2('the_key', 'the_secret')
-
-    with assert_raises(EC2ResponseError) as ex:
-        security_group = conn.create_security_group(
-            'test security group', 'this is a test security group', dry_run=True)
-    ex.exception.error_code.should.equal('DryRunOperation')
-    ex.exception.status.should.equal(400)
-    ex.exception.message.should.equal(
-        'An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set')
-
-    security_group = conn.create_security_group(
-        'test security group', 'this is a test security group')
-
-    security_group.name.should.equal('test security group')
-    security_group.description.should.equal('this is a test security group')
-
-    # Trying to create another group with the same name should throw an error
-    with assert_raises(EC2ResponseError) as cm:
-        conn.create_security_group(
-            'test security group', 'this is a test security group')
-    cm.exception.code.should.equal('InvalidGroup.Duplicate')
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
-
-    all_groups = conn.get_all_security_groups()
-    # The default group gets created automatically
-    all_groups.should.have.length_of(3)
-    group_names = [group.name for group in all_groups]
-    set(group_names).should.equal(set(["default", "test security group"]))
-
-
-@mock_ec2_deprecated
-def test_create_security_group_without_description_raises_error():
-    conn = boto.connect_ec2('the_key', 'the_secret')
-
-    with assert_raises(EC2ResponseError) as cm:
-        conn.create_security_group('test security group', '')
-    cm.exception.code.should.equal('MissingParameter')
-    cm.exception.status.should.equal(400)
-    cm.exception.request_id.should_not.be.none
-
-
-@mock_ec2_deprecated
-def test_default_security_group():
-    conn = boto.ec2.connect_to_region('us-east-1')
-    groups = conn.get_all_security_groups()
-    groups.should.have.length_of(2)
-    
groups[0].name.should.equal("default") - - -@mock_ec2_deprecated -def test_create_and_describe_vpc_security_group(): - conn = boto.connect_ec2('the_key', 'the_secret') - vpc_id = 'vpc-5300000c' - security_group = conn.create_security_group( - 'test security group', 'this is a test security group', vpc_id=vpc_id) - - security_group.vpc_id.should.equal(vpc_id) - - security_group.name.should.equal('test security group') - security_group.description.should.equal('this is a test security group') - - # Trying to create another group with the same name in the same VPC should - # throw an error - with assert_raises(EC2ResponseError) as cm: - conn.create_security_group( - 'test security group', 'this is a test security group', vpc_id) - cm.exception.code.should.equal('InvalidGroup.Duplicate') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - all_groups = conn.get_all_security_groups(filters={'vpc_id': [vpc_id]}) - - all_groups[0].vpc_id.should.equal(vpc_id) - - all_groups.should.have.length_of(1) - all_groups[0].name.should.equal('test security group') - - -@mock_ec2_deprecated -def test_create_two_security_groups_with_same_name_in_different_vpc(): - conn = boto.connect_ec2('the_key', 'the_secret') - vpc_id = 'vpc-5300000c' - vpc_id2 = 'vpc-5300000d' - - conn.create_security_group( - 'test security group', 'this is a test security group', vpc_id) - conn.create_security_group( - 'test security group', 'this is a test security group', vpc_id2) - - all_groups = conn.get_all_security_groups() - - all_groups.should.have.length_of(4) - group_names = [group.name for group in all_groups] - # The default group is created automatically - set(group_names).should.equal(set(["default", "test security group"])) - - -@mock_ec2_deprecated -def test_deleting_security_groups(): - conn = boto.connect_ec2('the_key', 'the_secret') - security_group1 = conn.create_security_group('test1', 'test1') - conn.create_security_group('test2', 'test2') - - conn.get_all_security_groups().should.have.length_of(4) - - # Deleting a group that doesn't exist should throw an error - with assert_raises(EC2ResponseError) as cm: - conn.delete_security_group('foobar') - cm.exception.code.should.equal('InvalidGroup.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Delete by name - with assert_raises(EC2ResponseError) as ex: - conn.delete_security_group('test2', dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DeleteSecurityGroup operation: Request would have succeeded, but DryRun flag is set') - - conn.delete_security_group('test2') - conn.get_all_security_groups().should.have.length_of(3) - - # Delete by group id - conn.delete_security_group(group_id=security_group1.id) - conn.get_all_security_groups().should.have.length_of(2) - - -@mock_ec2_deprecated -def test_delete_security_group_in_vpc(): - conn = boto.connect_ec2('the_key', 'the_secret') - vpc_id = "vpc-12345" - security_group1 = conn.create_security_group('test1', 'test1', vpc_id) - - # this should not throw an exception - conn.delete_security_group(group_id=security_group1.id) - - -@mock_ec2_deprecated -def test_authorize_ip_range_and_revoke(): - conn = boto.connect_ec2('the_key', 'the_secret') - security_group = conn.create_security_group('test', 'test') - - with assert_raises(EC2ResponseError) as ex: - success = security_group.authorize( - 
ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the GrantSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') - - success = security_group.authorize( - ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") - assert success.should.be.true - - security_group = conn.get_all_security_groups(groupnames=['test'])[0] - int(security_group.rules[0].to_port).should.equal(2222) - security_group.rules[0].grants[ - 0].cidr_ip.should.equal("123.123.123.123/32") - - # Wrong Cidr should throw error - with assert_raises(EC2ResponseError) as cm: - security_group.revoke(ip_protocol="tcp", from_port="22", - to_port="2222", cidr_ip="123.123.123.122/32") - cm.exception.code.should.equal('InvalidPermission.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Actually revoke - with assert_raises(EC2ResponseError) as ex: - security_group.revoke(ip_protocol="tcp", from_port="22", - to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the RevokeSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') - - security_group.revoke(ip_protocol="tcp", from_port="22", - to_port="2222", cidr_ip="123.123.123.123/32") - - security_group = conn.get_all_security_groups()[0] - security_group.rules.should.have.length_of(0) - - # Test for egress as well - egress_security_group = conn.create_security_group( - 'testegress', 'testegress', vpc_id='vpc-3432589') - - with assert_raises(EC2ResponseError) as ex: - success = conn.authorize_security_group_egress( - egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the GrantSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') - - success = conn.authorize_security_group_egress( - egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") - assert success.should.be.true - egress_security_group = conn.get_all_security_groups( - groupnames='testegress')[0] - # There are two egress rules associated with the security group: - # the default outbound rule and the new one - int(egress_security_group.rules_egress[1].to_port).should.equal(2222) - egress_security_group.rules_egress[1].grants[ - 0].cidr_ip.should.equal("123.123.123.123/32") - - # Wrong Cidr should throw error - egress_security_group.revoke.when.called_with( - ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.122/32").should.throw(EC2ResponseError) - - # Actually revoke - with assert_raises(EC2ResponseError) as ex: - conn.revoke_security_group_egress( - egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the 
RevokeSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') - - conn.revoke_security_group_egress( - egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") - - egress_security_group = conn.get_all_security_groups()[0] - # There is still the default outbound rule - egress_security_group.rules_egress.should.have.length_of(1) - - -@mock_ec2_deprecated -def test_authorize_other_group_and_revoke(): - conn = boto.connect_ec2('the_key', 'the_secret') - security_group = conn.create_security_group('test', 'test') - other_security_group = conn.create_security_group('other', 'other') - wrong_group = conn.create_security_group('wrong', 'wrong') - - success = security_group.authorize( - ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) - assert success.should.be.true - - security_group = [ - group for group in conn.get_all_security_groups() if group.name == 'test'][0] - int(security_group.rules[0].to_port).should.equal(2222) - security_group.rules[0].grants[ - 0].group_id.should.equal(other_security_group.id) - - # Wrong source group should throw error - with assert_raises(EC2ResponseError) as cm: - security_group.revoke(ip_protocol="tcp", from_port="22", - to_port="2222", src_group=wrong_group) - cm.exception.code.should.equal('InvalidPermission.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - # Actually revoke - security_group.revoke(ip_protocol="tcp", from_port="22", - to_port="2222", src_group=other_security_group) - - security_group = [ - group for group in conn.get_all_security_groups() if group.name == 'test'][0] - security_group.rules.should.have.length_of(0) - - -@mock_ec2 -def test_authorize_other_group_egress_and_revoke(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - - sg01 = ec2.create_security_group( - GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) - sg02 = ec2.create_security_group( - GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id) - - ip_permission = { - 'IpProtocol': 'tcp', - 'FromPort': 27017, - 'ToPort': 27017, - 'UserIdGroupPairs': [{'GroupId': sg02.id, 'GroupName': 'sg02', 'UserId': sg02.owner_id}], - 'IpRanges': [] - } - - sg01.authorize_egress(IpPermissions=[ip_permission]) - sg01.ip_permissions_egress.should.have.length_of(2) - sg01.ip_permissions_egress.should.contain(ip_permission) - - sg01.revoke_egress(IpPermissions=[ip_permission]) - sg01.ip_permissions_egress.should.have.length_of(1) - - -@mock_ec2_deprecated -def test_authorize_group_in_vpc(): - conn = boto.connect_ec2('the_key', 'the_secret') - vpc_id = "vpc-12345" - - # create 2 groups in a vpc - security_group = conn.create_security_group('test1', 'test1', vpc_id) - other_security_group = conn.create_security_group('test2', 'test2', vpc_id) - - success = security_group.authorize( - ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) - success.should.be.true - - # Check that the rule is accurate - security_group = [ - group for group in conn.get_all_security_groups() if group.name == 'test1'][0] - int(security_group.rules[0].to_port).should.equal(2222) - security_group.rules[0].grants[ - 0].group_id.should.equal(other_security_group.id) - - # Now remove the rule - success = security_group.revoke( - ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) - success.should.be.true - - # And check that it 
gets revoked - security_group = [ - group for group in conn.get_all_security_groups() if group.name == 'test1'][0] - security_group.rules.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_get_all_security_groups(): - conn = boto.connect_ec2() - sg1 = conn.create_security_group( - name='test1', description='test1', vpc_id='vpc-mjm05d27') - conn.create_security_group(name='test2', description='test2') - - resp = conn.get_all_security_groups(groupnames=['test1']) - resp.should.have.length_of(1) - resp[0].id.should.equal(sg1.id) - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_security_groups(groupnames=['does_not_exist']) - cm.exception.code.should.equal('InvalidGroup.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - resp.should.have.length_of(1) - resp[0].id.should.equal(sg1.id) - - resp = conn.get_all_security_groups(filters={'vpc-id': ['vpc-mjm05d27']}) - resp.should.have.length_of(1) - resp[0].id.should.equal(sg1.id) - - resp = conn.get_all_security_groups(filters={'vpc_id': ['vpc-mjm05d27']}) - resp.should.have.length_of(1) - resp[0].id.should.equal(sg1.id) - - resp = conn.get_all_security_groups(filters={'description': ['test1']}) - resp.should.have.length_of(1) - resp[0].id.should.equal(sg1.id) - - resp = conn.get_all_security_groups() - resp.should.have.length_of(4) - - -@mock_ec2_deprecated -def test_authorize_bad_cidr_throws_invalid_parameter_value(): - conn = boto.connect_ec2('the_key', 'the_secret') - security_group = conn.create_security_group('test', 'test') - with assert_raises(EC2ResponseError) as cm: - security_group.authorize( - ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123") - cm.exception.code.should.equal('InvalidParameterValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_security_group_tagging(): - conn = boto.connect_vpc() - vpc = conn.create_vpc("10.0.0.0/16") - - sg = conn.create_security_group("test-sg", "Test SG", vpc.id) - - with assert_raises(EC2ResponseError) as ex: - sg.add_tag("Test", "Tag", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') - - sg.add_tag("Test", "Tag") - - tag = conn.get_all_tags()[0] - tag.name.should.equal("Test") - tag.value.should.equal("Tag") - - group = conn.get_all_security_groups("test-sg")[0] - group.tags.should.have.length_of(1) - group.tags["Test"].should.equal("Tag") - - -@mock_ec2_deprecated -def test_security_group_tag_filtering(): - conn = boto.connect_ec2() - sg = conn.create_security_group("test-sg", "Test SG") - sg.add_tag("test-tag", "test-value") - - groups = conn.get_all_security_groups( - filters={"tag:test-tag": "test-value"}) - groups.should.have.length_of(1) - - -@mock_ec2_deprecated -def test_authorize_all_protocols_with_no_port_specification(): - conn = boto.connect_ec2() - sg = conn.create_security_group('test', 'test') - - success = sg.authorize(ip_protocol='-1', cidr_ip='0.0.0.0/0') - success.should.be.true - - sg = conn.get_all_security_groups('test')[0] - sg.rules[0].from_port.should.equal(None) - sg.rules[0].to_port.should.equal(None) - - -@mock_ec2_deprecated -def test_sec_group_rule_limit(): - ec2_conn = boto.connect_ec2() - sg = ec2_conn.create_security_group('test', 'test') - other_sg = 
ec2_conn.create_security_group('test_2', 'test_other') - - # INGRESS - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(110)]) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - - sg.rules.should.be.empty - # authorize a rule targeting a different sec group (because this count too) - success = ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - src_security_group_group_id=other_sg.id) - success.should.be.true - # fill the rules up the limit - success = ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(99)]) - success.should.be.true - # verify that we cannot authorize past the limit for a CIDR IP - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', cidr_ip=['100.0.0.0/0']) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - # verify that we cannot authorize past the limit for a different sec group - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - src_security_group_group_id=other_sg.id) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - - # EGRESS - # authorize a rule targeting a different sec group (because this count too) - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - src_group_id=other_sg.id) - # fill the rules up the limit - # remember that by default, when created a sec group contains 1 egress rule - # so our other_sg rule + 98 CIDR IP rules + 1 by default == 100 the limit - for i in range(98): - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - cidr_ip='{0}.0.0.0/0'.format(i)) - # verify that we cannot authorize past the limit for a CIDR IP - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - cidr_ip='101.0.0.0/0') - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - # verify that we cannot authorize past the limit for a different sec group - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - src_group_id=other_sg.id) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - - -@mock_ec2_deprecated -def test_sec_group_rule_limit_vpc(): - ec2_conn = boto.connect_ec2() - vpc_conn = boto.connect_vpc() - - vpc = vpc_conn.create_vpc('10.0.0.0/8') - - sg = ec2_conn.create_security_group('test', 'test', vpc_id=vpc.id) - other_sg = ec2_conn.create_security_group('test_2', 'test', vpc_id=vpc.id) - - # INGRESS - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(110)]) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - - sg.rules.should.be.empty - # authorize a rule targeting a different sec group (because this count too) - success = ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - src_security_group_group_id=other_sg.id) - success.should.be.true - # fill the rules up the limit - success = ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(49)]) - # verify that we cannot authorize 
past the limit for a CIDR IP - success.should.be.true - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', cidr_ip=['100.0.0.0/0']) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - # verify that we cannot authorize past the limit for a different sec group - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group( - group_id=sg.id, ip_protocol='-1', - src_security_group_group_id=other_sg.id) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - - # EGRESS - # authorize a rule targeting a different sec group (because this count too) - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - src_group_id=other_sg.id) - # fill the rules up the limit - # remember that by default, when created a sec group contains 1 egress rule - # so our other_sg rule + 48 CIDR IP rules + 1 by default == 50 the limit - for i in range(48): - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - cidr_ip='{0}.0.0.0/0'.format(i)) - # verify that we cannot authorize past the limit for a CIDR IP - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - cidr_ip='50.0.0.0/0') - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - # verify that we cannot authorize past the limit for a different sec group - with assert_raises(EC2ResponseError) as cm: - ec2_conn.authorize_security_group_egress( - group_id=sg.id, ip_protocol='-1', - src_group_id=other_sg.id) - cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') - - -''' -Boto3 -''' - - -@mock_ec2 -def test_add_same_rule_twice_throws_error(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - sg = ec2.create_security_group( - GroupName='sg1', Description='Test security group sg1', VpcId=vpc.id) - - ip_permissions = [ - { - 'IpProtocol': 'tcp', - 'FromPort': 27017, - 'ToPort': 27017, - 'IpRanges': [{"CidrIp": "1.2.3.4/32"}] - }, - ] - sg.authorize_ingress(IpPermissions=ip_permissions) - - with assert_raises(ClientError) as ex: - sg.authorize_ingress(IpPermissions=ip_permissions) - - -@mock_ec2 -def test_security_group_tagging_boto3(): - conn = boto3.client('ec2', region_name='us-east-1') - - sg = conn.create_security_group(GroupName="test-sg", Description="Test SG") - - with assert_raises(ClientError) as ex: - conn.create_tags(Resources=[sg['GroupId']], Tags=[ - {'Key': 'Test', 'Value': 'Tag'}], DryRun=True) - ex.exception.response['Error']['Code'].should.equal('DryRunOperation') - ex.exception.response['ResponseMetadata'][ - 'HTTPStatusCode'].should.equal(400) - ex.exception.response['Error']['Message'].should.equal( - 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') - - conn.create_tags(Resources=[sg['GroupId']], Tags=[ - {'Key': 'Test', 'Value': 'Tag'}]) - describe = conn.describe_security_groups( - Filters=[{'Name': 'tag-value', 'Values': ['Tag']}]) - tag = describe["SecurityGroups"][0]['Tags'][0] - tag['Value'].should.equal("Tag") - tag['Key'].should.equal("Test") - - -@mock_ec2 -def test_security_group_wildcard_tag_filter_boto3(): - conn = boto3.client('ec2', region_name='us-east-1') - sg = conn.create_security_group(GroupName="test-sg", Description="Test SG") - conn.create_tags(Resources=[sg['GroupId']], Tags=[ - {'Key': 
'Test', 'Value': 'Tag'}]) - describe = conn.describe_security_groups( - Filters=[{'Name': 'tag-value', 'Values': ['*']}]) - - tag = describe["SecurityGroups"][0]['Tags'][0] - tag['Value'].should.equal("Tag") - tag['Key'].should.equal("Test") - - -@mock_ec2 -def test_authorize_and_revoke_in_bulk(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - - sg01 = ec2.create_security_group( - GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) - sg02 = ec2.create_security_group( - GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id) - sg03 = ec2.create_security_group( - GroupName='sg03', Description='Test security group sg03') - - ip_permissions = [ - { - 'IpProtocol': 'tcp', - 'FromPort': 27017, - 'ToPort': 27017, - 'UserIdGroupPairs': [{'GroupId': sg02.id, 'GroupName': 'sg02', - 'UserId': sg02.owner_id}], - 'IpRanges': [] - }, - { - 'IpProtocol': 'tcp', - 'FromPort': 27018, - 'ToPort': 27018, - 'UserIdGroupPairs': [{'GroupId': sg02.id, 'UserId': sg02.owner_id}], - 'IpRanges': [] - }, - { - 'IpProtocol': 'tcp', - 'FromPort': 27017, - 'ToPort': 27017, - 'UserIdGroupPairs': [{'GroupName': 'sg03', 'UserId': sg03.owner_id}], - 'IpRanges': [] - } - ] - expected_ip_permissions = copy.deepcopy(ip_permissions) - expected_ip_permissions[1]['UserIdGroupPairs'][0]['GroupName'] = 'sg02' - expected_ip_permissions[2]['UserIdGroupPairs'][0]['GroupId'] = sg03.id - - sg01.authorize_ingress(IpPermissions=ip_permissions) - sg01.ip_permissions.should.have.length_of(3) - for ip_permission in expected_ip_permissions: - sg01.ip_permissions.should.contain(ip_permission) - - sg01.revoke_ingress(IpPermissions=ip_permissions) - sg01.ip_permissions.should.be.empty - for ip_permission in expected_ip_permissions: - sg01.ip_permissions.shouldnt.contain(ip_permission) - - sg01.authorize_egress(IpPermissions=ip_permissions) - sg01.ip_permissions_egress.should.have.length_of(4) - for ip_permission in expected_ip_permissions: - sg01.ip_permissions_egress.should.contain(ip_permission) - - sg01.revoke_egress(IpPermissions=ip_permissions) - sg01.ip_permissions_egress.should.have.length_of(1) - for ip_permission in expected_ip_permissions: - sg01.ip_permissions_egress.shouldnt.contain(ip_permission) - - -@mock_ec2 -def test_security_group_ingress_without_multirule(): - ec2 = boto3.resource('ec2', 'ca-central-1') - sg = ec2.create_security_group(Description='Test SG', GroupName='test-sg') - - assert len(sg.ip_permissions) == 0 - sg.authorize_ingress(CidrIp='192.168.0.1/32', FromPort=22, ToPort=22, IpProtocol='tcp') - - # Fails - assert len(sg.ip_permissions) == 1 - - -@mock_ec2 -def test_security_group_ingress_without_multirule_after_reload(): - ec2 = boto3.resource('ec2', 'ca-central-1') - sg = ec2.create_security_group(Description='Test SG', GroupName='test-sg') - - assert len(sg.ip_permissions) == 0 - sg.authorize_ingress(CidrIp='192.168.0.1/32', FromPort=22, ToPort=22, IpProtocol='tcp') - - # Also Fails - sg_after = ec2.SecurityGroup(sg.id) - assert len(sg_after.ip_permissions) == 1 - - -@mock_ec2_deprecated -def test_get_all_security_groups_filter_with_same_vpc_id(): - conn = boto.connect_ec2('the_key', 'the_secret') - vpc_id = 'vpc-5300000c' - security_group = conn.create_security_group( - 'test1', 'test1', vpc_id=vpc_id) - security_group2 = conn.create_security_group( - 'test2', 'test2', vpc_id=vpc_id) - - security_group.vpc_id.should.equal(vpc_id) - security_group2.vpc_id.should.equal(vpc_id) - - security_groups = 
conn.get_all_security_groups( - group_ids=[security_group.id], filters={'vpc-id': [vpc_id]}) - security_groups.should.have.length_of(1) - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_security_groups(group_ids=['does_not_exist']) - cm.exception.code.should.equal('InvalidGroup.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none +from __future__ import unicode_literals + +import copy + +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises # noqa +from nose.tools import assert_raises + +import boto3 +import boto +from botocore.exceptions import ClientError +from boto.exception import EC2ResponseError +import sure # noqa + +from moto import mock_ec2, mock_ec2_deprecated + + +@mock_ec2_deprecated +def test_create_and_describe_security_group(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as ex: + security_group = conn.create_security_group( + 'test security group', 'this is a test security group', dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateSecurityGroup operation: Request would have succeeded, but DryRun flag is set') + + security_group = conn.create_security_group( + 'test security group', 'this is a test security group') + + security_group.name.should.equal('test security group') + security_group.description.should.equal('this is a test security group') + + # Trying to create another group with the same name should throw an error + with assert_raises(EC2ResponseError) as cm: + conn.create_security_group( + 'test security group', 'this is a test security group') + cm.exception.code.should.equal('InvalidGroup.Duplicate') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + all_groups = conn.get_all_security_groups() + # The default group gets created automatically + all_groups.should.have.length_of(3) + group_names = [group.name for group in all_groups] + set(group_names).should.equal(set(["default", "test security group"])) + + +@mock_ec2_deprecated +def test_create_security_group_without_description_raises_error(): + conn = boto.connect_ec2('the_key', 'the_secret') + + with assert_raises(EC2ResponseError) as cm: + conn.create_security_group('test security group', '') + cm.exception.code.should.equal('MissingParameter') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_default_security_group(): + conn = boto.ec2.connect_to_region('us-east-1') + groups = conn.get_all_security_groups() + groups.should.have.length_of(2) + groups[0].name.should.equal("default") + + +@mock_ec2_deprecated +def test_create_and_describe_vpc_security_group(): + conn = boto.connect_ec2('the_key', 'the_secret') + vpc_id = 'vpc-5300000c' + security_group = conn.create_security_group( + 'test security group', 'this is a test security group', vpc_id=vpc_id) + + security_group.vpc_id.should.equal(vpc_id) + + security_group.name.should.equal('test security group') + security_group.description.should.equal('this is a test security group') + + # Trying to create another group with the same name in the same VPC should + # throw an error + with assert_raises(EC2ResponseError) as cm: + conn.create_security_group( + 'test security group', 'this is a test security group', vpc_id) + 
cm.exception.code.should.equal('InvalidGroup.Duplicate') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + all_groups = conn.get_all_security_groups(filters={'vpc_id': [vpc_id]}) + + all_groups[0].vpc_id.should.equal(vpc_id) + + all_groups.should.have.length_of(1) + all_groups[0].name.should.equal('test security group') + + +@mock_ec2_deprecated +def test_create_two_security_groups_with_same_name_in_different_vpc(): + conn = boto.connect_ec2('the_key', 'the_secret') + vpc_id = 'vpc-5300000c' + vpc_id2 = 'vpc-5300000d' + + conn.create_security_group( + 'test security group', 'this is a test security group', vpc_id) + conn.create_security_group( + 'test security group', 'this is a test security group', vpc_id2) + + all_groups = conn.get_all_security_groups() + + all_groups.should.have.length_of(4) + group_names = [group.name for group in all_groups] + # The default group is created automatically + set(group_names).should.equal(set(["default", "test security group"])) + + +@mock_ec2_deprecated +def test_deleting_security_groups(): + conn = boto.connect_ec2('the_key', 'the_secret') + security_group1 = conn.create_security_group('test1', 'test1') + conn.create_security_group('test2', 'test2') + + conn.get_all_security_groups().should.have.length_of(4) + + # Deleting a group that doesn't exist should throw an error + with assert_raises(EC2ResponseError) as cm: + conn.delete_security_group('foobar') + cm.exception.code.should.equal('InvalidGroup.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Delete by name + with assert_raises(EC2ResponseError) as ex: + conn.delete_security_group('test2', dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteSecurityGroup operation: Request would have succeeded, but DryRun flag is set') + + conn.delete_security_group('test2') + conn.get_all_security_groups().should.have.length_of(3) + + # Delete by group id + conn.delete_security_group(group_id=security_group1.id) + conn.get_all_security_groups().should.have.length_of(2) + + +@mock_ec2_deprecated +def test_delete_security_group_in_vpc(): + conn = boto.connect_ec2('the_key', 'the_secret') + vpc_id = "vpc-12345" + security_group1 = conn.create_security_group('test1', 'test1', vpc_id) + + # this should not throw an exception + conn.delete_security_group(group_id=security_group1.id) + + +@mock_ec2_deprecated +def test_authorize_ip_range_and_revoke(): + conn = boto.connect_ec2('the_key', 'the_secret') + security_group = conn.create_security_group('test', 'test') + + with assert_raises(EC2ResponseError) as ex: + success = security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the GrantSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') + + success = security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") + assert success.should.be.true + + security_group = conn.get_all_security_groups(groupnames=['test'])[0] + int(security_group.rules[0].to_port).should.equal(2222) + security_group.rules[0].grants[ + 
0].cidr_ip.should.equal("123.123.123.123/32") + + # Wrong Cidr should throw error + with assert_raises(EC2ResponseError) as cm: + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", cidr_ip="123.123.123.122/32") + cm.exception.code.should.equal('InvalidPermission.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Actually revoke + with assert_raises(EC2ResponseError) as ex: + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the RevokeSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set') + + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", cidr_ip="123.123.123.123/32") + + security_group = conn.get_all_security_groups()[0] + security_group.rules.should.have.length_of(0) + + # Test for egress as well + egress_security_group = conn.create_security_group( + 'testegress', 'testegress', vpc_id='vpc-3432589') + + with assert_raises(EC2ResponseError) as ex: + success = conn.authorize_security_group_egress( + egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the GrantSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') + + success = conn.authorize_security_group_egress( + egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") + assert success.should.be.true + egress_security_group = conn.get_all_security_groups( + groupnames='testegress')[0] + # There are two egress rules associated with the security group: + # the default outbound rule and the new one + int(egress_security_group.rules_egress[1].to_port).should.equal(2222) + egress_security_group.rules_egress[1].grants[ + 0].cidr_ip.should.equal("123.123.123.123/32") + + # Wrong Cidr should throw error + egress_security_group.revoke.when.called_with( + ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.122/32").should.throw(EC2ResponseError) + + # Actually revoke + with assert_raises(EC2ResponseError) as ex: + conn.revoke_security_group_egress( + egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the RevokeSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set') + + conn.revoke_security_group_egress( + egress_security_group.id, "tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123/32") + + egress_security_group = conn.get_all_security_groups()[0] + # There is still the default outbound rule + egress_security_group.rules_egress.should.have.length_of(1) + + +@mock_ec2_deprecated +def test_authorize_other_group_and_revoke(): + conn = boto.connect_ec2('the_key', 'the_secret') + security_group = conn.create_security_group('test', 'test') + other_security_group = conn.create_security_group('other', 'other') + wrong_group = conn.create_security_group('wrong', 
'wrong') + + success = security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) + assert success.should.be.true + + security_group = [ + group for group in conn.get_all_security_groups() if group.name == 'test'][0] + int(security_group.rules[0].to_port).should.equal(2222) + security_group.rules[0].grants[ + 0].group_id.should.equal(other_security_group.id) + + # Wrong source group should throw error + with assert_raises(EC2ResponseError) as cm: + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", src_group=wrong_group) + cm.exception.code.should.equal('InvalidPermission.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + # Actually revoke + security_group.revoke(ip_protocol="tcp", from_port="22", + to_port="2222", src_group=other_security_group) + + security_group = [ + group for group in conn.get_all_security_groups() if group.name == 'test'][0] + security_group.rules.should.have.length_of(0) + + +@mock_ec2 +def test_authorize_other_group_egress_and_revoke(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + + sg01 = ec2.create_security_group( + GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) + sg02 = ec2.create_security_group( + GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id) + + ip_permission = { + 'IpProtocol': 'tcp', + 'FromPort': 27017, + 'ToPort': 27017, + 'UserIdGroupPairs': [{'GroupId': sg02.id, 'GroupName': 'sg02', 'UserId': sg02.owner_id}], + 'IpRanges': [] + } + + sg01.authorize_egress(IpPermissions=[ip_permission]) + sg01.ip_permissions_egress.should.have.length_of(2) + sg01.ip_permissions_egress.should.contain(ip_permission) + + sg01.revoke_egress(IpPermissions=[ip_permission]) + sg01.ip_permissions_egress.should.have.length_of(1) + + +@mock_ec2_deprecated +def test_authorize_group_in_vpc(): + conn = boto.connect_ec2('the_key', 'the_secret') + vpc_id = "vpc-12345" + + # create 2 groups in a vpc + security_group = conn.create_security_group('test1', 'test1', vpc_id) + other_security_group = conn.create_security_group('test2', 'test2', vpc_id) + + success = security_group.authorize( + ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) + success.should.be.true + + # Check that the rule is accurate + security_group = [ + group for group in conn.get_all_security_groups() if group.name == 'test1'][0] + int(security_group.rules[0].to_port).should.equal(2222) + security_group.rules[0].grants[ + 0].group_id.should.equal(other_security_group.id) + + # Now remove the rule + success = security_group.revoke( + ip_protocol="tcp", from_port="22", to_port="2222", src_group=other_security_group) + success.should.be.true + + # And check that it gets revoked + security_group = [ + group for group in conn.get_all_security_groups() if group.name == 'test1'][0] + security_group.rules.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_get_all_security_groups(): + conn = boto.connect_ec2() + sg1 = conn.create_security_group( + name='test1', description='test1', vpc_id='vpc-mjm05d27') + conn.create_security_group(name='test2', description='test2') + + resp = conn.get_all_security_groups(groupnames=['test1']) + resp.should.have.length_of(1) + resp[0].id.should.equal(sg1.id) + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_security_groups(groupnames=['does_not_exist']) + 
cm.exception.code.should.equal('InvalidGroup.NotFound')
+    cm.exception.status.should.equal(400)
+    cm.exception.request_id.should_not.be.none
+
+    resp.should.have.length_of(1)
+    resp[0].id.should.equal(sg1.id)
+
+    resp = conn.get_all_security_groups(filters={'vpc-id': ['vpc-mjm05d27']})
+    resp.should.have.length_of(1)
+    resp[0].id.should.equal(sg1.id)
+
+    resp = conn.get_all_security_groups(filters={'vpc_id': ['vpc-mjm05d27']})
+    resp.should.have.length_of(1)
+    resp[0].id.should.equal(sg1.id)
+
+    resp = conn.get_all_security_groups(filters={'description': ['test1']})
+    resp.should.have.length_of(1)
+    resp[0].id.should.equal(sg1.id)
+
+    resp = conn.get_all_security_groups()
+    resp.should.have.length_of(4)
+
+
+@mock_ec2_deprecated
+def test_authorize_bad_cidr_throws_invalid_parameter_value():
+    conn = boto.connect_ec2('the_key', 'the_secret')
+    security_group = conn.create_security_group('test', 'test')
+    with assert_raises(EC2ResponseError) as cm:
+        security_group.authorize(
+            ip_protocol="tcp", from_port="22", to_port="2222", cidr_ip="123.123.123.123")
+    cm.exception.code.should.equal('InvalidParameterValue')
+    cm.exception.status.should.equal(400)
+    cm.exception.request_id.should_not.be.none
+
+
+@mock_ec2_deprecated
+def test_security_group_tagging():
+    conn = boto.connect_vpc()
+    vpc = conn.create_vpc("10.0.0.0/16")
+
+    sg = conn.create_security_group("test-sg", "Test SG", vpc.id)
+
+    with assert_raises(EC2ResponseError) as ex:
+        sg.add_tag("Test", "Tag", dry_run=True)
+    ex.exception.error_code.should.equal('DryRunOperation')
+    ex.exception.status.should.equal(400)
+    ex.exception.message.should.equal(
+        'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set')
+
+    sg.add_tag("Test", "Tag")
+
+    tag = conn.get_all_tags()[0]
+    tag.name.should.equal("Test")
+    tag.value.should.equal("Tag")
+
+    group = conn.get_all_security_groups("test-sg")[0]
+    group.tags.should.have.length_of(1)
+    group.tags["Test"].should.equal("Tag")
+
+
+@mock_ec2_deprecated
+def test_security_group_tag_filtering():
+    conn = boto.connect_ec2()
+    sg = conn.create_security_group("test-sg", "Test SG")
+    sg.add_tag("test-tag", "test-value")
+
+    groups = conn.get_all_security_groups(
+        filters={"tag:test-tag": "test-value"})
+    groups.should.have.length_of(1)
+
+
+@mock_ec2_deprecated
+def test_authorize_all_protocols_with_no_port_specification():
+    conn = boto.connect_ec2()
+    sg = conn.create_security_group('test', 'test')
+
+    success = sg.authorize(ip_protocol='-1', cidr_ip='0.0.0.0/0')
+    success.should.be.true
+
+    sg = conn.get_all_security_groups('test')[0]
+    sg.rules[0].from_port.should.equal(None)
+    sg.rules[0].to_port.should.equal(None)
+
+
+@mock_ec2_deprecated
+def test_sec_group_rule_limit():
+    ec2_conn = boto.connect_ec2()
+    sg = ec2_conn.create_security_group('test', 'test')
+    other_sg = ec2_conn.create_security_group('test_2', 'test_other')
+
+    # INGRESS
+    with assert_raises(EC2ResponseError) as cm:
+        ec2_conn.authorize_security_group(
+            group_id=sg.id, ip_protocol='-1',
+            cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(110)])
+    cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
+
+    sg.rules.should.be.empty
+    # authorize a rule targeting a different sec group (because this counts too)
+    success = ec2_conn.authorize_security_group(
+        group_id=sg.id, ip_protocol='-1',
+        src_security_group_group_id=other_sg.id)
+    success.should.be.true
+    # fill the rules up to the limit
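+    # the other_sg rule + 99 CIDR IP rules == 100, the limit (mirroring the
+    # egress arithmetic spelled out below)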
group_id=sg.id, ip_protocol='-1', + cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(99)]) + success.should.be.true + # verify that we cannot authorize past the limit for a CIDR IP + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', cidr_ip=['100.0.0.0/0']) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + # verify that we cannot authorize past the limit for a different sec group + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + src_security_group_group_id=other_sg.id) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + + # EGRESS + # authorize a rule targeting a different sec group (because this count too) + ec2_conn.authorize_security_group_egress( + group_id=sg.id, ip_protocol='-1', + src_group_id=other_sg.id) + # fill the rules up the limit + # remember that by default, when created a sec group contains 1 egress rule + # so our other_sg rule + 98 CIDR IP rules + 1 by default == 100 the limit + for i in range(98): + ec2_conn.authorize_security_group_egress( + group_id=sg.id, ip_protocol='-1', + cidr_ip='{0}.0.0.0/0'.format(i)) + # verify that we cannot authorize past the limit for a CIDR IP + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group_egress( + group_id=sg.id, ip_protocol='-1', + cidr_ip='101.0.0.0/0') + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + # verify that we cannot authorize past the limit for a different sec group + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group_egress( + group_id=sg.id, ip_protocol='-1', + src_group_id=other_sg.id) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + + +@mock_ec2_deprecated +def test_sec_group_rule_limit_vpc(): + ec2_conn = boto.connect_ec2() + vpc_conn = boto.connect_vpc() + + vpc = vpc_conn.create_vpc('10.0.0.0/8') + + sg = ec2_conn.create_security_group('test', 'test', vpc_id=vpc.id) + other_sg = ec2_conn.create_security_group('test_2', 'test', vpc_id=vpc.id) + + # INGRESS + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(110)]) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + + sg.rules.should.be.empty + # authorize a rule targeting a different sec group (because this count too) + success = ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + src_security_group_group_id=other_sg.id) + success.should.be.true + # fill the rules up the limit + success = ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + cidr_ip=['{0}.0.0.0/0'.format(i) for i in range(49)]) + # verify that we cannot authorize past the limit for a CIDR IP + success.should.be.true + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', cidr_ip=['100.0.0.0/0']) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + # verify that we cannot authorize past the limit for a different sec group + with assert_raises(EC2ResponseError) as cm: + ec2_conn.authorize_security_group( + group_id=sg.id, ip_protocol='-1', + src_security_group_group_id=other_sg.id) + cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded') + + # EGRESS + # authorize a rule targeting a different sec group 
+    ec2_conn.authorize_security_group_egress(
+        group_id=sg.id, ip_protocol='-1',
+        src_group_id=other_sg.id)
+    # fill the rules up to the limit
+    # remember that, by default, a newly created sec group contains 1 egress rule
+    # so the other_sg rule + 48 CIDR rules + the 1 default rule add up to the limit of 50
+    for i in range(48):
+        ec2_conn.authorize_security_group_egress(
+            group_id=sg.id, ip_protocol='-1',
+            cidr_ip='{0}.0.0.0/0'.format(i))
+    # verify that we cannot authorize past the limit for a CIDR IP
+    with assert_raises(EC2ResponseError) as cm:
+        ec2_conn.authorize_security_group_egress(
+            group_id=sg.id, ip_protocol='-1',
+            cidr_ip='50.0.0.0/0')
+    cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
+    # verify that we cannot authorize past the limit for a different sec group
+    with assert_raises(EC2ResponseError) as cm:
+        ec2_conn.authorize_security_group_egress(
+            group_id=sg.id, ip_protocol='-1',
+            src_group_id=other_sg.id)
+    cm.exception.error_code.should.equal('RulesPerSecurityGroupLimitExceeded')
+
+
+'''
+Boto3
+'''
+
+
+@mock_ec2
+def test_add_same_rule_twice_throws_error():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
+    sg = ec2.create_security_group(
+        GroupName='sg1', Description='Test security group sg1', VpcId=vpc.id)
+
+    ip_permissions = [
+        {
+            'IpProtocol': 'tcp',
+            'FromPort': 27017,
+            'ToPort': 27017,
+            'IpRanges': [{"CidrIp": "1.2.3.4/32"}]
+        },
+    ]
+    sg.authorize_ingress(IpPermissions=ip_permissions)
+
+    with assert_raises(ClientError) as ex:
+        sg.authorize_ingress(IpPermissions=ip_permissions)
+
+
+@mock_ec2
+def test_security_group_tagging_boto3():
+    conn = boto3.client('ec2', region_name='us-east-1')
+
+    sg = conn.create_security_group(GroupName="test-sg", Description="Test SG")
+
+    with assert_raises(ClientError) as ex:
+        conn.create_tags(Resources=[sg['GroupId']], Tags=[
+            {'Key': 'Test', 'Value': 'Tag'}], DryRun=True)
+    ex.exception.response['Error']['Code'].should.equal('DryRunOperation')
+    ex.exception.response['ResponseMetadata'][
+        'HTTPStatusCode'].should.equal(400)
+    ex.exception.response['Error']['Message'].should.equal(
+        'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set')
+
+    conn.create_tags(Resources=[sg['GroupId']], Tags=[
+        {'Key': 'Test', 'Value': 'Tag'}])
+    describe = conn.describe_security_groups(
+        Filters=[{'Name': 'tag-value', 'Values': ['Tag']}])
+    tag = describe["SecurityGroups"][0]['Tags'][0]
+    tag['Value'].should.equal("Tag")
+    tag['Key'].should.equal("Test")
+
+
+@mock_ec2
+def test_security_group_wildcard_tag_filter_boto3():
+    conn = boto3.client('ec2', region_name='us-east-1')
+    sg = conn.create_security_group(GroupName="test-sg", Description="Test SG")
+    conn.create_tags(Resources=[sg['GroupId']], Tags=[
+        {'Key': 'Test', 'Value': 'Tag'}])
+    describe = conn.describe_security_groups(
+        Filters=[{'Name': 'tag-value', 'Values': ['*']}])
+
+    tag = describe["SecurityGroups"][0]['Tags'][0]
+    tag['Value'].should.equal("Tag")
+    tag['Key'].should.equal("Test")
+
+
+@mock_ec2
+def test_authorize_and_revoke_in_bulk():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
+
+    sg01 = ec2.create_security_group(
+        GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id)
+    sg02 = ec2.create_security_group(
+        GroupName='sg02', Description='Test security group sg02', VpcId=vpc.id)
+    sg03 = ec2.create_security_group(
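+        # Note: sg03 is created without a VpcId, which is why the third
+        # permission below can reference it by GroupName rather than GroupId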
GroupName='sg03', Description='Test security group sg03') + + ip_permissions = [ + { + 'IpProtocol': 'tcp', + 'FromPort': 27017, + 'ToPort': 27017, + 'UserIdGroupPairs': [{'GroupId': sg02.id, 'GroupName': 'sg02', + 'UserId': sg02.owner_id}], + 'IpRanges': [] + }, + { + 'IpProtocol': 'tcp', + 'FromPort': 27018, + 'ToPort': 27018, + 'UserIdGroupPairs': [{'GroupId': sg02.id, 'UserId': sg02.owner_id}], + 'IpRanges': [] + }, + { + 'IpProtocol': 'tcp', + 'FromPort': 27017, + 'ToPort': 27017, + 'UserIdGroupPairs': [{'GroupName': 'sg03', 'UserId': sg03.owner_id}], + 'IpRanges': [] + } + ] + expected_ip_permissions = copy.deepcopy(ip_permissions) + expected_ip_permissions[1]['UserIdGroupPairs'][0]['GroupName'] = 'sg02' + expected_ip_permissions[2]['UserIdGroupPairs'][0]['GroupId'] = sg03.id + + sg01.authorize_ingress(IpPermissions=ip_permissions) + sg01.ip_permissions.should.have.length_of(3) + for ip_permission in expected_ip_permissions: + sg01.ip_permissions.should.contain(ip_permission) + + sg01.revoke_ingress(IpPermissions=ip_permissions) + sg01.ip_permissions.should.be.empty + for ip_permission in expected_ip_permissions: + sg01.ip_permissions.shouldnt.contain(ip_permission) + + sg01.authorize_egress(IpPermissions=ip_permissions) + sg01.ip_permissions_egress.should.have.length_of(4) + for ip_permission in expected_ip_permissions: + sg01.ip_permissions_egress.should.contain(ip_permission) + + sg01.revoke_egress(IpPermissions=ip_permissions) + sg01.ip_permissions_egress.should.have.length_of(1) + for ip_permission in expected_ip_permissions: + sg01.ip_permissions_egress.shouldnt.contain(ip_permission) + + +@mock_ec2 +def test_security_group_ingress_without_multirule(): + ec2 = boto3.resource('ec2', 'ca-central-1') + sg = ec2.create_security_group(Description='Test SG', GroupName='test-sg') + + assert len(sg.ip_permissions) == 0 + sg.authorize_ingress(CidrIp='192.168.0.1/32', FromPort=22, ToPort=22, IpProtocol='tcp') + + # Fails + assert len(sg.ip_permissions) == 1 + + +@mock_ec2 +def test_security_group_ingress_without_multirule_after_reload(): + ec2 = boto3.resource('ec2', 'ca-central-1') + sg = ec2.create_security_group(Description='Test SG', GroupName='test-sg') + + assert len(sg.ip_permissions) == 0 + sg.authorize_ingress(CidrIp='192.168.0.1/32', FromPort=22, ToPort=22, IpProtocol='tcp') + + # Also Fails + sg_after = ec2.SecurityGroup(sg.id) + assert len(sg_after.ip_permissions) == 1 + + +@mock_ec2_deprecated +def test_get_all_security_groups_filter_with_same_vpc_id(): + conn = boto.connect_ec2('the_key', 'the_secret') + vpc_id = 'vpc-5300000c' + security_group = conn.create_security_group( + 'test1', 'test1', vpc_id=vpc_id) + security_group2 = conn.create_security_group( + 'test2', 'test2', vpc_id=vpc_id) + + security_group.vpc_id.should.equal(vpc_id) + security_group2.vpc_id.should.equal(vpc_id) + + security_groups = conn.get_all_security_groups( + group_ids=[security_group.id], filters={'vpc-id': [vpc_id]}) + security_groups.should.have.length_of(1) + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_security_groups(group_ids=['does_not_exist']) + cm.exception.code.should.equal('InvalidGroup.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none diff --git a/tests/test_ec2/test_server.py b/tests/test_ec2/test_server.py index 00be62593..dc5657144 100644 --- a/tests/test_ec2/test_server.py +++ b/tests/test_ec2/test_server.py @@ -1,26 +1,26 @@ -from __future__ import unicode_literals -import re -import sure # noqa - -import moto.server as 
server
-
-'''
-Test the different server responses
-'''
-
-
-def test_ec2_server_get():
-    backend = server.create_backend_app("ec2")
-    test_client = backend.test_client()
-
-    res = test_client.get(
-        '/?Action=RunInstances&ImageId=ami-60a54009',
-        headers={"Host": "ec2.us-east-1.amazonaws.com"}
-    )
-
-    groups = re.search("<instanceId>(.*)</instanceId>",
-                       res.data.decode('utf-8'))
-    instance_id = groups.groups()[0]
-
-    res = test_client.get('/?Action=DescribeInstances')
-    res.data.decode('utf-8').should.contain(instance_id)
+from __future__ import unicode_literals
+import re
+import sure # noqa
+
+import moto.server as server
+
+'''
+Test the different server responses
+'''
+
+
+def test_ec2_server_get():
+    backend = server.create_backend_app("ec2")
+    test_client = backend.test_client()
+
+    res = test_client.get(
+        '/?Action=RunInstances&ImageId=ami-60a54009',
+        headers={"Host": "ec2.us-east-1.amazonaws.com"}
+    )
+
+    groups = re.search("<instanceId>(.*)</instanceId>",
+                       res.data.decode('utf-8'))
+    instance_id = groups.groups()[0]
+
+    res = test_client.get('/?Action=DescribeInstances')
+    res.data.decode('utf-8').should.contain(instance_id)
diff --git a/tests/test_ec2/test_spot_fleet.py b/tests/test_ec2/test_spot_fleet.py
index a2bd1d061..01b05566a 100644
--- a/tests/test_ec2/test_spot_fleet.py
+++ b/tests/test_ec2/test_spot_fleet.py
@@ -1,345 +1,345 @@
-from __future__ import unicode_literals
-
-import boto3
-import sure # noqa
-
-from moto import mock_ec2
-
-
-def get_subnet_id(conn):
-    vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc']
-    subnet = conn.create_subnet(
-        VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet']
-    subnet_id = subnet['SubnetId']
-    return subnet_id
-
-
-def spot_config(subnet_id, allocation_strategy="lowestPrice"):
-    return {
-        'ClientToken': 'string',
-        'SpotPrice': '0.12',
-        'TargetCapacity': 6,
-        'IamFleetRole': 'arn:aws:iam::123456789012:role/fleet',
-        'LaunchSpecifications': [{
-            'ImageId': 'ami-123',
-            'KeyName': 'my-key',
-            'SecurityGroups': [
-                {
-                    'GroupId': 'sg-123'
-                },
-            ],
-            'UserData': 'some user data',
-            'InstanceType': 't2.small',
-            'BlockDeviceMappings': [
-                {
-                    'VirtualName': 'string',
-                    'DeviceName': 'string',
-                    'Ebs': {
-                        'SnapshotId': 'string',
-                        'VolumeSize': 123,
-                        'DeleteOnTermination': True | False,
-                        'VolumeType': 'standard',
-                        'Iops': 123,
-                        'Encrypted': True | False
-                    },
-                    'NoDevice': 'string'
-                },
-            ],
-            'Monitoring': {
-                'Enabled': True
-            },
-            'SubnetId': subnet_id,
-            'IamInstanceProfile': {
-                'Arn': 'arn:aws:iam::123456789012:role/fleet'
-            },
-            'EbsOptimized': False,
-            'WeightedCapacity': 2.0,
-            'SpotPrice': '0.13'
-        }, {
-            'ImageId': 'ami-123',
-            'KeyName': 'my-key',
-            'SecurityGroups': [
-                {
-                    'GroupId': 'sg-123'
-                },
-            ],
-            'UserData': 'some user data',
-            'InstanceType': 't2.large',
-            'Monitoring': {
-                'Enabled': True
-            },
-            'SubnetId': subnet_id,
-            'IamInstanceProfile': {
-                'Arn': 'arn:aws:iam::123456789012:role/fleet'
-            },
-            'EbsOptimized': False,
-            'WeightedCapacity': 4.0,
-            'SpotPrice': '10.00',
-        }],
-        'AllocationStrategy': allocation_strategy,
-        'FulfilledCapacity': 6,
-    }
-
-
-@mock_ec2
-def test_create_spot_fleet_with_lowest_price():
-    conn = boto3.client("ec2", region_name='us-west-2')
-    subnet_id = get_subnet_id(conn)
-
-    spot_fleet_res = conn.request_spot_fleet(
-        SpotFleetRequestConfig=spot_config(subnet_id)
-    )
-    spot_fleet_id = spot_fleet_res['SpotFleetRequestId']
-
-    spot_fleet_requests = conn.describe_spot_fleet_requests(
-        SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs']
-    len(spot_fleet_requests).should.equal(1)
spot_fleet_request = spot_fleet_requests[0] - spot_fleet_request['SpotFleetRequestState'].should.equal("active") - spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig'] - - spot_fleet_config['SpotPrice'].should.equal('0.12') - spot_fleet_config['TargetCapacity'].should.equal(6) - spot_fleet_config['IamFleetRole'].should.equal( - 'arn:aws:iam::123456789012:role/fleet') - spot_fleet_config['AllocationStrategy'].should.equal('lowestPrice') - spot_fleet_config['FulfilledCapacity'].should.equal(6.0) - - len(spot_fleet_config['LaunchSpecifications']).should.equal(2) - launch_spec = spot_fleet_config['LaunchSpecifications'][0] - - launch_spec['EbsOptimized'].should.equal(False) - launch_spec['SecurityGroups'].should.equal([{"GroupId": "sg-123"}]) - launch_spec['IamInstanceProfile'].should.equal( - {"Arn": "arn:aws:iam::123456789012:role/fleet"}) - launch_spec['ImageId'].should.equal("ami-123") - launch_spec['InstanceType'].should.equal("t2.small") - launch_spec['KeyName'].should.equal("my-key") - launch_spec['Monitoring'].should.equal({"Enabled": True}) - launch_spec['SpotPrice'].should.equal("0.13") - launch_spec['SubnetId'].should.equal(subnet_id) - launch_spec['UserData'].should.equal("some user data") - launch_spec['WeightedCapacity'].should.equal(2.0) - - instance_res = conn.describe_spot_fleet_instances( - SpotFleetRequestId=spot_fleet_id) - instances = instance_res['ActiveInstances'] - len(instances).should.equal(3) - - -@mock_ec2 -def test_create_diversified_spot_fleet(): - conn = boto3.client("ec2", region_name='us-west-2') - subnet_id = get_subnet_id(conn) - diversified_config = spot_config( - subnet_id, allocation_strategy='diversified') - - spot_fleet_res = conn.request_spot_fleet( - SpotFleetRequestConfig=diversified_config - ) - spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - - instance_res = conn.describe_spot_fleet_instances( - SpotFleetRequestId=spot_fleet_id) - instances = instance_res['ActiveInstances'] - len(instances).should.equal(2) - instance_types = set([instance['InstanceType'] for instance in instances]) - instance_types.should.equal(set(["t2.small", "t2.large"])) - instances[0]['InstanceId'].should.contain("i-") - - -@mock_ec2 -def test_cancel_spot_fleet_request(): - conn = boto3.client("ec2", region_name='us-west-2') - subnet_id = get_subnet_id(conn) - - spot_fleet_res = conn.request_spot_fleet( - SpotFleetRequestConfig=spot_config(subnet_id), - ) - spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - - conn.cancel_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id], TerminateInstances=True) - - spot_fleet_requests = conn.describe_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] - len(spot_fleet_requests).should.equal(0) - - -@mock_ec2 -def test_modify_spot_fleet_request_up(): - conn = boto3.client("ec2", region_name='us-west-2') - subnet_id = get_subnet_id(conn) - - spot_fleet_res = conn.request_spot_fleet( - SpotFleetRequestConfig=spot_config(subnet_id), - ) - spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - - conn.modify_spot_fleet_request( - SpotFleetRequestId=spot_fleet_id, TargetCapacity=20) - - instance_res = conn.describe_spot_fleet_instances( - SpotFleetRequestId=spot_fleet_id) - instances = instance_res['ActiveInstances'] - len(instances).should.equal(10) - - spot_fleet_config = conn.describe_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] - spot_fleet_config['TargetCapacity'].should.equal(20) - 
spot_fleet_config['FulfilledCapacity'].should.equal(20.0) - - -@mock_ec2 -def test_modify_spot_fleet_request_up_diversified(): - conn = boto3.client("ec2", region_name='us-west-2') - subnet_id = get_subnet_id(conn) - - spot_fleet_res = conn.request_spot_fleet( - SpotFleetRequestConfig=spot_config( - subnet_id, allocation_strategy='diversified'), - ) - spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - - conn.modify_spot_fleet_request( - SpotFleetRequestId=spot_fleet_id, TargetCapacity=19) - - instance_res = conn.describe_spot_fleet_instances( - SpotFleetRequestId=spot_fleet_id) - instances = instance_res['ActiveInstances'] - len(instances).should.equal(7) - - spot_fleet_config = conn.describe_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] - spot_fleet_config['TargetCapacity'].should.equal(19) - spot_fleet_config['FulfilledCapacity'].should.equal(20.0) - - -@mock_ec2 -def test_modify_spot_fleet_request_down_no_terminate(): - conn = boto3.client("ec2", region_name='us-west-2') - subnet_id = get_subnet_id(conn) - - spot_fleet_res = conn.request_spot_fleet( - SpotFleetRequestConfig=spot_config(subnet_id), - ) - spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - - conn.modify_spot_fleet_request( - SpotFleetRequestId=spot_fleet_id, TargetCapacity=1, ExcessCapacityTerminationPolicy="noTermination") - - instance_res = conn.describe_spot_fleet_instances( - SpotFleetRequestId=spot_fleet_id) - instances = instance_res['ActiveInstances'] - len(instances).should.equal(3) - - spot_fleet_config = conn.describe_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] - spot_fleet_config['TargetCapacity'].should.equal(1) - spot_fleet_config['FulfilledCapacity'].should.equal(6.0) - - -@mock_ec2 -def test_modify_spot_fleet_request_down_odd(): - conn = boto3.client("ec2", region_name='us-west-2') - subnet_id = get_subnet_id(conn) - - spot_fleet_res = conn.request_spot_fleet( - SpotFleetRequestConfig=spot_config(subnet_id), - ) - spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - - conn.modify_spot_fleet_request( - SpotFleetRequestId=spot_fleet_id, TargetCapacity=7) - conn.modify_spot_fleet_request( - SpotFleetRequestId=spot_fleet_id, TargetCapacity=5) - - instance_res = conn.describe_spot_fleet_instances( - SpotFleetRequestId=spot_fleet_id) - instances = instance_res['ActiveInstances'] - len(instances).should.equal(3) - - spot_fleet_config = conn.describe_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] - spot_fleet_config['TargetCapacity'].should.equal(5) - spot_fleet_config['FulfilledCapacity'].should.equal(6.0) - - -@mock_ec2 -def test_modify_spot_fleet_request_down(): - conn = boto3.client("ec2", region_name='us-west-2') - subnet_id = get_subnet_id(conn) - - spot_fleet_res = conn.request_spot_fleet( - SpotFleetRequestConfig=spot_config(subnet_id), - ) - spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - - conn.modify_spot_fleet_request( - SpotFleetRequestId=spot_fleet_id, TargetCapacity=1) - - instance_res = conn.describe_spot_fleet_instances( - SpotFleetRequestId=spot_fleet_id) - instances = instance_res['ActiveInstances'] - len(instances).should.equal(1) - - spot_fleet_config = conn.describe_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] - spot_fleet_config['TargetCapacity'].should.equal(1) - 
spot_fleet_config['FulfilledCapacity'].should.equal(2.0) - - -@mock_ec2 -def test_modify_spot_fleet_request_down_no_terminate_after_custom_terminate(): - conn = boto3.client("ec2", region_name='us-west-2') - subnet_id = get_subnet_id(conn) - - spot_fleet_res = conn.request_spot_fleet( - SpotFleetRequestConfig=spot_config(subnet_id), - ) - spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] - - instance_res = conn.describe_spot_fleet_instances( - SpotFleetRequestId=spot_fleet_id) - instances = instance_res['ActiveInstances'] - conn.terminate_instances(InstanceIds=[i['InstanceId'] for i in instances[1:]]) - - conn.modify_spot_fleet_request( - SpotFleetRequestId=spot_fleet_id, TargetCapacity=1, ExcessCapacityTerminationPolicy="noTermination") - - instance_res = conn.describe_spot_fleet_instances( - SpotFleetRequestId=spot_fleet_id) - instances = instance_res['ActiveInstances'] - len(instances).should.equal(1) - - spot_fleet_config = conn.describe_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] - spot_fleet_config['TargetCapacity'].should.equal(1) - spot_fleet_config['FulfilledCapacity'].should.equal(2.0) - - -@mock_ec2 -def test_create_spot_fleet_without_spot_price(): - conn = boto3.client("ec2", region_name='us-west-2') - subnet_id = get_subnet_id(conn) - - # remove prices to force a fallback to ondemand price - spot_config_without_price = spot_config(subnet_id) - del spot_config_without_price['SpotPrice'] - for spec in spot_config_without_price['LaunchSpecifications']: - del spec['SpotPrice'] - - spot_fleet_id = conn.request_spot_fleet(SpotFleetRequestConfig=spot_config_without_price)['SpotFleetRequestId'] - spot_fleet_requests = conn.describe_spot_fleet_requests( - SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] - len(spot_fleet_requests).should.equal(1) - spot_fleet_request = spot_fleet_requests[0] - spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig'] - - len(spot_fleet_config['LaunchSpecifications']).should.equal(2) - launch_spec1 = spot_fleet_config['LaunchSpecifications'][0] - launch_spec2 = spot_fleet_config['LaunchSpecifications'][1] - - # AWS will figure out the price - assert 'SpotPrice' not in launch_spec1 - assert 'SpotPrice' not in launch_spec2 +from __future__ import unicode_literals + +import boto3 +import sure # noqa + +from moto import mock_ec2 + + +def get_subnet_id(conn): + vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + subnet = conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] + subnet_id = subnet['SubnetId'] + return subnet_id + + +def spot_config(subnet_id, allocation_strategy="lowestPrice"): + return { + 'ClientToken': 'string', + 'SpotPrice': '0.12', + 'TargetCapacity': 6, + 'IamFleetRole': 'arn:aws:iam::123456789012:role/fleet', + 'LaunchSpecifications': [{ + 'ImageId': 'ami-123', + 'KeyName': 'my-key', + 'SecurityGroups': [ + { + 'GroupId': 'sg-123' + }, + ], + 'UserData': 'some user data', + 'InstanceType': 't2.small', + 'BlockDeviceMappings': [ + { + 'VirtualName': 'string', + 'DeviceName': 'string', + 'Ebs': { + 'SnapshotId': 'string', + 'VolumeSize': 123, + 'DeleteOnTermination': True | False, + 'VolumeType': 'standard', + 'Iops': 123, + 'Encrypted': True | False + }, + 'NoDevice': 'string' + }, + ], + 'Monitoring': { + 'Enabled': True + }, + 'SubnetId': subnet_id, + 'IamInstanceProfile': { + 'Arn': 'arn:aws:iam::123456789012:role/fleet' + }, + 'EbsOptimized': False, + 'WeightedCapacity': 2.0, + 
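+            # WeightedCapacity 2.0 means each t2.small instance fulfills 2 units
+            # of TargetCapacity, so a target of 6 yields 3 instances of this spec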
'SpotPrice': '0.13' + }, { + 'ImageId': 'ami-123', + 'KeyName': 'my-key', + 'SecurityGroups': [ + { + 'GroupId': 'sg-123' + }, + ], + 'UserData': 'some user data', + 'InstanceType': 't2.large', + 'Monitoring': { + 'Enabled': True + }, + 'SubnetId': subnet_id, + 'IamInstanceProfile': { + 'Arn': 'arn:aws:iam::123456789012:role/fleet' + }, + 'EbsOptimized': False, + 'WeightedCapacity': 4.0, + 'SpotPrice': '10.00', + }], + 'AllocationStrategy': allocation_strategy, + 'FulfilledCapacity': 6, + } + + +@mock_ec2 +def test_create_spot_fleet_with_lowest_price(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config(subnet_id) + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + spot_fleet_requests = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] + len(spot_fleet_requests).should.equal(1) + spot_fleet_request = spot_fleet_requests[0] + spot_fleet_request['SpotFleetRequestState'].should.equal("active") + spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig'] + + spot_fleet_config['SpotPrice'].should.equal('0.12') + spot_fleet_config['TargetCapacity'].should.equal(6) + spot_fleet_config['IamFleetRole'].should.equal( + 'arn:aws:iam::123456789012:role/fleet') + spot_fleet_config['AllocationStrategy'].should.equal('lowestPrice') + spot_fleet_config['FulfilledCapacity'].should.equal(6.0) + + len(spot_fleet_config['LaunchSpecifications']).should.equal(2) + launch_spec = spot_fleet_config['LaunchSpecifications'][0] + + launch_spec['EbsOptimized'].should.equal(False) + launch_spec['SecurityGroups'].should.equal([{"GroupId": "sg-123"}]) + launch_spec['IamInstanceProfile'].should.equal( + {"Arn": "arn:aws:iam::123456789012:role/fleet"}) + launch_spec['ImageId'].should.equal("ami-123") + launch_spec['InstanceType'].should.equal("t2.small") + launch_spec['KeyName'].should.equal("my-key") + launch_spec['Monitoring'].should.equal({"Enabled": True}) + launch_spec['SpotPrice'].should.equal("0.13") + launch_spec['SubnetId'].should.equal(subnet_id) + launch_spec['UserData'].should.equal("some user data") + launch_spec['WeightedCapacity'].should.equal(2.0) + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(3) + + +@mock_ec2 +def test_create_diversified_spot_fleet(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + diversified_config = spot_config( + subnet_id, allocation_strategy='diversified') + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=diversified_config + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(2) + instance_types = set([instance['InstanceType'] for instance in instances]) + instance_types.should.equal(set(["t2.small", "t2.large"])) + instances[0]['InstanceId'].should.contain("i-") + + +@mock_ec2 +def test_cancel_spot_fleet_request(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config(subnet_id), + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + conn.cancel_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id], 
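+        # Cancelling with TerminateInstances=True removes the request entirely,
+        # hence the zero-length describe result asserted below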
TerminateInstances=True) + + spot_fleet_requests = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] + len(spot_fleet_requests).should.equal(0) + + +@mock_ec2 +def test_modify_spot_fleet_request_up(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config(subnet_id), + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=20) + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(10) + + spot_fleet_config = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] + spot_fleet_config['TargetCapacity'].should.equal(20) + spot_fleet_config['FulfilledCapacity'].should.equal(20.0) + + +@mock_ec2 +def test_modify_spot_fleet_request_up_diversified(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config( + subnet_id, allocation_strategy='diversified'), + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=19) + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(7) + + spot_fleet_config = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] + spot_fleet_config['TargetCapacity'].should.equal(19) + spot_fleet_config['FulfilledCapacity'].should.equal(20.0) + + +@mock_ec2 +def test_modify_spot_fleet_request_down_no_terminate(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config(subnet_id), + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=1, ExcessCapacityTerminationPolicy="noTermination") + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(3) + + spot_fleet_config = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] + spot_fleet_config['TargetCapacity'].should.equal(1) + spot_fleet_config['FulfilledCapacity'].should.equal(6.0) + + +@mock_ec2 +def test_modify_spot_fleet_request_down_odd(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config(subnet_id), + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=7) + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=5) + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(3) + + spot_fleet_config = conn.describe_spot_fleet_requests( + 
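+        # Capacity is only removed in whole instances: scaling 7 -> 5 with
+        # weight-2.0 instances leaves 3 instances, i.e. 6 units fulfilled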
SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] + spot_fleet_config['TargetCapacity'].should.equal(5) + spot_fleet_config['FulfilledCapacity'].should.equal(6.0) + + +@mock_ec2 +def test_modify_spot_fleet_request_down(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config(subnet_id), + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=1) + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(1) + + spot_fleet_config = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] + spot_fleet_config['TargetCapacity'].should.equal(1) + spot_fleet_config['FulfilledCapacity'].should.equal(2.0) + + +@mock_ec2 +def test_modify_spot_fleet_request_down_no_terminate_after_custom_terminate(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + spot_fleet_res = conn.request_spot_fleet( + SpotFleetRequestConfig=spot_config(subnet_id), + ) + spot_fleet_id = spot_fleet_res['SpotFleetRequestId'] + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + conn.terminate_instances(InstanceIds=[i['InstanceId'] for i in instances[1:]]) + + conn.modify_spot_fleet_request( + SpotFleetRequestId=spot_fleet_id, TargetCapacity=1, ExcessCapacityTerminationPolicy="noTermination") + + instance_res = conn.describe_spot_fleet_instances( + SpotFleetRequestId=spot_fleet_id) + instances = instance_res['ActiveInstances'] + len(instances).should.equal(1) + + spot_fleet_config = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] + spot_fleet_config['TargetCapacity'].should.equal(1) + spot_fleet_config['FulfilledCapacity'].should.equal(2.0) + + +@mock_ec2 +def test_create_spot_fleet_without_spot_price(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + # remove prices to force a fallback to ondemand price + spot_config_without_price = spot_config(subnet_id) + del spot_config_without_price['SpotPrice'] + for spec in spot_config_without_price['LaunchSpecifications']: + del spec['SpotPrice'] + + spot_fleet_id = conn.request_spot_fleet(SpotFleetRequestConfig=spot_config_without_price)['SpotFleetRequestId'] + spot_fleet_requests = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] + len(spot_fleet_requests).should.equal(1) + spot_fleet_request = spot_fleet_requests[0] + spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig'] + + len(spot_fleet_config['LaunchSpecifications']).should.equal(2) + launch_spec1 = spot_fleet_config['LaunchSpecifications'][0] + launch_spec2 = spot_fleet_config['LaunchSpecifications'][1] + + # AWS will figure out the price + assert 'SpotPrice' not in launch_spec1 + assert 'SpotPrice' not in launch_spec2 diff --git a/tests/test_ec2/test_spot_instances.py b/tests/test_ec2/test_spot_instances.py index 05f8ee88f..51590ed46 100644 --- a/tests/test_ec2/test_spot_instances.py +++ b/tests/test_ec2/test_spot_instances.py @@ -1,268 +1,268 @@ -from __future__ import unicode_literals -from 
nose.tools import assert_raises -import datetime - -import boto -import boto3 -from boto.exception import EC2ResponseError -from botocore.exceptions import ClientError -import pytz -import sure # noqa - -from moto import mock_ec2, mock_ec2_deprecated -from moto.backends import get_model -from moto.core.utils import iso_8601_datetime_with_milliseconds - - -@mock_ec2 -def test_request_spot_instances(): - conn = boto3.client('ec2', 'us-east-1') - vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] - subnet = conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] - subnet_id = subnet['SubnetId'] - - conn.create_security_group(GroupName='group1', Description='description') - conn.create_security_group(GroupName='group2', Description='description') - - start_dt = datetime.datetime(2013, 1, 1).replace(tzinfo=pytz.utc) - end_dt = datetime.datetime(2013, 1, 2).replace(tzinfo=pytz.utc) - start = iso_8601_datetime_with_milliseconds(start_dt) - end = iso_8601_datetime_with_milliseconds(end_dt) - - with assert_raises(ClientError) as ex: - request = conn.request_spot_instances( - SpotPrice="0.5", InstanceCount=1, Type='one-time', - ValidFrom=start, ValidUntil=end, LaunchGroup="the-group", - AvailabilityZoneGroup='my-group', - LaunchSpecification={ - "ImageId": 'ami-abcd1234', - "KeyName": "test", - "SecurityGroups": ['group1', 'group2'], - "UserData": "some test data", - "InstanceType": 'm1.small', - "Placement": { - "AvailabilityZone": 'us-east-1c', - }, - "KernelId": "test-kernel", - "RamdiskId": "test-ramdisk", - "Monitoring": { - "Enabled": True, - }, - "SubnetId": subnet_id, - }, - DryRun=True, - ) - ex.exception.response['Error']['Code'].should.equal('DryRunOperation') - ex.exception.response['ResponseMetadata'][ - 'HTTPStatusCode'].should.equal(400) - ex.exception.response['Error']['Message'].should.equal( - 'An error occurred (DryRunOperation) when calling the RequestSpotInstance operation: Request would have succeeded, but DryRun flag is set') - - request = conn.request_spot_instances( - SpotPrice="0.5", InstanceCount=1, Type='one-time', - ValidFrom=start, ValidUntil=end, LaunchGroup="the-group", - AvailabilityZoneGroup='my-group', - LaunchSpecification={ - "ImageId": 'ami-abcd1234', - "KeyName": "test", - "SecurityGroups": ['group1', 'group2'], - "UserData": "some test data", - "InstanceType": 'm1.small', - "Placement": { - "AvailabilityZone": 'us-east-1c', - }, - "KernelId": "test-kernel", - "RamdiskId": "test-ramdisk", - "Monitoring": { - "Enabled": True, - }, - "SubnetId": subnet_id, - }, - ) - - requests = conn.describe_spot_instance_requests()['SpotInstanceRequests'] - requests.should.have.length_of(1) - request = requests[0] - - request['State'].should.equal("open") - request['SpotPrice'].should.equal("0.5") - request['Type'].should.equal('one-time') - request['ValidFrom'].should.equal(start_dt) - request['ValidUntil'].should.equal(end_dt) - request['LaunchGroup'].should.equal("the-group") - request['AvailabilityZoneGroup'].should.equal('my-group') - - launch_spec = request['LaunchSpecification'] - security_group_names = [group['GroupName'] - for group in launch_spec['SecurityGroups']] - set(security_group_names).should.equal(set(['group1', 'group2'])) - - launch_spec['ImageId'].should.equal('ami-abcd1234') - launch_spec['KeyName'].should.equal("test") - launch_spec['InstanceType'].should.equal('m1.small') - launch_spec['KernelId'].should.equal("test-kernel") - launch_spec['RamdiskId'].should.equal("test-ramdisk") - 
launch_spec['SubnetId'].should.equal(subnet_id) - - -@mock_ec2 -def test_request_spot_instances_default_arguments(): - """ - Test that moto set the correct default arguments - """ - conn = boto3.client('ec2', 'us-east-1') - - request = conn.request_spot_instances( - SpotPrice="0.5", - LaunchSpecification={ - "ImageId": 'ami-abcd1234', - } - ) - - requests = conn.describe_spot_instance_requests()['SpotInstanceRequests'] - requests.should.have.length_of(1) - request = requests[0] - - request['State'].should.equal("open") - request['SpotPrice'].should.equal("0.5") - request['Type'].should.equal('one-time') - request.shouldnt.contain('ValidFrom') - request.shouldnt.contain('ValidUntil') - request.shouldnt.contain('LaunchGroup') - request.shouldnt.contain('AvailabilityZoneGroup') - - launch_spec = request['LaunchSpecification'] - - security_group_names = [group['GroupName'] - for group in launch_spec['SecurityGroups']] - security_group_names.should.equal(["default"]) - - launch_spec['ImageId'].should.equal('ami-abcd1234') - request.shouldnt.contain('KeyName') - launch_spec['InstanceType'].should.equal('m1.small') - request.shouldnt.contain('KernelId') - request.shouldnt.contain('RamdiskId') - request.shouldnt.contain('SubnetId') - - -@mock_ec2_deprecated -def test_cancel_spot_instance_request(): - conn = boto.connect_ec2() - - conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234', - ) - - requests = conn.get_all_spot_instance_requests() - requests.should.have.length_of(1) - - with assert_raises(EC2ResponseError) as ex: - conn.cancel_spot_instance_requests([requests[0].id], dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CancelSpotInstance operation: Request would have succeeded, but DryRun flag is set') - - conn.cancel_spot_instance_requests([requests[0].id]) - - requests = conn.get_all_spot_instance_requests() - requests.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_request_spot_instances_fulfilled(): - """ - Test that moto correctly fullfills a spot instance request - """ - conn = boto.ec2.connect_to_region("us-east-1") - - request = conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234', - ) - - requests = conn.get_all_spot_instance_requests() - requests.should.have.length_of(1) - request = requests[0] - - request.state.should.equal("open") - - get_model('SpotInstanceRequest', 'us-east-1')[0].state = 'active' - - requests = conn.get_all_spot_instance_requests() - requests.should.have.length_of(1) - request = requests[0] - - request.state.should.equal("active") - - -@mock_ec2_deprecated -def test_tag_spot_instance_request(): - """ - Test that moto correctly tags a spot instance request - """ - conn = boto.connect_ec2() - - request = conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234', - ) - request[0].add_tag('tag1', 'value1') - request[0].add_tag('tag2', 'value2') - - requests = conn.get_all_spot_instance_requests() - requests.should.have.length_of(1) - request = requests[0] - - tag_dict = dict(request.tags) - tag_dict.should.equal({'tag1': 'value1', 'tag2': 'value2'}) - - -@mock_ec2_deprecated -def test_get_all_spot_instance_requests_filtering(): - """ - Test that moto correctly filters spot instance requests - """ - conn = boto.connect_ec2() - - request1 = conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234', - ) - request2 = conn.request_spot_instances( - price=0.5, 
image_id='ami-abcd1234', - ) - conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234', - ) - request1[0].add_tag('tag1', 'value1') - request1[0].add_tag('tag2', 'value2') - request2[0].add_tag('tag1', 'value1') - request2[0].add_tag('tag2', 'wrong') - - requests = conn.get_all_spot_instance_requests(filters={'state': 'active'}) - requests.should.have.length_of(0) - - requests = conn.get_all_spot_instance_requests(filters={'state': 'open'}) - requests.should.have.length_of(3) - - requests = conn.get_all_spot_instance_requests( - filters={'tag:tag1': 'value1'}) - requests.should.have.length_of(2) - - requests = conn.get_all_spot_instance_requests( - filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'}) - requests.should.have.length_of(1) - - -@mock_ec2_deprecated -def test_request_spot_instances_setting_instance_id(): - conn = boto.ec2.connect_to_region("us-east-1") - request = conn.request_spot_instances( - price=0.5, image_id='ami-abcd1234') - - req = get_model('SpotInstanceRequest', 'us-east-1')[0] - req.state = 'active' - req.instance_id = 'i-12345678' - - request = conn.get_all_spot_instance_requests()[0] - assert request.state == 'active' - assert request.instance_id == 'i-12345678' +from __future__ import unicode_literals +from nose.tools import assert_raises +import datetime + +import boto +import boto3 +from boto.exception import EC2ResponseError +from botocore.exceptions import ClientError +import pytz +import sure # noqa + +from moto import mock_ec2, mock_ec2_deprecated +from moto.backends import get_model +from moto.core.utils import iso_8601_datetime_with_milliseconds + + +@mock_ec2 +def test_request_spot_instances(): + conn = boto3.client('ec2', 'us-east-1') + vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + subnet = conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] + subnet_id = subnet['SubnetId'] + + conn.create_security_group(GroupName='group1', Description='description') + conn.create_security_group(GroupName='group2', Description='description') + + start_dt = datetime.datetime(2013, 1, 1).replace(tzinfo=pytz.utc) + end_dt = datetime.datetime(2013, 1, 2).replace(tzinfo=pytz.utc) + start = iso_8601_datetime_with_milliseconds(start_dt) + end = iso_8601_datetime_with_milliseconds(end_dt) + + with assert_raises(ClientError) as ex: + request = conn.request_spot_instances( + SpotPrice="0.5", InstanceCount=1, Type='one-time', + ValidFrom=start, ValidUntil=end, LaunchGroup="the-group", + AvailabilityZoneGroup='my-group', + LaunchSpecification={ + "ImageId": 'ami-abcd1234', + "KeyName": "test", + "SecurityGroups": ['group1', 'group2'], + "UserData": "some test data", + "InstanceType": 'm1.small', + "Placement": { + "AvailabilityZone": 'us-east-1c', + }, + "KernelId": "test-kernel", + "RamdiskId": "test-ramdisk", + "Monitoring": { + "Enabled": True, + }, + "SubnetId": subnet_id, + }, + DryRun=True, + ) + ex.exception.response['Error']['Code'].should.equal('DryRunOperation') + ex.exception.response['ResponseMetadata'][ + 'HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal( + 'An error occurred (DryRunOperation) when calling the RequestSpotInstance operation: Request would have succeeded, but DryRun flag is set') + + request = conn.request_spot_instances( + SpotPrice="0.5", InstanceCount=1, Type='one-time', + ValidFrom=start, ValidUntil=end, LaunchGroup="the-group", + AvailabilityZoneGroup='my-group', + LaunchSpecification={ + "ImageId": 'ami-abcd1234', + "KeyName": 
"test", + "SecurityGroups": ['group1', 'group2'], + "UserData": "some test data", + "InstanceType": 'm1.small', + "Placement": { + "AvailabilityZone": 'us-east-1c', + }, + "KernelId": "test-kernel", + "RamdiskId": "test-ramdisk", + "Monitoring": { + "Enabled": True, + }, + "SubnetId": subnet_id, + }, + ) + + requests = conn.describe_spot_instance_requests()['SpotInstanceRequests'] + requests.should.have.length_of(1) + request = requests[0] + + request['State'].should.equal("open") + request['SpotPrice'].should.equal("0.5") + request['Type'].should.equal('one-time') + request['ValidFrom'].should.equal(start_dt) + request['ValidUntil'].should.equal(end_dt) + request['LaunchGroup'].should.equal("the-group") + request['AvailabilityZoneGroup'].should.equal('my-group') + + launch_spec = request['LaunchSpecification'] + security_group_names = [group['GroupName'] + for group in launch_spec['SecurityGroups']] + set(security_group_names).should.equal(set(['group1', 'group2'])) + + launch_spec['ImageId'].should.equal('ami-abcd1234') + launch_spec['KeyName'].should.equal("test") + launch_spec['InstanceType'].should.equal('m1.small') + launch_spec['KernelId'].should.equal("test-kernel") + launch_spec['RamdiskId'].should.equal("test-ramdisk") + launch_spec['SubnetId'].should.equal(subnet_id) + + +@mock_ec2 +def test_request_spot_instances_default_arguments(): + """ + Test that moto set the correct default arguments + """ + conn = boto3.client('ec2', 'us-east-1') + + request = conn.request_spot_instances( + SpotPrice="0.5", + LaunchSpecification={ + "ImageId": 'ami-abcd1234', + } + ) + + requests = conn.describe_spot_instance_requests()['SpotInstanceRequests'] + requests.should.have.length_of(1) + request = requests[0] + + request['State'].should.equal("open") + request['SpotPrice'].should.equal("0.5") + request['Type'].should.equal('one-time') + request.shouldnt.contain('ValidFrom') + request.shouldnt.contain('ValidUntil') + request.shouldnt.contain('LaunchGroup') + request.shouldnt.contain('AvailabilityZoneGroup') + + launch_spec = request['LaunchSpecification'] + + security_group_names = [group['GroupName'] + for group in launch_spec['SecurityGroups']] + security_group_names.should.equal(["default"]) + + launch_spec['ImageId'].should.equal('ami-abcd1234') + request.shouldnt.contain('KeyName') + launch_spec['InstanceType'].should.equal('m1.small') + request.shouldnt.contain('KernelId') + request.shouldnt.contain('RamdiskId') + request.shouldnt.contain('SubnetId') + + +@mock_ec2_deprecated +def test_cancel_spot_instance_request(): + conn = boto.connect_ec2() + + conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234', + ) + + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(1) + + with assert_raises(EC2ResponseError) as ex: + conn.cancel_spot_instance_requests([requests[0].id], dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CancelSpotInstance operation: Request would have succeeded, but DryRun flag is set') + + conn.cancel_spot_instance_requests([requests[0].id]) + + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_request_spot_instances_fulfilled(): + """ + Test that moto correctly fullfills a spot instance request + """ + conn = boto.ec2.connect_to_region("us-east-1") + + request = conn.request_spot_instances( + price=0.5, 
image_id='ami-abcd1234', + ) + + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(1) + request = requests[0] + + request.state.should.equal("open") + + get_model('SpotInstanceRequest', 'us-east-1')[0].state = 'active' + + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(1) + request = requests[0] + + request.state.should.equal("active") + + +@mock_ec2_deprecated +def test_tag_spot_instance_request(): + """ + Test that moto correctly tags a spot instance request + """ + conn = boto.connect_ec2() + + request = conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234', + ) + request[0].add_tag('tag1', 'value1') + request[0].add_tag('tag2', 'value2') + + requests = conn.get_all_spot_instance_requests() + requests.should.have.length_of(1) + request = requests[0] + + tag_dict = dict(request.tags) + tag_dict.should.equal({'tag1': 'value1', 'tag2': 'value2'}) + + +@mock_ec2_deprecated +def test_get_all_spot_instance_requests_filtering(): + """ + Test that moto correctly filters spot instance requests + """ + conn = boto.connect_ec2() + + request1 = conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234', + ) + request2 = conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234', + ) + conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234', + ) + request1[0].add_tag('tag1', 'value1') + request1[0].add_tag('tag2', 'value2') + request2[0].add_tag('tag1', 'value1') + request2[0].add_tag('tag2', 'wrong') + + requests = conn.get_all_spot_instance_requests(filters={'state': 'active'}) + requests.should.have.length_of(0) + + requests = conn.get_all_spot_instance_requests(filters={'state': 'open'}) + requests.should.have.length_of(3) + + requests = conn.get_all_spot_instance_requests( + filters={'tag:tag1': 'value1'}) + requests.should.have.length_of(2) + + requests = conn.get_all_spot_instance_requests( + filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'}) + requests.should.have.length_of(1) + + +@mock_ec2_deprecated +def test_request_spot_instances_setting_instance_id(): + conn = boto.ec2.connect_to_region("us-east-1") + request = conn.request_spot_instances( + price=0.5, image_id='ami-abcd1234') + + req = get_model('SpotInstanceRequest', 'us-east-1')[0] + req.state = 'active' + req.instance_id = 'i-12345678' + + request = conn.get_all_spot_instance_requests()[0] + assert request.state == 'active' + assert request.instance_id == 'i-12345678' diff --git a/tests/test_ec2/test_subnets.py b/tests/test_ec2/test_subnets.py index 99e6d45d8..3fb122807 100644 --- a/tests/test_ec2/test_subnets.py +++ b/tests/test_ec2/test_subnets.py @@ -1,291 +1,291 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises - -import boto3 -import boto -import boto.vpc -from boto.exception import EC2ResponseError -from botocore.exceptions import ParamValidationError -import json -import sure # noqa - -from moto import mock_cloudformation_deprecated, mock_ec2, mock_ec2_deprecated - - -@mock_ec2_deprecated -def test_subnets(): - ec2 = boto.connect_ec2('the_key', 'the_secret') - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - - all_subnets = conn.get_all_subnets() - all_subnets.should.have.length_of(1 + len(ec2.get_all_zones())) - - conn.delete_subnet(subnet.id) - - all_subnets = conn.get_all_subnets() - 
all_subnets.should.have.length_of(0 + len(ec2.get_all_zones())) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_subnet(subnet.id) - cm.exception.code.should.equal('InvalidSubnetID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_subnet_create_vpc_validation(): - conn = boto.connect_vpc('the_key', 'the_secret') - - with assert_raises(EC2ResponseError) as cm: - conn.create_subnet("vpc-abcd1234", "10.0.0.0/18") - cm.exception.code.should.equal('InvalidVpcID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_subnet_tagging(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - - subnet.add_tag("a key", "some value") - - tag = conn.get_all_tags()[0] - tag.name.should.equal("a key") - tag.value.should.equal("some value") - - # Refresh the subnet - subnet = conn.get_all_subnets(subnet_ids=[subnet.id])[0] - subnet.tags.should.have.length_of(1) - subnet.tags["a key"].should.equal("some value") - - -@mock_ec2_deprecated -def test_subnet_should_have_proper_availability_zone_set(): - conn = boto.vpc.connect_to_region('us-west-1') - vpcA = conn.create_vpc("10.0.0.0/16") - subnetA = conn.create_subnet( - vpcA.id, "10.0.0.0/24", availability_zone='us-west-1b') - subnetA.availability_zone.should.equal('us-west-1b') - - -@mock_ec2 -def test_default_subnet(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - default_vpc = list(ec2.vpcs.all())[0] - default_vpc.cidr_block.should.equal('172.31.0.0/16') - default_vpc.reload() - default_vpc.is_default.should.be.ok - - subnet = ec2.create_subnet( - VpcId=default_vpc.id, CidrBlock='172.31.0.0/20', AvailabilityZone='us-west-1a') - subnet.reload() - subnet.map_public_ip_on_launch.shouldnt.be.ok - - -@mock_ec2_deprecated -def test_non_default_subnet(): - vpc_cli = boto.vpc.connect_to_region('us-west-1') - - # Create the non default VPC - vpc = vpc_cli.create_vpc("10.0.0.0/16") - vpc.is_default.shouldnt.be.ok - - subnet = vpc_cli.create_subnet(vpc.id, "10.0.0.0/24") - subnet = vpc_cli.get_all_subnets(subnet_ids=[subnet.id])[0] - subnet.mapPublicIpOnLaunch.should.equal('false') - - -@mock_ec2 -def test_boto3_non_default_subnet(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Create the non default VPC - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - vpc.reload() - vpc.is_default.shouldnt.be.ok - - subnet = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') - subnet.reload() - subnet.map_public_ip_on_launch.shouldnt.be.ok - - -@mock_ec2 -def test_modify_subnet_attribute(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - client = boto3.client('ec2', region_name='us-west-1') - - # Get the default VPC - vpc = list(ec2.vpcs.all())[0] - - subnet = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') - - # 'map_public_ip_on_launch' is set when calling 'DescribeSubnets' action - subnet.reload() - - # For non default subnet, attribute value should be 'False' - subnet.map_public_ip_on_launch.shouldnt.be.ok - - client.modify_subnet_attribute( - SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': False}) - subnet.reload() - subnet.map_public_ip_on_launch.shouldnt.be.ok - - client.modify_subnet_attribute( - SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': True}) - subnet.reload() - 
subnet.map_public_ip_on_launch.should.be.ok - - -@mock_ec2 -def test_modify_subnet_attribute_validation(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - client = boto3.client('ec2', region_name='us-west-1') - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet( - VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a') - - with assert_raises(ParamValidationError): - client.modify_subnet_attribute( - SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': 'invalid'}) - - -@mock_ec2_deprecated -def test_subnet_get_by_id(): - ec2 = boto.ec2.connect_to_region('us-west-1') - conn = boto.vpc.connect_to_region('us-west-1') - vpcA = conn.create_vpc("10.0.0.0/16") - subnetA = conn.create_subnet( - vpcA.id, "10.0.0.0/24", availability_zone='us-west-1a') - vpcB = conn.create_vpc("10.0.0.0/16") - subnetB1 = conn.create_subnet( - vpcB.id, "10.0.0.0/24", availability_zone='us-west-1a') - subnetB2 = conn.create_subnet( - vpcB.id, "10.0.1.0/24", availability_zone='us-west-1b') - - subnets_by_id = conn.get_all_subnets(subnet_ids=[subnetA.id, subnetB1.id]) - subnets_by_id.should.have.length_of(2) - subnets_by_id = tuple(map(lambda s: s.id, subnets_by_id)) - subnetA.id.should.be.within(subnets_by_id) - subnetB1.id.should.be.within(subnets_by_id) - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_subnets(subnet_ids=['subnet-does_not_exist']) - cm.exception.code.should.equal('InvalidSubnetID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_get_subnets_filtering(): - ec2 = boto.ec2.connect_to_region('us-west-1') - conn = boto.vpc.connect_to_region('us-west-1') - vpcA = conn.create_vpc("10.0.0.0/16") - subnetA = conn.create_subnet( - vpcA.id, "10.0.0.0/24", availability_zone='us-west-1a') - vpcB = conn.create_vpc("10.0.0.0/16") - subnetB1 = conn.create_subnet( - vpcB.id, "10.0.0.0/24", availability_zone='us-west-1a') - subnetB2 = conn.create_subnet( - vpcB.id, "10.0.1.0/24", availability_zone='us-west-1b') - - all_subnets = conn.get_all_subnets() - all_subnets.should.have.length_of(3 + len(ec2.get_all_zones())) - - # Filter by VPC ID - subnets_by_vpc = conn.get_all_subnets(filters={'vpc-id': vpcB.id}) - subnets_by_vpc.should.have.length_of(2) - set([subnet.id for subnet in subnets_by_vpc]).should.equal( - set([subnetB1.id, subnetB2.id])) - - # Filter by CIDR variations - subnets_by_cidr1 = conn.get_all_subnets(filters={'cidr': "10.0.0.0/24"}) - subnets_by_cidr1.should.have.length_of(2) - set([subnet.id for subnet in subnets_by_cidr1] - ).should.equal(set([subnetA.id, subnetB1.id])) - - subnets_by_cidr2 = conn.get_all_subnets( - filters={'cidr-block': "10.0.0.0/24"}) - subnets_by_cidr2.should.have.length_of(2) - set([subnet.id for subnet in subnets_by_cidr2] - ).should.equal(set([subnetA.id, subnetB1.id])) - - subnets_by_cidr3 = conn.get_all_subnets( - filters={'cidrBlock': "10.0.0.0/24"}) - subnets_by_cidr3.should.have.length_of(2) - set([subnet.id for subnet in subnets_by_cidr3] - ).should.equal(set([subnetA.id, subnetB1.id])) - - # Filter by VPC ID and CIDR - subnets_by_vpc_and_cidr = conn.get_all_subnets( - filters={'vpc-id': vpcB.id, 'cidr': "10.0.0.0/24"}) - subnets_by_vpc_and_cidr.should.have.length_of(1) - set([subnet.id for subnet in subnets_by_vpc_and_cidr] - ).should.equal(set([subnetB1.id])) - - # Filter by subnet ID - subnets_by_id = conn.get_all_subnets(filters={'subnet-id': subnetA.id}) - subnets_by_id.should.have.length_of(1) - set([subnet.id for subnet in 
subnets_by_id]).should.equal(set([subnetA.id]))
-
-    # Filter by availabilityZone
-    subnets_by_az = conn.get_all_subnets(
-        filters={'availabilityZone': 'us-west-1a', 'vpc-id': vpcB.id})
-    subnets_by_az.should.have.length_of(1)
-    set([subnet.id for subnet in subnets_by_az]
-        ).should.equal(set([subnetB1.id]))
-
-    # Filter by defaultForAz
-
-    subnets_by_az = conn.get_all_subnets(filters={'defaultForAz': "true"})
-    subnets_by_az.should.have.length_of(len(conn.get_all_zones()))
-
-    # Unsupported filter
-    conn.get_all_subnets.when.called_with(
-        filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError)
-
-
-@mock_ec2_deprecated
-@mock_cloudformation_deprecated
-def test_subnet_tags_through_cloudformation():
-    vpc_conn = boto.vpc.connect_to_region('us-west-1')
-    vpc = vpc_conn.create_vpc("10.0.0.0/16")
-
-    subnet_template = {
-        "AWSTemplateFormatVersion": "2010-09-09",
-        "Resources": {
-            "testSubnet": {
-                "Type": "AWS::EC2::Subnet",
-                "Properties": {
-                    "VpcId": vpc.id,
-                    "CidrBlock": "10.0.0.0/24",
-                    "AvailabilityZone": "us-west-1b",
-                    "Tags": [{
-                        "Key": "foo",
-                        "Value": "bar",
-                    }, {
-                        "Key": "blah",
-                        "Value": "baz",
-                    }]
-                }
-            }
-        }
-    }
-    cf_conn = boto.cloudformation.connect_to_region("us-west-1")
-    template_json = json.dumps(subnet_template)
-    cf_conn.create_stack(
-        "test_stack",
-        template_body=template_json,
-    )
-
-    subnet = vpc_conn.get_all_subnets(filters={'cidrBlock': '10.0.0.0/24'})[0]
-    subnet.tags["foo"].should.equal("bar")
-    subnet.tags["blah"].should.equal("baz")
+from __future__ import unicode_literals
+# Ensure 'assert_raises' context manager support for Python 2.6
+import tests.backport_assert_raises  # noqa
+from nose.tools import assert_raises
+
+import boto3
+import boto
+import boto.vpc
+from boto.exception import EC2ResponseError
+from botocore.exceptions import ParamValidationError
+import json
+import sure  # noqa
+
+from moto import mock_cloudformation_deprecated, mock_ec2, mock_ec2_deprecated
+
+
+@mock_ec2_deprecated
+def test_subnets():
+    ec2 = boto.connect_ec2('the_key', 'the_secret')
+    conn = boto.connect_vpc('the_key', 'the_secret')
+    vpc = conn.create_vpc("10.0.0.0/16")
+    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
+
+    all_subnets = conn.get_all_subnets()
+    all_subnets.should.have.length_of(1 + len(ec2.get_all_zones()))
+
+    conn.delete_subnet(subnet.id)
+
+    all_subnets = conn.get_all_subnets()
+    all_subnets.should.have.length_of(0 + len(ec2.get_all_zones()))
+
+    with assert_raises(EC2ResponseError) as cm:
+        conn.delete_subnet(subnet.id)
+    cm.exception.code.should.equal('InvalidSubnetID.NotFound')
+    cm.exception.status.should.equal(400)
+    cm.exception.request_id.should_not.be.none
+
+
+@mock_ec2_deprecated
+def test_subnet_create_vpc_validation():
+    conn = boto.connect_vpc('the_key', 'the_secret')
+
+    with assert_raises(EC2ResponseError) as cm:
+        conn.create_subnet("vpc-abcd1234", "10.0.0.0/18")
+    cm.exception.code.should.equal('InvalidVpcID.NotFound')
+    cm.exception.status.should.equal(400)
+    cm.exception.request_id.should_not.be.none
+
+
+@mock_ec2_deprecated
+def test_subnet_tagging():
+    conn = boto.connect_vpc('the_key', 'the_secret')
+    vpc = conn.create_vpc("10.0.0.0/16")
+    subnet = conn.create_subnet(vpc.id, "10.0.0.0/18")
+
+    subnet.add_tag("a key", "some value")
+
+    tag = conn.get_all_tags()[0]
+    tag.name.should.equal("a key")
+    tag.value.should.equal("some value")
+
+    # Refresh the subnet
+    subnet = conn.get_all_subnets(subnet_ids=[subnet.id])[0]
+    subnet.tags.should.have.length_of(1)
+    subnet.tags["a key"].should.equal("some value")
+
+
+@mock_ec2_deprecated
+def test_subnet_should_have_proper_availability_zone_set():
+    conn = boto.vpc.connect_to_region('us-west-1')
+    vpcA = conn.create_vpc("10.0.0.0/16")
+    subnetA = conn.create_subnet(
+        vpcA.id, "10.0.0.0/24", availability_zone='us-west-1b')
+    subnetA.availability_zone.should.equal('us-west-1b')
+
+
+@mock_ec2
+def test_default_subnet():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+    default_vpc = list(ec2.vpcs.all())[0]
+    default_vpc.cidr_block.should.equal('172.31.0.0/16')
+    default_vpc.reload()
+    default_vpc.is_default.should.be.ok
+
+    subnet = ec2.create_subnet(
+        VpcId=default_vpc.id, CidrBlock='172.31.0.0/20', AvailabilityZone='us-west-1a')
+    subnet.reload()
+    subnet.map_public_ip_on_launch.shouldnt.be.ok
+
+
+@mock_ec2_deprecated
+def test_non_default_subnet():
+    vpc_cli = boto.vpc.connect_to_region('us-west-1')
+
+    # Create the non default VPC
+    vpc = vpc_cli.create_vpc("10.0.0.0/16")
+    vpc.is_default.shouldnt.be.ok
+
+    subnet = vpc_cli.create_subnet(vpc.id, "10.0.0.0/24")
+    subnet = vpc_cli.get_all_subnets(subnet_ids=[subnet.id])[0]
+    subnet.mapPublicIpOnLaunch.should.equal('false')
+
+
+@mock_ec2
+def test_boto3_non_default_subnet():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+    # Create the non default VPC
+    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
+    vpc.reload()
+    vpc.is_default.shouldnt.be.ok
+
+    subnet = ec2.create_subnet(
+        VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a')
+    subnet.reload()
+    subnet.map_public_ip_on_launch.shouldnt.be.ok
+
+
+@mock_ec2
+def test_modify_subnet_attribute():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+    client = boto3.client('ec2', region_name='us-west-1')
+
+    # Get the default VPC
+    vpc = list(ec2.vpcs.all())[0]
+
+    subnet = ec2.create_subnet(
+        VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a')
+
+    # 'map_public_ip_on_launch' is set when calling 'DescribeSubnets' action
+    subnet.reload()
+
+    # For non default subnet, attribute value should be 'False'
+    subnet.map_public_ip_on_launch.shouldnt.be.ok
+
+    client.modify_subnet_attribute(
+        SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': False})
+    subnet.reload()
+    subnet.map_public_ip_on_launch.shouldnt.be.ok
+
+    client.modify_subnet_attribute(
+        SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': True})
+    subnet.reload()
+    subnet.map_public_ip_on_launch.should.be.ok
+
+
+@mock_ec2
+def test_modify_subnet_attribute_validation():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+    client = boto3.client('ec2', region_name='us-west-1')
+    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
+    subnet = ec2.create_subnet(
+        VpcId=vpc.id, CidrBlock='10.0.0.0/24', AvailabilityZone='us-west-1a')
+
+    with assert_raises(ParamValidationError):
+        client.modify_subnet_attribute(
+            SubnetId=subnet.id, MapPublicIpOnLaunch={'Value': 'invalid'})
+
+
+@mock_ec2_deprecated
+def test_subnet_get_by_id():
+    ec2 = boto.ec2.connect_to_region('us-west-1')
+    conn = boto.vpc.connect_to_region('us-west-1')
+    vpcA = conn.create_vpc("10.0.0.0/16")
+    subnetA = conn.create_subnet(
+        vpcA.id, "10.0.0.0/24", availability_zone='us-west-1a')
+    vpcB = conn.create_vpc("10.0.0.0/16")
+    subnetB1 = conn.create_subnet(
+        vpcB.id, "10.0.0.0/24", availability_zone='us-west-1a')
+    subnetB2 = conn.create_subnet(
+        vpcB.id, "10.0.1.0/24", availability_zone='us-west-1b')
+
+    subnets_by_id = conn.get_all_subnets(subnet_ids=[subnetA.id, subnetB1.id])
+    subnets_by_id.should.have.length_of(2)
+    subnets_by_id = tuple(map(lambda s: s.id, 
subnets_by_id)) + subnetA.id.should.be.within(subnets_by_id) + subnetB1.id.should.be.within(subnets_by_id) + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_subnets(subnet_ids=['subnet-does_not_exist']) + cm.exception.code.should.equal('InvalidSubnetID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_get_subnets_filtering(): + ec2 = boto.ec2.connect_to_region('us-west-1') + conn = boto.vpc.connect_to_region('us-west-1') + vpcA = conn.create_vpc("10.0.0.0/16") + subnetA = conn.create_subnet( + vpcA.id, "10.0.0.0/24", availability_zone='us-west-1a') + vpcB = conn.create_vpc("10.0.0.0/16") + subnetB1 = conn.create_subnet( + vpcB.id, "10.0.0.0/24", availability_zone='us-west-1a') + subnetB2 = conn.create_subnet( + vpcB.id, "10.0.1.0/24", availability_zone='us-west-1b') + + all_subnets = conn.get_all_subnets() + all_subnets.should.have.length_of(3 + len(ec2.get_all_zones())) + + # Filter by VPC ID + subnets_by_vpc = conn.get_all_subnets(filters={'vpc-id': vpcB.id}) + subnets_by_vpc.should.have.length_of(2) + set([subnet.id for subnet in subnets_by_vpc]).should.equal( + set([subnetB1.id, subnetB2.id])) + + # Filter by CIDR variations + subnets_by_cidr1 = conn.get_all_subnets(filters={'cidr': "10.0.0.0/24"}) + subnets_by_cidr1.should.have.length_of(2) + set([subnet.id for subnet in subnets_by_cidr1] + ).should.equal(set([subnetA.id, subnetB1.id])) + + subnets_by_cidr2 = conn.get_all_subnets( + filters={'cidr-block': "10.0.0.0/24"}) + subnets_by_cidr2.should.have.length_of(2) + set([subnet.id for subnet in subnets_by_cidr2] + ).should.equal(set([subnetA.id, subnetB1.id])) + + subnets_by_cidr3 = conn.get_all_subnets( + filters={'cidrBlock': "10.0.0.0/24"}) + subnets_by_cidr3.should.have.length_of(2) + set([subnet.id for subnet in subnets_by_cidr3] + ).should.equal(set([subnetA.id, subnetB1.id])) + + # Filter by VPC ID and CIDR + subnets_by_vpc_and_cidr = conn.get_all_subnets( + filters={'vpc-id': vpcB.id, 'cidr': "10.0.0.0/24"}) + subnets_by_vpc_and_cidr.should.have.length_of(1) + set([subnet.id for subnet in subnets_by_vpc_and_cidr] + ).should.equal(set([subnetB1.id])) + + # Filter by subnet ID + subnets_by_id = conn.get_all_subnets(filters={'subnet-id': subnetA.id}) + subnets_by_id.should.have.length_of(1) + set([subnet.id for subnet in subnets_by_id]).should.equal(set([subnetA.id])) + + # Filter by availabilityZone + subnets_by_az = conn.get_all_subnets( + filters={'availabilityZone': 'us-west-1a', 'vpc-id': vpcB.id}) + subnets_by_az.should.have.length_of(1) + set([subnet.id for subnet in subnets_by_az] + ).should.equal(set([subnetB1.id])) + + # Filter by defaultForAz + + subnets_by_az = conn.get_all_subnets(filters={'defaultForAz': "true"}) + subnets_by_az.should.have.length_of(len(conn.get_all_zones())) + + # Unsupported filter + conn.get_all_subnets.when.called_with( + filters={'not-implemented-filter': 'foobar'}).should.throw(NotImplementedError) + + +@mock_ec2_deprecated +@mock_cloudformation_deprecated +def test_subnet_tags_through_cloudformation(): + vpc_conn = boto.vpc.connect_to_region('us-west-1') + vpc = vpc_conn.create_vpc("10.0.0.0/16") + + subnet_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Resources": { + "testSubnet": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "VpcId": vpc.id, + "CidrBlock": "10.0.0.0/24", + "AvailabilityZone": "us-west-1b", + "Tags": [{ + "Key": "foo", + "Value": "bar", + }, { + "Key": "blah", + "Value": "baz", + }] + } + } + } + } + cf_conn = 
boto.cloudformation.connect_to_region("us-west-1") + template_json = json.dumps(subnet_template) + cf_conn.create_stack( + "test_stack", + template_body=template_json, + ) + + subnet = vpc_conn.get_all_subnets(filters={'cidrBlock': '10.0.0.0/24'})[0] + subnet.tags["foo"].should.equal("bar") + subnet.tags["blah"].should.equal("baz") diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py index c92a4f81f..ac213857a 100644 --- a/tests/test_ec2/test_tags.py +++ b/tests/test_ec2/test_tags.py @@ -1,453 +1,453 @@ -from __future__ import unicode_literals -from nose.tools import assert_raises - -import itertools - -import boto -import boto3 -from boto.exception import EC2ResponseError -from boto.ec2.instance import Reservation -import sure # noqa - -from moto import mock_ec2_deprecated, mock_ec2 -from nose.tools import assert_raises - - -@mock_ec2_deprecated -def test_add_tag(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - with assert_raises(EC2ResponseError) as ex: - instance.add_tag("a key", "some value", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') - - instance.add_tag("a key", "some value") - chain = itertools.chain.from_iterable - existing_instances = list( - chain([res.instances for res in conn.get_all_instances()])) - existing_instances.should.have.length_of(1) - existing_instance = existing_instances[0] - existing_instance.tags["a key"].should.equal("some value") - - -@mock_ec2_deprecated -def test_remove_tag(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - instance.add_tag("a key", "some value") - - tags = conn.get_all_tags() - tag = tags[0] - tag.name.should.equal("a key") - tag.value.should.equal("some value") - - with assert_raises(EC2ResponseError) as ex: - instance.remove_tag("a key", dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the DeleteTags operation: Request would have succeeded, but DryRun flag is set') - - instance.remove_tag("a key") - conn.get_all_tags().should.have.length_of(0) - - instance.add_tag("a key", "some value") - conn.get_all_tags().should.have.length_of(1) - instance.remove_tag("a key", "some value") - - -@mock_ec2_deprecated -def test_get_all_tags(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - instance.add_tag("a key", "some value") - - tags = conn.get_all_tags() - tag = tags[0] - tag.name.should.equal("a key") - tag.value.should.equal("some value") - - -@mock_ec2_deprecated -def test_get_all_tags_with_special_characters(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - instance.add_tag("a key", "some<> value") - - tags = conn.get_all_tags() - tag = tags[0] - tag.name.should.equal("a key") - tag.value.should.equal("some<> value") - - -@mock_ec2_deprecated -def test_create_tags(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = 
conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - tag_dict = {'a key': 'some value', - 'another key': 'some other value', - 'blank key': ''} - - with assert_raises(EC2ResponseError) as ex: - conn.create_tags(instance.id, tag_dict, dry_run=True) - ex.exception.error_code.should.equal('DryRunOperation') - ex.exception.status.should.equal(400) - ex.exception.message.should.equal( - 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') - - conn.create_tags(instance.id, tag_dict) - tags = conn.get_all_tags() - set([key for key in tag_dict]).should.equal( - set([tag.name for tag in tags])) - set([tag_dict[key] for key in tag_dict]).should.equal( - set([tag.value for tag in tags])) - - -@mock_ec2_deprecated -def test_tag_limit_exceeded(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - tag_dict = {} - for i in range(51): - tag_dict['{0:02d}'.format(i + 1)] = '' - - with assert_raises(EC2ResponseError) as cm: - conn.create_tags(instance.id, tag_dict) - cm.exception.code.should.equal('TagLimitExceeded') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - instance.add_tag("a key", "a value") - with assert_raises(EC2ResponseError) as cm: - conn.create_tags(instance.id, tag_dict) - cm.exception.code.should.equal('TagLimitExceeded') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - tags = conn.get_all_tags() - tag = tags[0] - tags.should.have.length_of(1) - tag.name.should.equal("a key") - tag.value.should.equal("a value") - - -@mock_ec2_deprecated -def test_invalid_parameter_tag_null(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - - with assert_raises(EC2ResponseError) as cm: - instance.add_tag("a key", None) - cm.exception.code.should.equal('InvalidParameterValue') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_invalid_id(): - conn = boto.connect_ec2('the_key', 'the_secret') - with assert_raises(EC2ResponseError) as cm: - conn.create_tags('ami-blah', {'key': 'tag'}) - cm.exception.code.should.equal('InvalidID') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - with assert_raises(EC2ResponseError) as cm: - conn.create_tags('blah-blah', {'key': 'tag'}) - cm.exception.code.should.equal('InvalidID') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_get_all_tags_resource_id_filter(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - instance.add_tag("an instance key", "some value") - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") - image = conn.get_image(image_id) - image.add_tag("an image key", "some value") - - tags = conn.get_all_tags(filters={'resource-id': instance.id}) - tag = tags[0] - tags.should.have.length_of(1) - tag.res_id.should.equal(instance.id) - tag.res_type.should.equal('instance') - tag.name.should.equal("an instance key") - tag.value.should.equal("some value") - - tags = conn.get_all_tags(filters={'resource-id': image_id}) - tag = tags[0] - tags.should.have.length_of(1) - tag.res_id.should.equal(image_id) - 
tag.res_type.should.equal('image') - tag.name.should.equal("an image key") - tag.value.should.equal("some value") - - -@mock_ec2_deprecated -def test_get_all_tags_resource_type_filter(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - instance.add_tag("an instance key", "some value") - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") - image = conn.get_image(image_id) - image.add_tag("an image key", "some value") - - tags = conn.get_all_tags(filters={'resource-type': 'instance'}) - tag = tags[0] - tags.should.have.length_of(1) - tag.res_id.should.equal(instance.id) - tag.res_type.should.equal('instance') - tag.name.should.equal("an instance key") - tag.value.should.equal("some value") - - tags = conn.get_all_tags(filters={'resource-type': 'image'}) - tag = tags[0] - tags.should.have.length_of(1) - tag.res_id.should.equal(image_id) - tag.res_type.should.equal('image') - tag.name.should.equal("an image key") - tag.value.should.equal("some value") - - -@mock_ec2_deprecated -def test_get_all_tags_key_filter(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - instance.add_tag("an instance key", "some value") - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") - image = conn.get_image(image_id) - image.add_tag("an image key", "some value") - - tags = conn.get_all_tags(filters={'key': 'an instance key'}) - tag = tags[0] - tags.should.have.length_of(1) - tag.res_id.should.equal(instance.id) - tag.res_type.should.equal('instance') - tag.name.should.equal("an instance key") - tag.value.should.equal("some value") - - -@mock_ec2_deprecated -def test_get_all_tags_value_filter(): - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance = reservation.instances[0] - instance.add_tag("an instance key", "some value") - reservation_b = conn.run_instances('ami-1234abcd') - instance_b = reservation_b.instances[0] - instance_b.add_tag("an instance key", "some other value") - reservation_c = conn.run_instances('ami-1234abcd') - instance_c = reservation_c.instances[0] - instance_c.add_tag("an instance key", "other value*") - reservation_d = conn.run_instances('ami-1234abcd') - instance_d = reservation_d.instances[0] - instance_d.add_tag("an instance key", "other value**") - reservation_e = conn.run_instances('ami-1234abcd') - instance_e = reservation_e.instances[0] - instance_e.add_tag("an instance key", "other value*?") - image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") - image = conn.get_image(image_id) - image.add_tag("an image key", "some value") - - tags = conn.get_all_tags(filters={'value': 'some value'}) - tags.should.have.length_of(2) - - tags = conn.get_all_tags(filters={'value': 'some*value'}) - tags.should.have.length_of(3) - - tags = conn.get_all_tags(filters={'value': '*some*value'}) - tags.should.have.length_of(3) - - tags = conn.get_all_tags(filters={'value': '*some*value*'}) - tags.should.have.length_of(3) - - tags = conn.get_all_tags(filters={'value': '*value\*'}) - tags.should.have.length_of(1) - - tags = conn.get_all_tags(filters={'value': '*value\*\*'}) - tags.should.have.length_of(1) - - tags = conn.get_all_tags(filters={'value': '*value\*\?'}) - tags.should.have.length_of(1) - - -@mock_ec2_deprecated -def test_retrieved_instances_must_contain_their_tags(): - tag_key = 'Tag 
name' - tag_value = 'Tag value' - tags_to_be_set = {tag_key: tag_value} - - conn = boto.connect_ec2('the_key', 'the_secret') - reservation = conn.run_instances('ami-1234abcd') - reservation.should.be.a(Reservation) - reservation.instances.should.have.length_of(1) - instance = reservation.instances[0] - - reservations = conn.get_all_instances() - reservations.should.have.length_of(1) - reservations[0].id.should.equal(reservation.id) - instances = reservations[0].instances - instances.should.have.length_of(1) - instances[0].id.should.equal(instance.id) - - conn.create_tags([instance.id], tags_to_be_set) - reservations = conn.get_all_instances() - instance = reservations[0].instances[0] - retrieved_tags = instance.tags - - # Cleanup of instance - conn.terminate_instances([instances[0].id]) - - # Check whether tag is present with correct value - retrieved_tags[tag_key].should.equal(tag_value) - - -@mock_ec2_deprecated -def test_retrieved_volumes_must_contain_their_tags(): - tag_key = 'Tag name' - tag_value = 'Tag value' - tags_to_be_set = {tag_key: tag_value} - conn = boto.connect_ec2('the_key', 'the_secret') - volume = conn.create_volume(80, "us-east-1a") - - all_volumes = conn.get_all_volumes() - volume = all_volumes[0] - conn.create_tags([volume.id], tags_to_be_set) - - # Fetch the volume again - all_volumes = conn.get_all_volumes() - volume = all_volumes[0] - retrieved_tags = volume.tags - - volume.delete() - - # Check whether tag is present with correct value - retrieved_tags[tag_key].should.equal(tag_value) - - -@mock_ec2_deprecated -def test_retrieved_snapshots_must_contain_their_tags(): - tag_key = 'Tag name' - tag_value = 'Tag value' - tags_to_be_set = {tag_key: tag_value} - conn = boto.connect_ec2(aws_access_key_id='the_key', - aws_secret_access_key='the_secret') - volume = conn.create_volume(80, "eu-west-1a") - snapshot = conn.create_snapshot(volume.id) - conn.create_tags([snapshot.id], tags_to_be_set) - - # Fetch the snapshot again - all_snapshots = conn.get_all_snapshots() - snapshot = [item for item in all_snapshots if item.id == snapshot.id][0] - retrieved_tags = snapshot.tags - - conn.delete_snapshot(snapshot.id) - volume.delete() - - # Check whether tag is present with correct value - retrieved_tags[tag_key].should.equal(tag_value) - - -@mock_ec2_deprecated -def test_filter_instances_by_wildcard_tags(): - conn = boto.connect_ec2(aws_access_key_id='the_key', - aws_secret_access_key='the_secret') - reservation = conn.run_instances('ami-1234abcd') - instance_a = reservation.instances[0] - instance_a.add_tag("Key1", "Value1") - reservation_b = conn.run_instances('ami-1234abcd') - instance_b = reservation_b.instances[0] - instance_b.add_tag("Key1", "Value2") - - reservations = conn.get_all_instances(filters={'tag:Key1': 'Value*'}) - reservations.should.have.length_of(2) - - reservations = conn.get_all_instances(filters={'tag-key': 'Key*'}) - reservations.should.have.length_of(2) - - reservations = conn.get_all_instances(filters={'tag-value': 'Value*'}) - reservations.should.have.length_of(2) - - -@mock_ec2 -def test_create_volume_with_tags(): - client = boto3.client('ec2', 'us-west-2') - response = client.create_volume( - AvailabilityZone='us-west-2', - Encrypted=False, - Size=40, - TagSpecifications=[ - { - 'ResourceType': 'volume', - 'Tags': [ - { - 'Key': 'TEST_TAG', - 'Value': 'TEST_VALUE' - } - ], - } - ] - ) - - assert response['Tags'][0]['Key'] == 'TEST_TAG' - - -@mock_ec2 -def test_create_snapshot_with_tags(): - client = boto3.client('ec2', 'us-west-2') - volume_id = 
client.create_volume( - AvailabilityZone='us-west-2', - Encrypted=False, - Size=40, - TagSpecifications=[ - { - 'ResourceType': 'volume', - 'Tags': [ - { - 'Key': 'TEST_TAG', - 'Value': 'TEST_VALUE' - } - ], - } - ] - )['VolumeId'] - snapshot = client.create_snapshot( - VolumeId=volume_id, - TagSpecifications=[ - { - 'ResourceType': 'snapshot', - 'Tags': [ - { - 'Key': 'TEST_SNAPSHOT_TAG', - 'Value': 'TEST_SNAPSHOT_VALUE' - } - ], - } - ] - ) - - expected_tags = [{ - 'Key': 'TEST_SNAPSHOT_TAG', - 'Value': 'TEST_SNAPSHOT_VALUE' - }] - - assert snapshot['Tags'] == expected_tags +from __future__ import unicode_literals +from nose.tools import assert_raises + +import itertools + +import boto +import boto3 +from boto.exception import EC2ResponseError +from boto.ec2.instance import Reservation +import sure # noqa + +from moto import mock_ec2_deprecated, mock_ec2 +from nose.tools import assert_raises + + +@mock_ec2_deprecated +def test_add_tag(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + with assert_raises(EC2ResponseError) as ex: + instance.add_tag("a key", "some value", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + + instance.add_tag("a key", "some value") + chain = itertools.chain.from_iterable + existing_instances = list( + chain([res.instances for res in conn.get_all_instances()])) + existing_instances.should.have.length_of(1) + existing_instance = existing_instances[0] + existing_instance.tags["a key"].should.equal("some value") + + +@mock_ec2_deprecated +def test_remove_tag(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + instance.add_tag("a key", "some value") + + tags = conn.get_all_tags() + tag = tags[0] + tag.name.should.equal("a key") + tag.value.should.equal("some value") + + with assert_raises(EC2ResponseError) as ex: + instance.remove_tag("a key", dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the DeleteTags operation: Request would have succeeded, but DryRun flag is set') + + instance.remove_tag("a key") + conn.get_all_tags().should.have.length_of(0) + + instance.add_tag("a key", "some value") + conn.get_all_tags().should.have.length_of(1) + instance.remove_tag("a key", "some value") + + +@mock_ec2_deprecated +def test_get_all_tags(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + instance.add_tag("a key", "some value") + + tags = conn.get_all_tags() + tag = tags[0] + tag.name.should.equal("a key") + tag.value.should.equal("some value") + + +@mock_ec2_deprecated +def test_get_all_tags_with_special_characters(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + instance.add_tag("a key", "some<> value") + + tags = conn.get_all_tags() + tag = tags[0] + tag.name.should.equal("a key") + tag.value.should.equal("some<> value") + + +@mock_ec2_deprecated +def test_create_tags(): + conn = boto.connect_ec2('the_key', 
'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + tag_dict = {'a key': 'some value', + 'another key': 'some other value', + 'blank key': ''} + + with assert_raises(EC2ResponseError) as ex: + conn.create_tags(instance.id, tag_dict, dry_run=True) + ex.exception.error_code.should.equal('DryRunOperation') + ex.exception.status.should.equal(400) + ex.exception.message.should.equal( + 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') + + conn.create_tags(instance.id, tag_dict) + tags = conn.get_all_tags() + set([key for key in tag_dict]).should.equal( + set([tag.name for tag in tags])) + set([tag_dict[key] for key in tag_dict]).should.equal( + set([tag.value for tag in tags])) + + +@mock_ec2_deprecated +def test_tag_limit_exceeded(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + tag_dict = {} + for i in range(51): + tag_dict['{0:02d}'.format(i + 1)] = '' + + with assert_raises(EC2ResponseError) as cm: + conn.create_tags(instance.id, tag_dict) + cm.exception.code.should.equal('TagLimitExceeded') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + instance.add_tag("a key", "a value") + with assert_raises(EC2ResponseError) as cm: + conn.create_tags(instance.id, tag_dict) + cm.exception.code.should.equal('TagLimitExceeded') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + tags = conn.get_all_tags() + tag = tags[0] + tags.should.have.length_of(1) + tag.name.should.equal("a key") + tag.value.should.equal("a value") + + +@mock_ec2_deprecated +def test_invalid_parameter_tag_null(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + + with assert_raises(EC2ResponseError) as cm: + instance.add_tag("a key", None) + cm.exception.code.should.equal('InvalidParameterValue') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_invalid_id(): + conn = boto.connect_ec2('the_key', 'the_secret') + with assert_raises(EC2ResponseError) as cm: + conn.create_tags('ami-blah', {'key': 'tag'}) + cm.exception.code.should.equal('InvalidID') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + with assert_raises(EC2ResponseError) as cm: + conn.create_tags('blah-blah', {'key': 'tag'}) + cm.exception.code.should.equal('InvalidID') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_get_all_tags_resource_id_filter(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + instance.add_tag("an instance key", "some value") + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + image = conn.get_image(image_id) + image.add_tag("an image key", "some value") + + tags = conn.get_all_tags(filters={'resource-id': instance.id}) + tag = tags[0] + tags.should.have.length_of(1) + tag.res_id.should.equal(instance.id) + tag.res_type.should.equal('instance') + tag.name.should.equal("an instance key") + tag.value.should.equal("some value") + + tags = conn.get_all_tags(filters={'resource-id': image_id}) + tag = tags[0] + tags.should.have.length_of(1) + 
tag.res_id.should.equal(image_id) + tag.res_type.should.equal('image') + tag.name.should.equal("an image key") + tag.value.should.equal("some value") + + +@mock_ec2_deprecated +def test_get_all_tags_resource_type_filter(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + instance.add_tag("an instance key", "some value") + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + image = conn.get_image(image_id) + image.add_tag("an image key", "some value") + + tags = conn.get_all_tags(filters={'resource-type': 'instance'}) + tag = tags[0] + tags.should.have.length_of(1) + tag.res_id.should.equal(instance.id) + tag.res_type.should.equal('instance') + tag.name.should.equal("an instance key") + tag.value.should.equal("some value") + + tags = conn.get_all_tags(filters={'resource-type': 'image'}) + tag = tags[0] + tags.should.have.length_of(1) + tag.res_id.should.equal(image_id) + tag.res_type.should.equal('image') + tag.name.should.equal("an image key") + tag.value.should.equal("some value") + + +@mock_ec2_deprecated +def test_get_all_tags_key_filter(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + instance.add_tag("an instance key", "some value") + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + image = conn.get_image(image_id) + image.add_tag("an image key", "some value") + + tags = conn.get_all_tags(filters={'key': 'an instance key'}) + tag = tags[0] + tags.should.have.length_of(1) + tag.res_id.should.equal(instance.id) + tag.res_type.should.equal('instance') + tag.name.should.equal("an instance key") + tag.value.should.equal("some value") + + +@mock_ec2_deprecated +def test_get_all_tags_value_filter(): + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance = reservation.instances[0] + instance.add_tag("an instance key", "some value") + reservation_b = conn.run_instances('ami-1234abcd') + instance_b = reservation_b.instances[0] + instance_b.add_tag("an instance key", "some other value") + reservation_c = conn.run_instances('ami-1234abcd') + instance_c = reservation_c.instances[0] + instance_c.add_tag("an instance key", "other value*") + reservation_d = conn.run_instances('ami-1234abcd') + instance_d = reservation_d.instances[0] + instance_d.add_tag("an instance key", "other value**") + reservation_e = conn.run_instances('ami-1234abcd') + instance_e = reservation_e.instances[0] + instance_e.add_tag("an instance key", "other value*?") + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + image = conn.get_image(image_id) + image.add_tag("an image key", "some value") + + tags = conn.get_all_tags(filters={'value': 'some value'}) + tags.should.have.length_of(2) + + tags = conn.get_all_tags(filters={'value': 'some*value'}) + tags.should.have.length_of(3) + + tags = conn.get_all_tags(filters={'value': '*some*value'}) + tags.should.have.length_of(3) + + tags = conn.get_all_tags(filters={'value': '*some*value*'}) + tags.should.have.length_of(3) + + tags = conn.get_all_tags(filters={'value': '*value\*'}) + tags.should.have.length_of(1) + + tags = conn.get_all_tags(filters={'value': '*value\*\*'}) + tags.should.have.length_of(1) + + tags = conn.get_all_tags(filters={'value': '*value\*\?'}) + tags.should.have.length_of(1) + + +@mock_ec2_deprecated +def 
test_retrieved_instances_must_contain_their_tags(): + tag_key = 'Tag name' + tag_value = 'Tag value' + tags_to_be_set = {tag_key: tag_value} + + conn = boto.connect_ec2('the_key', 'the_secret') + reservation = conn.run_instances('ami-1234abcd') + reservation.should.be.a(Reservation) + reservation.instances.should.have.length_of(1) + instance = reservation.instances[0] + + reservations = conn.get_all_instances() + reservations.should.have.length_of(1) + reservations[0].id.should.equal(reservation.id) + instances = reservations[0].instances + instances.should.have.length_of(1) + instances[0].id.should.equal(instance.id) + + conn.create_tags([instance.id], tags_to_be_set) + reservations = conn.get_all_instances() + instance = reservations[0].instances[0] + retrieved_tags = instance.tags + + # Cleanup of instance + conn.terminate_instances([instances[0].id]) + + # Check whether tag is present with correct value + retrieved_tags[tag_key].should.equal(tag_value) + + +@mock_ec2_deprecated +def test_retrieved_volumes_must_contain_their_tags(): + tag_key = 'Tag name' + tag_value = 'Tag value' + tags_to_be_set = {tag_key: tag_value} + conn = boto.connect_ec2('the_key', 'the_secret') + volume = conn.create_volume(80, "us-east-1a") + + all_volumes = conn.get_all_volumes() + volume = all_volumes[0] + conn.create_tags([volume.id], tags_to_be_set) + + # Fetch the volume again + all_volumes = conn.get_all_volumes() + volume = all_volumes[0] + retrieved_tags = volume.tags + + volume.delete() + + # Check whether tag is present with correct value + retrieved_tags[tag_key].should.equal(tag_value) + + +@mock_ec2_deprecated +def test_retrieved_snapshots_must_contain_their_tags(): + tag_key = 'Tag name' + tag_value = 'Tag value' + tags_to_be_set = {tag_key: tag_value} + conn = boto.connect_ec2(aws_access_key_id='the_key', + aws_secret_access_key='the_secret') + volume = conn.create_volume(80, "eu-west-1a") + snapshot = conn.create_snapshot(volume.id) + conn.create_tags([snapshot.id], tags_to_be_set) + + # Fetch the snapshot again + all_snapshots = conn.get_all_snapshots() + snapshot = [item for item in all_snapshots if item.id == snapshot.id][0] + retrieved_tags = snapshot.tags + + conn.delete_snapshot(snapshot.id) + volume.delete() + + # Check whether tag is present with correct value + retrieved_tags[tag_key].should.equal(tag_value) + + +@mock_ec2_deprecated +def test_filter_instances_by_wildcard_tags(): + conn = boto.connect_ec2(aws_access_key_id='the_key', + aws_secret_access_key='the_secret') + reservation = conn.run_instances('ami-1234abcd') + instance_a = reservation.instances[0] + instance_a.add_tag("Key1", "Value1") + reservation_b = conn.run_instances('ami-1234abcd') + instance_b = reservation_b.instances[0] + instance_b.add_tag("Key1", "Value2") + + reservations = conn.get_all_instances(filters={'tag:Key1': 'Value*'}) + reservations.should.have.length_of(2) + + reservations = conn.get_all_instances(filters={'tag-key': 'Key*'}) + reservations.should.have.length_of(2) + + reservations = conn.get_all_instances(filters={'tag-value': 'Value*'}) + reservations.should.have.length_of(2) + + +@mock_ec2 +def test_create_volume_with_tags(): + client = boto3.client('ec2', 'us-west-2') + response = client.create_volume( + AvailabilityZone='us-west-2', + Encrypted=False, + Size=40, + TagSpecifications=[ + { + 'ResourceType': 'volume', + 'Tags': [ + { + 'Key': 'TEST_TAG', + 'Value': 'TEST_VALUE' + } + ], + } + ] + ) + + assert response['Tags'][0]['Key'] == 'TEST_TAG' + + +@mock_ec2 +def 
test_create_snapshot_with_tags():
+    client = boto3.client('ec2', 'us-west-2')
+    volume_id = client.create_volume(
+        AvailabilityZone='us-west-2',
+        Encrypted=False,
+        Size=40,
+        TagSpecifications=[
+            {
+                'ResourceType': 'volume',
+                'Tags': [
+                    {
+                        'Key': 'TEST_TAG',
+                        'Value': 'TEST_VALUE'
+                    }
+                ],
+            }
+        ]
+    )['VolumeId']
+    snapshot = client.create_snapshot(
+        VolumeId=volume_id,
+        TagSpecifications=[
+            {
+                'ResourceType': 'snapshot',
+                'Tags': [
+                    {
+                        'Key': 'TEST_SNAPSHOT_TAG',
+                        'Value': 'TEST_SNAPSHOT_VALUE'
+                    }
+                ],
+            }
+        ]
+    )
+
+    expected_tags = [{
+        'Key': 'TEST_SNAPSHOT_TAG',
+        'Value': 'TEST_SNAPSHOT_VALUE'
+    }]
+
+    assert snapshot['Tags'] == expected_tags
diff --git a/tests/test_ec2/test_utils.py b/tests/test_ec2/test_utils.py
index ef540e193..3e7a37a7a 100644
--- a/tests/test_ec2/test_utils.py
+++ b/tests/test_ec2/test_utils.py
@@ -1,8 +1,8 @@
-from moto.ec2 import utils
-
-
-def test_random_key_pair():
-    key_pair = utils.random_key_pair()
-    assert len(key_pair['fingerprint']) == 59
-    assert key_pair['material'].startswith('---- BEGIN RSA PRIVATE KEY ----')
-    assert key_pair['material'].endswith('-----END RSA PRIVATE KEY-----')
+from moto.ec2 import utils
+
+
+def test_random_key_pair():
+    key_pair = utils.random_key_pair()
+    assert len(key_pair['fingerprint']) == 59
+    assert key_pair['material'].startswith('---- BEGIN RSA PRIVATE KEY ----')
+    assert key_pair['material'].endswith('-----END RSA PRIVATE KEY-----')
diff --git a/tests/test_ec2/test_virtual_private_gateways.py b/tests/test_ec2/test_virtual_private_gateways.py
index d90e97b45..a57bdc59f 100644
--- a/tests/test_ec2/test_virtual_private_gateways.py
+++ b/tests/test_ec2/test_virtual_private_gateways.py
@@ -1,105 +1,105 @@
-from __future__ import unicode_literals
-import boto
-import sure  # noqa
-
-from moto import mock_ec2_deprecated
-
-
-@mock_ec2_deprecated
-def test_virtual_private_gateways():
-    conn = boto.connect_vpc('the_key', 'the_secret')
-
-    vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a')
-    vpn_gateway.should_not.be.none
-    vpn_gateway.id.should.match(r'vgw-\w+')
-    vpn_gateway.type.should.equal('ipsec.1')
-    vpn_gateway.state.should.equal('available')
-    vpn_gateway.availability_zone.should.equal('us-east-1a')
-
-
-@mock_ec2_deprecated
-def test_describe_vpn_gateway():
-    conn = boto.connect_vpc('the_key', 'the_secret')
-    vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a')
-
-    vgws = conn.get_all_vpn_gateways()
-    vgws.should.have.length_of(1)
-
-    gateway = vgws[0]
-    gateway.id.should.match(r'vgw-\w+')
-    gateway.id.should.equal(vpn_gateway.id)
-    vpn_gateway.type.should.equal('ipsec.1')
-    vpn_gateway.state.should.equal('available')
-    vpn_gateway.availability_zone.should.equal('us-east-1a')
-
-
-@mock_ec2_deprecated
-def test_vpn_gateway_vpc_attachment():
-    conn = boto.connect_vpc('the_key', 'the_secret')
-    vpc = conn.create_vpc("10.0.0.0/16")
-    vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a')
-
-    conn.attach_vpn_gateway(
-        vpn_gateway_id=vpn_gateway.id,
-        vpc_id=vpc.id
-    )
-
-    gateway = conn.get_all_vpn_gateways()[0]
-    attachments = gateway.attachments
-    attachments.should.have.length_of(1)
-    attachments[0].vpc_id.should.equal(vpc.id)
-    attachments[0].state.should.equal('attached')
-
-
-@mock_ec2_deprecated
-def test_delete_vpn_gateway():
-    conn = boto.connect_vpc('the_key', 'the_secret')
-    vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a')
-
-    conn.delete_vpn_gateway(vpn_gateway.id)
-    vgws = conn.get_all_vpn_gateways()
-    vgws.should.have.length_of(0)
-
-
-@mock_ec2_deprecated
-def test_vpn_gateway_tagging():
-    conn = boto.connect_vpc('the_key', 'the_secret')
-    vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a')
-    vpn_gateway.add_tag("a key", "some value")
-
-    tag = conn.get_all_tags()[0]
-    tag.name.should.equal("a key")
-    tag.value.should.equal("some value")
-
-    # Refresh the subnet
-    vpn_gateway = conn.get_all_vpn_gateways()[0]
-    vpn_gateway.tags.should.have.length_of(1)
-    vpn_gateway.tags["a key"].should.equal("some value")
-
-
-@mock_ec2_deprecated
-def test_detach_vpn_gateway():
-
-    conn = boto.connect_vpc('the_key', 'the_secret')
-    vpc = conn.create_vpc("10.0.0.0/16")
-    vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a')
-
-    conn.attach_vpn_gateway(
-        vpn_gateway_id=vpn_gateway.id,
-        vpc_id=vpc.id
-    )
-
-    gateway = conn.get_all_vpn_gateways()[0]
-    attachments = gateway.attachments
-    attachments.should.have.length_of(1)
-    attachments[0].vpc_id.should.equal(vpc.id)
-    attachments[0].state.should.equal('attached')
-
-    conn.detach_vpn_gateway(
-        vpn_gateway_id=vpn_gateway.id,
-        vpc_id=vpc.id
-    )
-
-    gateway = conn.get_all_vpn_gateways()[0]
-    attachments = gateway.attachments
-    attachments.should.have.length_of(0)
+from __future__ import unicode_literals
+import boto
+import sure  # noqa
+
+from moto import mock_ec2_deprecated
+
+
+@mock_ec2_deprecated
+def test_virtual_private_gateways():
+    conn = boto.connect_vpc('the_key', 'the_secret')
+
+    vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a')
+    vpn_gateway.should_not.be.none
+    vpn_gateway.id.should.match(r'vgw-\w+')
+    vpn_gateway.type.should.equal('ipsec.1')
+    vpn_gateway.state.should.equal('available')
+    vpn_gateway.availability_zone.should.equal('us-east-1a')
+
+
+@mock_ec2_deprecated
+def test_describe_vpn_gateway():
+    conn = boto.connect_vpc('the_key', 'the_secret')
+    vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a')
+
+    vgws = conn.get_all_vpn_gateways()
+    vgws.should.have.length_of(1)
+
+    gateway = vgws[0]
+    gateway.id.should.match(r'vgw-\w+')
+    gateway.id.should.equal(vpn_gateway.id)
+    vpn_gateway.type.should.equal('ipsec.1')
+    vpn_gateway.state.should.equal('available')
+    vpn_gateway.availability_zone.should.equal('us-east-1a')
+
+
+@mock_ec2_deprecated
+def test_vpn_gateway_vpc_attachment():
+    conn = boto.connect_vpc('the_key', 'the_secret')
+    vpc = conn.create_vpc("10.0.0.0/16")
+    vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a')
+
+    conn.attach_vpn_gateway(
+        vpn_gateway_id=vpn_gateway.id,
+        vpc_id=vpc.id
+    )
+
+    gateway = conn.get_all_vpn_gateways()[0]
+    attachments = gateway.attachments
+    attachments.should.have.length_of(1)
+    attachments[0].vpc_id.should.equal(vpc.id)
+    attachments[0].state.should.equal('attached')
+
+
+@mock_ec2_deprecated
+def test_delete_vpn_gateway():
+    conn = boto.connect_vpc('the_key', 'the_secret')
+    vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a')
+
+    conn.delete_vpn_gateway(vpn_gateway.id)
+    vgws = conn.get_all_vpn_gateways()
+    vgws.should.have.length_of(0)
+
+
+@mock_ec2_deprecated
+def test_vpn_gateway_tagging():
+    conn = boto.connect_vpc('the_key', 'the_secret')
+    vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a')
+    vpn_gateway.add_tag("a key", "some value")
+
+    tag = conn.get_all_tags()[0]
+    tag.name.should.equal("a key")
+    tag.value.should.equal("some value")
+
+    # Refresh the subnet
+    vpn_gateway = conn.get_all_vpn_gateways()[0]
+    vpn_gateway.tags.should.have.length_of(1)
+    vpn_gateway.tags["a key"].should.equal("some value")
+
+
+@mock_ec2_deprecated
+def test_detach_vpn_gateway():
+
+    conn = boto.connect_vpc('the_key', 'the_secret')
+    vpc = conn.create_vpc("10.0.0.0/16")
+    vpn_gateway = conn.create_vpn_gateway('ipsec.1', 'us-east-1a')
+
+    conn.attach_vpn_gateway(
+        vpn_gateway_id=vpn_gateway.id,
+        vpc_id=vpc.id
+    )
+
+    gateway = conn.get_all_vpn_gateways()[0]
+    attachments = gateway.attachments
+    attachments.should.have.length_of(1)
+    attachments[0].vpc_id.should.equal(vpc.id)
+    attachments[0].state.should.equal('attached')
+
+    conn.detach_vpn_gateway(
+        vpn_gateway_id=vpn_gateway.id,
+        vpc_id=vpc.id
+    )
+
+    gateway = conn.get_all_vpn_gateways()[0]
+    attachments = gateway.attachments
+    attachments.should.have.length_of(0)
diff --git a/tests/test_ec2/test_vm_export.py b/tests/test_ec2/test_vm_export.py
index f8b24f6d4..08215d067 100644
--- a/tests/test_ec2/test_vm_export.py
+++ b/tests/test_ec2/test_vm_export.py
@@ -1,10 +1,10 @@
-from __future__ import unicode_literals
-import boto
-import sure  # noqa
-
-from moto import mock_ec2
-
-
-@mock_ec2
-def test_vm_export():
-    pass
+from __future__ import unicode_literals
+import boto
+import sure  # noqa
+
+from moto import mock_ec2
+
+
+@mock_ec2
+def test_vm_export():
+    pass
diff --git a/tests/test_ec2/test_vm_import.py b/tests/test_ec2/test_vm_import.py
index 66c7561a7..0ebfaaa0c 100644
--- a/tests/test_ec2/test_vm_import.py
+++ b/tests/test_ec2/test_vm_import.py
@@ -1,10 +1,10 @@
-from __future__ import unicode_literals
-import boto
-import sure  # noqa
-
-from moto import mock_ec2
-
-
-@mock_ec2
-def test_vm_import():
-    pass
+from __future__ import unicode_literals
+import boto
+import sure  # noqa
+
+from moto import mock_ec2
+
+
+@mock_ec2
+def test_vm_import():
+    pass
diff --git a/tests/test_ec2/test_vpc_peering.py b/tests/test_ec2/test_vpc_peering.py
index 1f98791b3..4aab5f041 100644
--- a/tests/test_ec2/test_vpc_peering.py
+++ b/tests/test_ec2/test_vpc_peering.py
@@ -1,132 +1,132 @@
-from __future__ import unicode_literals
-# Ensure 'assert_raises' context manager support for Python 2.6
-import tests.backport_assert_raises
-from nose.tools import assert_raises
-from moto.ec2.exceptions import EC2ClientError
-from botocore.exceptions import ClientError
-
-import boto3
-import boto
-from boto.exception import EC2ResponseError
-import sure  # noqa
-
-from moto import mock_ec2, mock_ec2_deprecated
-from tests.helpers import requires_boto_gte
-
-
-@requires_boto_gte("2.32.0")
-@mock_ec2_deprecated
-def test_vpc_peering_connections():
-    conn = boto.connect_vpc('the_key', 'the_secret')
-    vpc = conn.create_vpc("10.0.0.0/16")
-    peer_vpc = conn.create_vpc("11.0.0.0/16")
-
-    vpc_pcx = conn.create_vpc_peering_connection(vpc.id, peer_vpc.id)
-    vpc_pcx._status.code.should.equal('initiating-request')
-
-    return vpc_pcx
-
-
-@requires_boto_gte("2.32.0")
-@mock_ec2_deprecated
-def test_vpc_peering_connections_get_all():
-    conn = boto.connect_vpc('the_key', 'the_secret')
-    vpc_pcx = test_vpc_peering_connections()
-    vpc_pcx._status.code.should.equal('initiating-request')
-
-    all_vpc_pcxs = conn.get_all_vpc_peering_connections()
-    all_vpc_pcxs.should.have.length_of(1)
-    all_vpc_pcxs[0]._status.code.should.equal('pending-acceptance')
-
-
-@requires_boto_gte("2.32.0")
-@mock_ec2_deprecated
-def test_vpc_peering_connections_accept():
-    conn = boto.connect_vpc('the_key', 'the_secret')
-    vpc_pcx = test_vpc_peering_connections()
-
-    vpc_pcx = conn.accept_vpc_peering_connection(vpc_pcx.id)
-    vpc_pcx._status.code.should.equal('active')
-
-    with assert_raises(EC2ResponseError) as cm:
-
conn.reject_vpc_peering_connection(vpc_pcx.id) - cm.exception.code.should.equal('InvalidStateTransition') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - all_vpc_pcxs = conn.get_all_vpc_peering_connections() - all_vpc_pcxs.should.have.length_of(1) - all_vpc_pcxs[0]._status.code.should.equal('active') - - -@requires_boto_gte("2.32.0") -@mock_ec2_deprecated -def test_vpc_peering_connections_reject(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc_pcx = test_vpc_peering_connections() - - verdict = conn.reject_vpc_peering_connection(vpc_pcx.id) - verdict.should.equal(True) - - with assert_raises(EC2ResponseError) as cm: - conn.accept_vpc_peering_connection(vpc_pcx.id) - cm.exception.code.should.equal('InvalidStateTransition') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - all_vpc_pcxs = conn.get_all_vpc_peering_connections() - all_vpc_pcxs.should.have.length_of(1) - all_vpc_pcxs[0]._status.code.should.equal('rejected') - - -@requires_boto_gte("2.32.1") -@mock_ec2_deprecated -def test_vpc_peering_connections_delete(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc_pcx = test_vpc_peering_connections() - - verdict = vpc_pcx.delete() - verdict.should.equal(True) - - all_vpc_pcxs = conn.get_all_vpc_peering_connections() - all_vpc_pcxs.should.have.length_of(0) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_vpc_peering_connection("pcx-1234abcd") - cm.exception.code.should.equal('InvalidVpcPeeringConnectionId.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2 -def test_vpc_peering_connections_cross_region(): - # create vpc in us-west-1 and ap-northeast-1 - ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') - vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') - ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') - vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') - # create peering - vpc_pcx = ec2_usw1.create_vpc_peering_connection( - VpcId=vpc_usw1.id, - PeerVpcId=vpc_apn1.id, - PeerRegion='ap-northeast-1', - ) - vpc_pcx.status['Code'].should.equal('initiating-request') - vpc_pcx.requester_vpc.id.should.equal(vpc_usw1.id) - vpc_pcx.accepter_vpc.id.should.equal(vpc_apn1.id) - - -@mock_ec2 -def test_vpc_peering_connections_cross_region_fail(): - # create vpc in us-west-1 and ap-northeast-1 - ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') - vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') - ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') - vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') - # create peering wrong region with no vpc - with assert_raises(ClientError) as cm: - ec2_usw1.create_vpc_peering_connection( - VpcId=vpc_usw1.id, - PeerVpcId=vpc_apn1.id, - PeerRegion='ap-northeast-2') - cm.exception.response['Error']['Code'].should.equal('InvalidVpcID.NotFound') +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises +from moto.ec2.exceptions import EC2ClientError +from botocore.exceptions import ClientError + +import boto3 +import boto +from boto.exception import EC2ResponseError +import sure # noqa + +from moto import mock_ec2, mock_ec2_deprecated +from tests.helpers import requires_boto_gte + + +@requires_boto_gte("2.32.0") +@mock_ec2_deprecated +def test_vpc_peering_connections(): + conn = boto.connect_vpc('the_key', 
'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + peer_vpc = conn.create_vpc("11.0.0.0/16") + + vpc_pcx = conn.create_vpc_peering_connection(vpc.id, peer_vpc.id) + vpc_pcx._status.code.should.equal('initiating-request') + + return vpc_pcx + + +@requires_boto_gte("2.32.0") +@mock_ec2_deprecated +def test_vpc_peering_connections_get_all(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc_pcx = test_vpc_peering_connections() + vpc_pcx._status.code.should.equal('initiating-request') + + all_vpc_pcxs = conn.get_all_vpc_peering_connections() + all_vpc_pcxs.should.have.length_of(1) + all_vpc_pcxs[0]._status.code.should.equal('pending-acceptance') + + +@requires_boto_gte("2.32.0") +@mock_ec2_deprecated +def test_vpc_peering_connections_accept(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc_pcx = test_vpc_peering_connections() + + vpc_pcx = conn.accept_vpc_peering_connection(vpc_pcx.id) + vpc_pcx._status.code.should.equal('active') + + with assert_raises(EC2ResponseError) as cm: + conn.reject_vpc_peering_connection(vpc_pcx.id) + cm.exception.code.should.equal('InvalidStateTransition') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + all_vpc_pcxs = conn.get_all_vpc_peering_connections() + all_vpc_pcxs.should.have.length_of(1) + all_vpc_pcxs[0]._status.code.should.equal('active') + + +@requires_boto_gte("2.32.0") +@mock_ec2_deprecated +def test_vpc_peering_connections_reject(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc_pcx = test_vpc_peering_connections() + + verdict = conn.reject_vpc_peering_connection(vpc_pcx.id) + verdict.should.equal(True) + + with assert_raises(EC2ResponseError) as cm: + conn.accept_vpc_peering_connection(vpc_pcx.id) + cm.exception.code.should.equal('InvalidStateTransition') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + all_vpc_pcxs = conn.get_all_vpc_peering_connections() + all_vpc_pcxs.should.have.length_of(1) + all_vpc_pcxs[0]._status.code.should.equal('rejected') + + +@requires_boto_gte("2.32.1") +@mock_ec2_deprecated +def test_vpc_peering_connections_delete(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc_pcx = test_vpc_peering_connections() + + verdict = vpc_pcx.delete() + verdict.should.equal(True) + + all_vpc_pcxs = conn.get_all_vpc_peering_connections() + all_vpc_pcxs.should.have.length_of(0) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_vpc_peering_connection("pcx-1234abcd") + cm.exception.code.should.equal('InvalidVpcPeeringConnectionId.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2 +def test_vpc_peering_connections_cross_region(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') + ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering + vpc_pcx = ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-1', + ) + vpc_pcx.status['Code'].should.equal('initiating-request') + vpc_pcx.requester_vpc.id.should.equal(vpc_usw1.id) + vpc_pcx.accepter_vpc.id.should.equal(vpc_apn1.id) + + +@mock_ec2 +def test_vpc_peering_connections_cross_region_fail(): + # create vpc in us-west-1 and ap-northeast-1 + ec2_usw1 = boto3.resource('ec2', region_name='us-west-1') + vpc_usw1 = ec2_usw1.create_vpc(CidrBlock='10.90.0.0/16') 
+ ec2_apn1 = boto3.resource('ec2', region_name='ap-northeast-1') + vpc_apn1 = ec2_apn1.create_vpc(CidrBlock='10.20.0.0/16') + # create peering wrong region with no vpc + with assert_raises(ClientError) as cm: + ec2_usw1.create_vpc_peering_connection( + VpcId=vpc_usw1.id, + PeerVpcId=vpc_apn1.id, + PeerRegion='ap-northeast-2') + cm.exception.response['Error']['Code'].should.equal('InvalidVpcID.NotFound') diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py index 318491b44..4556e5ea0 100644 --- a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -1,541 +1,541 @@ -from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # flake8: noqa -from nose.tools import assert_raises -from moto.ec2.exceptions import EC2ClientError -from botocore.exceptions import ClientError - -import boto3 -import boto -from boto.exception import EC2ResponseError -import sure # noqa - -from moto import mock_ec2, mock_ec2_deprecated - -SAMPLE_DOMAIN_NAME = u'example.com' -SAMPLE_NAME_SERVERS = [u'10.0.0.6', u'10.0.0.7'] - - -@mock_ec2_deprecated -def test_vpcs(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - vpc.cidr_block.should.equal('10.0.0.0/16') - - all_vpcs = conn.get_all_vpcs() - all_vpcs.should.have.length_of(2) - - vpc.delete() - - all_vpcs = conn.get_all_vpcs() - all_vpcs.should.have.length_of(1) - - with assert_raises(EC2ResponseError) as cm: - conn.delete_vpc("vpc-1234abcd") - cm.exception.code.should.equal('InvalidVpcID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_vpc_defaults(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - - conn.get_all_vpcs().should.have.length_of(2) - conn.get_all_route_tables().should.have.length_of(2) - conn.get_all_security_groups( - filters={'vpc-id': [vpc.id]}).should.have.length_of(1) - - vpc.delete() - - conn.get_all_vpcs().should.have.length_of(1) - conn.get_all_route_tables().should.have.length_of(1) - conn.get_all_security_groups( - filters={'vpc-id': [vpc.id]}).should.have.length_of(0) - - -@mock_ec2_deprecated -def test_vpc_isdefault_filter(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - conn.get_all_vpcs(filters={'isDefault': 'true'}).should.have.length_of(1) - vpc.delete() - conn.get_all_vpcs(filters={'isDefault': 'true'}).should.have.length_of(1) - - -@mock_ec2_deprecated -def test_multiple_vpcs_default_filter(): - conn = boto.connect_vpc('the_key', 'the_secret') - conn.create_vpc("10.8.0.0/16") - conn.create_vpc("10.0.0.0/16") - conn.create_vpc("192.168.0.0/16") - conn.get_all_vpcs().should.have.length_of(4) - vpc = conn.get_all_vpcs(filters={'isDefault': 'true'}) - vpc.should.have.length_of(1) - vpc[0].cidr_block.should.equal('172.31.0.0/16') - - -@mock_ec2_deprecated -def test_vpc_state_available_filter(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - conn.create_vpc("10.1.0.0/16") - conn.get_all_vpcs(filters={'state': 'available'}).should.have.length_of(3) - vpc.delete() - conn.get_all_vpcs(filters={'state': 'available'}).should.have.length_of(2) - - -@mock_ec2_deprecated -def test_vpc_tagging(): - conn = boto.connect_vpc() - vpc = conn.create_vpc("10.0.0.0/16") - - vpc.add_tag("a key", "some value") - tag = conn.get_all_tags()[0] - tag.name.should.equal("a key") - 
tag.value.should.equal("some value") - - # Refresh the vpc - vpc = conn.get_all_vpcs(vpc_ids=[vpc.id])[0] - vpc.tags.should.have.length_of(1) - vpc.tags["a key"].should.equal("some value") - - -@mock_ec2_deprecated -def test_vpc_get_by_id(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - conn.create_vpc("10.0.0.0/16") - - vpcs = conn.get_all_vpcs(vpc_ids=[vpc1.id, vpc2.id]) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - with assert_raises(EC2ResponseError) as cm: - conn.get_all_vpcs(vpc_ids=['vpc-does_not_exist']) - cm.exception.code.should.equal('InvalidVpcID.NotFound') - cm.exception.status.should.equal(400) - cm.exception.request_id.should_not.be.none - - -@mock_ec2_deprecated -def test_vpc_get_by_cidr_block(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - conn.create_vpc("10.0.0.0/24") - - vpcs = conn.get_all_vpcs(filters={'cidr': '10.0.0.0/16'}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2_deprecated -def test_vpc_get_by_dhcp_options_id(): - conn = boto.connect_vpc() - dhcp_options = conn.create_dhcp_options( - SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - conn.create_vpc("10.0.0.0/24") - - conn.associate_dhcp_options(dhcp_options.id, vpc1.id) - conn.associate_dhcp_options(dhcp_options.id, vpc2.id) - - vpcs = conn.get_all_vpcs(filters={'dhcp-options-id': dhcp_options.id}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2_deprecated -def test_vpc_get_by_tag(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - vpc3 = conn.create_vpc("10.0.0.0/24") - - vpc1.add_tag('Name', 'TestVPC') - vpc2.add_tag('Name', 'TestVPC') - vpc3.add_tag('Name', 'TestVPC2') - - vpcs = conn.get_all_vpcs(filters={'tag:Name': 'TestVPC'}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2_deprecated -def test_vpc_get_by_tag_key_superset(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - vpc3 = conn.create_vpc("10.0.0.0/24") - - vpc1.add_tag('Name', 'TestVPC') - vpc1.add_tag('Key', 'TestVPC2') - vpc2.add_tag('Name', 'TestVPC') - vpc2.add_tag('Key', 'TestVPC2') - vpc3.add_tag('Key', 'TestVPC2') - - vpcs = conn.get_all_vpcs(filters={'tag-key': 'Name'}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2_deprecated -def test_vpc_get_by_tag_key_subset(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - vpc3 = conn.create_vpc("10.0.0.0/24") - - vpc1.add_tag('Name', 'TestVPC') - vpc1.add_tag('Key', 'TestVPC2') - vpc2.add_tag('Name', 'TestVPC') - vpc2.add_tag('Key', 'TestVPC2') - vpc3.add_tag('Test', 'TestVPC2') - - vpcs = conn.get_all_vpcs(filters={'tag-key': ['Name', 'Key']}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) 
- vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2_deprecated -def test_vpc_get_by_tag_value_superset(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - vpc3 = conn.create_vpc("10.0.0.0/24") - - vpc1.add_tag('Name', 'TestVPC') - vpc1.add_tag('Key', 'TestVPC2') - vpc2.add_tag('Name', 'TestVPC') - vpc2.add_tag('Key', 'TestVPC2') - vpc3.add_tag('Key', 'TestVPC2') - - vpcs = conn.get_all_vpcs(filters={'tag-value': 'TestVPC'}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2_deprecated -def test_vpc_get_by_tag_value_subset(): - conn = boto.connect_vpc() - vpc1 = conn.create_vpc("10.0.0.0/16") - vpc2 = conn.create_vpc("10.0.0.0/16") - conn.create_vpc("10.0.0.0/24") - - vpc1.add_tag('Name', 'TestVPC') - vpc1.add_tag('Key', 'TestVPC2') - vpc2.add_tag('Name', 'TestVPC') - vpc2.add_tag('Key', 'TestVPC2') - - vpcs = conn.get_all_vpcs(filters={'tag-value': ['TestVPC', 'TestVPC2']}) - vpcs.should.have.length_of(2) - vpc_ids = tuple(map(lambda v: v.id, vpcs)) - vpc1.id.should.be.within(vpc_ids) - vpc2.id.should.be.within(vpc_ids) - - -@mock_ec2 -def test_default_vpc(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Create the default VPC - default_vpc = list(ec2.vpcs.all())[0] - default_vpc.cidr_block.should.equal('172.31.0.0/16') - default_vpc.instance_tenancy.should.equal('default') - default_vpc.reload() - default_vpc.is_default.should.be.ok - - # Test default values for VPC attributes - response = default_vpc.describe_attribute(Attribute='enableDnsSupport') - attr = response.get('EnableDnsSupport') - attr.get('Value').should.be.ok - - response = default_vpc.describe_attribute(Attribute='enableDnsHostnames') - attr = response.get('EnableDnsHostnames') - attr.get('Value').should.be.ok - - -@mock_ec2 -def test_non_default_vpc(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Create the default VPC - this already exists when backend instantiated! 
- #ec2.create_vpc(CidrBlock='172.31.0.0/16') - - # Create the non default VPC - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - vpc.reload() - vpc.is_default.shouldnt.be.ok - - # Test default instance_tenancy - vpc.instance_tenancy.should.equal('default') - - # Test default values for VPC attributes - response = vpc.describe_attribute(Attribute='enableDnsSupport') - attr = response.get('EnableDnsSupport') - attr.get('Value').should.be.ok - - response = vpc.describe_attribute(Attribute='enableDnsHostnames') - attr = response.get('EnableDnsHostnames') - attr.get('Value').shouldnt.be.ok - - # Check Primary CIDR Block Associations - cidr_block_association_set = next(iter(vpc.cidr_block_association_set), None) - cidr_block_association_set['CidrBlockState']['State'].should.equal('associated') - cidr_block_association_set['CidrBlock'].should.equal(vpc.cidr_block) - cidr_block_association_set['AssociationId'].should.contain('vpc-cidr-assoc') - - -@mock_ec2 -def test_vpc_dedicated_tenancy(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Create the default VPC - ec2.create_vpc(CidrBlock='172.31.0.0/16') - - # Create the non default VPC - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16', InstanceTenancy='dedicated') - vpc.reload() - vpc.is_default.shouldnt.be.ok - - vpc.instance_tenancy.should.equal('dedicated') - - -@mock_ec2 -def test_vpc_modify_enable_dns_support(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Create the default VPC - ec2.create_vpc(CidrBlock='172.31.0.0/16') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - - # Test default values for VPC attributes - response = vpc.describe_attribute(Attribute='enableDnsSupport') - attr = response.get('EnableDnsSupport') - attr.get('Value').should.be.ok - - vpc.modify_attribute(EnableDnsSupport={'Value': False}) - - response = vpc.describe_attribute(Attribute='enableDnsSupport') - attr = response.get('EnableDnsSupport') - attr.get('Value').shouldnt.be.ok - - -@mock_ec2 -def test_vpc_modify_enable_dns_hostnames(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Create the default VPC - ec2.create_vpc(CidrBlock='172.31.0.0/16') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - # Test default values for VPC attributes - response = vpc.describe_attribute(Attribute='enableDnsHostnames') - attr = response.get('EnableDnsHostnames') - attr.get('Value').shouldnt.be.ok - - vpc.modify_attribute(EnableDnsHostnames={'Value': True}) - - response = vpc.describe_attribute(Attribute='enableDnsHostnames') - attr = response.get('EnableDnsHostnames') - attr.get('Value').should.be.ok - - -@mock_ec2_deprecated -def test_vpc_associate_dhcp_options(): - conn = boto.connect_vpc() - dhcp_options = conn.create_dhcp_options( - SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) - vpc = conn.create_vpc("10.0.0.0/16") - - conn.associate_dhcp_options(dhcp_options.id, vpc.id) - - vpc.update() - dhcp_options.id.should.equal(vpc.dhcp_options_id) - - -@mock_ec2 -def test_associate_vpc_ipv4_cidr_block(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24') - - # Associate/Extend vpc CIDR range up to 5 ciders - for i in range(43, 47): - response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.{}.0/24'.format(i)) - response['CidrBlockAssociation']['CidrBlockState']['State'].should.equal('associating') - response['CidrBlockAssociation']['CidrBlock'].should.equal('10.10.{}.0/24'.format(i)) - response['CidrBlockAssociation']['AssociationId'].should.contain('vpc-cidr-assoc') 
- - # Check all associations exist - vpc = ec2.Vpc(vpc.id) - vpc.cidr_block_association_set.should.have.length_of(5) - vpc.cidr_block_association_set[2]['CidrBlockState']['State'].should.equal('associated') - vpc.cidr_block_association_set[4]['CidrBlockState']['State'].should.equal('associated') - - # Check error on adding 6th association. - with assert_raises(ClientError) as ex: - response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.50.0/22') - str(ex.exception).should.equal( - "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock " - "operation: This network '{}' has met its maximum number of allowed CIDRs: 5".format(vpc.id)) - -@mock_ec2 -def test_disassociate_vpc_ipv4_cidr_block(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24') - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.43.0/24') - - # Remove an extended cidr block - vpc = ec2.Vpc(vpc.id) - non_default_assoc_cidr_block = next(iter([x for x in vpc.cidr_block_association_set if vpc.cidr_block != x['CidrBlock']]), None) - response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=non_default_assoc_cidr_block['AssociationId']) - response['CidrBlockAssociation']['CidrBlockState']['State'].should.equal('disassociating') - response['CidrBlockAssociation']['CidrBlock'].should.equal(non_default_assoc_cidr_block['CidrBlock']) - response['CidrBlockAssociation']['AssociationId'].should.equal(non_default_assoc_cidr_block['AssociationId']) - - # Error attempting to delete a non-existent CIDR_BLOCK association - with assert_raises(ClientError) as ex: - response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId='vpc-cidr-assoc-BORING123') - str(ex.exception).should.equal( - "An error occurred (InvalidVpcCidrBlockAssociationIdError.NotFound) when calling the " - "DisassociateVpcCidrBlock operation: The vpc CIDR block association ID " - "'vpc-cidr-assoc-BORING123' does not exist") - - # Error attempting to delete Primary CIDR BLOCK association - vpc_base_cidr_assoc_id = next(iter([x for x in vpc.cidr_block_association_set - if vpc.cidr_block == x['CidrBlock']]), {})['AssociationId'] - - with assert_raises(ClientError) as ex: - response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=vpc_base_cidr_assoc_id) - str(ex.exception).should.equal( - "An error occurred (OperationNotPermitted) when calling the DisassociateVpcCidrBlock operation: " - "The vpc CIDR block with association ID {} may not be disassociated. 
It is the primary " - "IPv4 CIDR block of the VPC".format(vpc_base_cidr_assoc_id)) - -@mock_ec2 -def test_cidr_block_association_filters(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - vpc1 = ec2.create_vpc(CidrBlock='10.90.0.0/16') - vpc2 = ec2.create_vpc(CidrBlock='10.91.0.0/16') - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc2.id, CidrBlock='10.10.0.0/19') - vpc3 = ec2.create_vpc(CidrBlock='10.92.0.0/24') - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.1.0/24') - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.2.0/24') - vpc3_assoc_response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.3.0/24') - - # Test filters for a cidr-block in all VPCs cidr-block-associations - filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.cidr-block', - 'Values': ['10.10.0.0/19']}])) - filtered_vpcs.should.be.length_of(1) - filtered_vpcs[0].id.should.equal(vpc2.id) - - # Test filter for association id in VPCs - association_id = vpc3_assoc_response['CidrBlockAssociation']['AssociationId'] - filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.association-id', - 'Values': [association_id]}])) - filtered_vpcs.should.be.length_of(1) - filtered_vpcs[0].id.should.equal(vpc3.id) - - # Test filter for association state in VPC - this will never show anything in this test - filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.association-id', - 'Values': ['failing']}])) - filtered_vpcs.should.be.length_of(0) - -@mock_ec2 -def test_vpc_associate_ipv6_cidr_block(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Test create VPC with IPV6 cidr range - vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24', AmazonProvidedIpv6CidrBlock=True) - ipv6_cidr_block_association_set = next(iter(vpc.ipv6_cidr_block_association_set), None) - ipv6_cidr_block_association_set['Ipv6CidrBlockState']['State'].should.equal('associated') - ipv6_cidr_block_association_set['Ipv6CidrBlock'].should.contain('::/56') - ipv6_cidr_block_association_set['AssociationId'].should.contain('vpc-cidr-assoc') - - # Test Fail on adding 2nd IPV6 association - AWS only allows 1 at this time! 
- with assert_raises(ClientError) as ex: - response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True) - str(ex.exception).should.equal( - "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock " - "operation: This network '{}' has met its maximum number of allowed CIDRs: 1".format(vpc.id)) - - # Test associate ipv6 cidr block after vpc created - vpc = ec2.create_vpc(CidrBlock='10.10.50.0/24') - response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True) - response['Ipv6CidrBlockAssociation']['Ipv6CidrBlockState']['State'].should.equal('associating') - response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'].should.contain('::/56') - response['Ipv6CidrBlockAssociation']['AssociationId'].should.contain('vpc-cidr-assoc-') - - # Check on describe vpc that has ipv6 cidr block association - vpc = ec2.Vpc(vpc.id) - vpc.ipv6_cidr_block_association_set.should.be.length_of(1) - - -@mock_ec2 -def test_vpc_disassociate_ipv6_cidr_block(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - - # Test create VPC with IPV6 cidr range - vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24', AmazonProvidedIpv6CidrBlock=True) - # Test disassociating the only IPV6 - assoc_id = vpc.ipv6_cidr_block_association_set[0]['AssociationId'] - response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=assoc_id) - response['Ipv6CidrBlockAssociation']['Ipv6CidrBlockState']['State'].should.equal('disassociating') - response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'].should.contain('::/56') - response['Ipv6CidrBlockAssociation']['AssociationId'].should.equal(assoc_id) - - -@mock_ec2 -def test_ipv6_cidr_block_association_filters(): - ec2 = boto3.resource('ec2', region_name='us-west-1') - vpc1 = ec2.create_vpc(CidrBlock='10.90.0.0/16') - - vpc2 = ec2.create_vpc(CidrBlock='10.91.0.0/16', AmazonProvidedIpv6CidrBlock=True) - vpc2_assoc_ipv6_assoc_id = vpc2.ipv6_cidr_block_association_set[0]['AssociationId'] - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc2.id, CidrBlock='10.10.0.0/19') - - vpc3 = ec2.create_vpc(CidrBlock='10.92.0.0/24') - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.1.0/24') - ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.2.0/24') - response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, AmazonProvidedIpv6CidrBlock=True) - vpc3_ipv6_cidr_block = response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'] - - vpc4 = ec2.create_vpc(CidrBlock='10.95.0.0/16') # Here for its looks - - # Test filters for an ipv6 cidr-block in all VPCs cidr-block-associations - filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.ipv6-cidr-block', - 'Values': [vpc3_ipv6_cidr_block]}])) - filtered_vpcs.should.be.length_of(1) - filtered_vpcs[0].id.should.equal(vpc3.id) - - # Test filter for association id in VPCs - filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.association-id', - 'Values': [vpc2_assoc_ipv6_assoc_id]}])) - filtered_vpcs.should.be.length_of(1) - filtered_vpcs[0].id.should.equal(vpc2.id) - - # Test filter for association state in VPC - this will never show anything in this test - filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.state', - 'Values': ['associated']}])) - filtered_vpcs.should.be.length_of(2) # 2 of 4 VPCs +from __future__ import unicode_literals +# Ensure 'assert_raises' context manager support for Python 2.6 +import 
tests.backport_assert_raises # flake8: noqa +from nose.tools import assert_raises +from moto.ec2.exceptions import EC2ClientError +from botocore.exceptions import ClientError + +import boto3 +import boto +from boto.exception import EC2ResponseError +import sure # noqa + +from moto import mock_ec2, mock_ec2_deprecated + +SAMPLE_DOMAIN_NAME = u'example.com' +SAMPLE_NAME_SERVERS = [u'10.0.0.6', u'10.0.0.7'] + + +@mock_ec2_deprecated +def test_vpcs(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + vpc.cidr_block.should.equal('10.0.0.0/16') + + all_vpcs = conn.get_all_vpcs() + all_vpcs.should.have.length_of(2) + + vpc.delete() + + all_vpcs = conn.get_all_vpcs() + all_vpcs.should.have.length_of(1) + + with assert_raises(EC2ResponseError) as cm: + conn.delete_vpc("vpc-1234abcd") + cm.exception.code.should.equal('InvalidVpcID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_vpc_defaults(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + + conn.get_all_vpcs().should.have.length_of(2) + conn.get_all_route_tables().should.have.length_of(2) + conn.get_all_security_groups( + filters={'vpc-id': [vpc.id]}).should.have.length_of(1) + + vpc.delete() + + conn.get_all_vpcs().should.have.length_of(1) + conn.get_all_route_tables().should.have.length_of(1) + conn.get_all_security_groups( + filters={'vpc-id': [vpc.id]}).should.have.length_of(0) + + +@mock_ec2_deprecated +def test_vpc_isdefault_filter(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + conn.get_all_vpcs(filters={'isDefault': 'true'}).should.have.length_of(1) + vpc.delete() + conn.get_all_vpcs(filters={'isDefault': 'true'}).should.have.length_of(1) + + +@mock_ec2_deprecated +def test_multiple_vpcs_default_filter(): + conn = boto.connect_vpc('the_key', 'the_secret') + conn.create_vpc("10.8.0.0/16") + conn.create_vpc("10.0.0.0/16") + conn.create_vpc("192.168.0.0/16") + conn.get_all_vpcs().should.have.length_of(4) + vpc = conn.get_all_vpcs(filters={'isDefault': 'true'}) + vpc.should.have.length_of(1) + vpc[0].cidr_block.should.equal('172.31.0.0/16') + + +@mock_ec2_deprecated +def test_vpc_state_available_filter(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpc = conn.create_vpc("10.0.0.0/16") + conn.create_vpc("10.1.0.0/16") + conn.get_all_vpcs(filters={'state': 'available'}).should.have.length_of(3) + vpc.delete() + conn.get_all_vpcs(filters={'state': 'available'}).should.have.length_of(2) + + +@mock_ec2_deprecated +def test_vpc_tagging(): + conn = boto.connect_vpc() + vpc = conn.create_vpc("10.0.0.0/16") + + vpc.add_tag("a key", "some value") + tag = conn.get_all_tags()[0] + tag.name.should.equal("a key") + tag.value.should.equal("some value") + + # Refresh the vpc + vpc = conn.get_all_vpcs(vpc_ids=[vpc.id])[0] + vpc.tags.should.have.length_of(1) + vpc.tags["a key"].should.equal("some value") + + +@mock_ec2_deprecated +def test_vpc_get_by_id(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + conn.create_vpc("10.0.0.0/16") + + vpcs = conn.get_all_vpcs(vpc_ids=[vpc1.id, vpc2.id]) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + with assert_raises(EC2ResponseError) as cm: + conn.get_all_vpcs(vpc_ids=['vpc-does_not_exist']) + 
cm.exception.code.should.equal('InvalidVpcID.NotFound') + cm.exception.status.should.equal(400) + cm.exception.request_id.should_not.be.none + + +@mock_ec2_deprecated +def test_vpc_get_by_cidr_block(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + conn.create_vpc("10.0.0.0/24") + + vpcs = conn.get_all_vpcs(filters={'cidr': '10.0.0.0/16'}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2_deprecated +def test_vpc_get_by_dhcp_options_id(): + conn = boto.connect_vpc() + dhcp_options = conn.create_dhcp_options( + SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + conn.create_vpc("10.0.0.0/24") + + conn.associate_dhcp_options(dhcp_options.id, vpc1.id) + conn.associate_dhcp_options(dhcp_options.id, vpc2.id) + + vpcs = conn.get_all_vpcs(filters={'dhcp-options-id': dhcp_options.id}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2_deprecated +def test_vpc_get_by_tag(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + vpc3 = conn.create_vpc("10.0.0.0/24") + + vpc1.add_tag('Name', 'TestVPC') + vpc2.add_tag('Name', 'TestVPC') + vpc3.add_tag('Name', 'TestVPC2') + + vpcs = conn.get_all_vpcs(filters={'tag:Name': 'TestVPC'}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2_deprecated +def test_vpc_get_by_tag_key_superset(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + vpc3 = conn.create_vpc("10.0.0.0/24") + + vpc1.add_tag('Name', 'TestVPC') + vpc1.add_tag('Key', 'TestVPC2') + vpc2.add_tag('Name', 'TestVPC') + vpc2.add_tag('Key', 'TestVPC2') + vpc3.add_tag('Key', 'TestVPC2') + + vpcs = conn.get_all_vpcs(filters={'tag-key': 'Name'}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2_deprecated +def test_vpc_get_by_tag_key_subset(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + vpc3 = conn.create_vpc("10.0.0.0/24") + + vpc1.add_tag('Name', 'TestVPC') + vpc1.add_tag('Key', 'TestVPC2') + vpc2.add_tag('Name', 'TestVPC') + vpc2.add_tag('Key', 'TestVPC2') + vpc3.add_tag('Test', 'TestVPC2') + + vpcs = conn.get_all_vpcs(filters={'tag-key': ['Name', 'Key']}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2_deprecated +def test_vpc_get_by_tag_value_superset(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + vpc3 = conn.create_vpc("10.0.0.0/24") + + vpc1.add_tag('Name', 'TestVPC') + vpc1.add_tag('Key', 'TestVPC2') + vpc2.add_tag('Name', 'TestVPC') + vpc2.add_tag('Key', 'TestVPC2') + vpc3.add_tag('Key', 'TestVPC2') + + vpcs = conn.get_all_vpcs(filters={'tag-value': 'TestVPC'}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2_deprecated +def 
test_vpc_get_by_tag_value_subset(): + conn = boto.connect_vpc() + vpc1 = conn.create_vpc("10.0.0.0/16") + vpc2 = conn.create_vpc("10.0.0.0/16") + conn.create_vpc("10.0.0.0/24") + + vpc1.add_tag('Name', 'TestVPC') + vpc1.add_tag('Key', 'TestVPC2') + vpc2.add_tag('Name', 'TestVPC') + vpc2.add_tag('Key', 'TestVPC2') + + vpcs = conn.get_all_vpcs(filters={'tag-value': ['TestVPC', 'TestVPC2']}) + vpcs.should.have.length_of(2) + vpc_ids = tuple(map(lambda v: v.id, vpcs)) + vpc1.id.should.be.within(vpc_ids) + vpc2.id.should.be.within(vpc_ids) + + +@mock_ec2 +def test_default_vpc(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Create the default VPC + default_vpc = list(ec2.vpcs.all())[0] + default_vpc.cidr_block.should.equal('172.31.0.0/16') + default_vpc.instance_tenancy.should.equal('default') + default_vpc.reload() + default_vpc.is_default.should.be.ok + + # Test default values for VPC attributes + response = default_vpc.describe_attribute(Attribute='enableDnsSupport') + attr = response.get('EnableDnsSupport') + attr.get('Value').should.be.ok + + response = default_vpc.describe_attribute(Attribute='enableDnsHostnames') + attr = response.get('EnableDnsHostnames') + attr.get('Value').should.be.ok + + +@mock_ec2 +def test_non_default_vpc(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Create the default VPC - this already exists when backend instantiated! + #ec2.create_vpc(CidrBlock='172.31.0.0/16') + + # Create the non default VPC + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + # Test default instance_tenancy + vpc.instance_tenancy.should.equal('default') + + # Test default values for VPC attributes + response = vpc.describe_attribute(Attribute='enableDnsSupport') + attr = response.get('EnableDnsSupport') + attr.get('Value').should.be.ok + + response = vpc.describe_attribute(Attribute='enableDnsHostnames') + attr = response.get('EnableDnsHostnames') + attr.get('Value').shouldnt.be.ok + + # Check Primary CIDR Block Associations + cidr_block_association_set = next(iter(vpc.cidr_block_association_set), None) + cidr_block_association_set['CidrBlockState']['State'].should.equal('associated') + cidr_block_association_set['CidrBlock'].should.equal(vpc.cidr_block) + cidr_block_association_set['AssociationId'].should.contain('vpc-cidr-assoc') + + +@mock_ec2 +def test_vpc_dedicated_tenancy(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Create the default VPC + ec2.create_vpc(CidrBlock='172.31.0.0/16') + + # Create the non default VPC + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16', InstanceTenancy='dedicated') + vpc.reload() + vpc.is_default.shouldnt.be.ok + + vpc.instance_tenancy.should.equal('dedicated') + + +@mock_ec2 +def test_vpc_modify_enable_dns_support(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Create the default VPC + ec2.create_vpc(CidrBlock='172.31.0.0/16') + + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + + # Test default values for VPC attributes + response = vpc.describe_attribute(Attribute='enableDnsSupport') + attr = response.get('EnableDnsSupport') + attr.get('Value').should.be.ok + + vpc.modify_attribute(EnableDnsSupport={'Value': False}) + + response = vpc.describe_attribute(Attribute='enableDnsSupport') + attr = response.get('EnableDnsSupport') + attr.get('Value').shouldnt.be.ok + + +@mock_ec2 +def test_vpc_modify_enable_dns_hostnames(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Create the default VPC + ec2.create_vpc(CidrBlock='172.31.0.0/16') + + 
vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
+    # Test default values for VPC attributes
+    response = vpc.describe_attribute(Attribute='enableDnsHostnames')
+    attr = response.get('EnableDnsHostnames')
+    attr.get('Value').shouldnt.be.ok
+
+    vpc.modify_attribute(EnableDnsHostnames={'Value': True})
+
+    response = vpc.describe_attribute(Attribute='enableDnsHostnames')
+    attr = response.get('EnableDnsHostnames')
+    attr.get('Value').should.be.ok
+
+
+@mock_ec2_deprecated
+def test_vpc_associate_dhcp_options():
+    conn = boto.connect_vpc()
+    dhcp_options = conn.create_dhcp_options(
+        SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS)
+    vpc = conn.create_vpc("10.0.0.0/16")
+
+    conn.associate_dhcp_options(dhcp_options.id, vpc.id)
+
+    vpc.update()
+    dhcp_options.id.should.equal(vpc.dhcp_options_id)
+
+
+@mock_ec2
+def test_associate_vpc_ipv4_cidr_block():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+    vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24')
+
+    # Associate/extend the VPC CIDR range up to the maximum of 5 CIDRs
+    for i in range(43, 47):
+        response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.{}.0/24'.format(i))
+        response['CidrBlockAssociation']['CidrBlockState']['State'].should.equal('associating')
+        response['CidrBlockAssociation']['CidrBlock'].should.equal('10.10.{}.0/24'.format(i))
+        response['CidrBlockAssociation']['AssociationId'].should.contain('vpc-cidr-assoc')
+
+    # Check all associations exist
+    vpc = ec2.Vpc(vpc.id)
+    vpc.cidr_block_association_set.should.have.length_of(5)
+    vpc.cidr_block_association_set[2]['CidrBlockState']['State'].should.equal('associated')
+    vpc.cidr_block_association_set[4]['CidrBlockState']['State'].should.equal('associated')
+
+    # Check error on adding a 6th association.
+    with assert_raises(ClientError) as ex:
+        response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.50.0/22')
+    str(ex.exception).should.equal(
+        "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock "
+        "operation: This network '{}' has met its maximum number of allowed CIDRs: 5".format(vpc.id))
+
+@mock_ec2
+def test_disassociate_vpc_ipv4_cidr_block():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+    vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24')
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.43.0/24')
+
+    # Remove an extended CIDR block
+    vpc = ec2.Vpc(vpc.id)
+    non_default_assoc_cidr_block = next(iter([x for x in vpc.cidr_block_association_set if vpc.cidr_block != x['CidrBlock']]), None)
+    response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=non_default_assoc_cidr_block['AssociationId'])
+    response['CidrBlockAssociation']['CidrBlockState']['State'].should.equal('disassociating')
+    response['CidrBlockAssociation']['CidrBlock'].should.equal(non_default_assoc_cidr_block['CidrBlock'])
+    response['CidrBlockAssociation']['AssociationId'].should.equal(non_default_assoc_cidr_block['AssociationId'])
+
+    # Error attempting to delete a non-existent CIDR block association
+    with assert_raises(ClientError) as ex:
+        response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId='vpc-cidr-assoc-BORING123')
+    str(ex.exception).should.equal(
+        "An error occurred (InvalidVpcCidrBlockAssociationIdError.NotFound) when calling the "
+        "DisassociateVpcCidrBlock operation: The vpc CIDR block association ID "
+        "'vpc-cidr-assoc-BORING123' does not exist")
+
+    # Error attempting to delete the primary CIDR block association
+    vpc_base_cidr_assoc_id = next(iter([x for x in
vpc.cidr_block_association_set
+                                            if vpc.cidr_block == x['CidrBlock']]), {})['AssociationId']
+
+    with assert_raises(ClientError) as ex:
+        response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=vpc_base_cidr_assoc_id)
+    str(ex.exception).should.equal(
+        "An error occurred (OperationNotPermitted) when calling the DisassociateVpcCidrBlock operation: "
+        "The vpc CIDR block with association ID {} may not be disassociated. It is the primary "
+        "IPv4 CIDR block of the VPC".format(vpc_base_cidr_assoc_id))
+
+@mock_ec2
+def test_cidr_block_association_filters():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+    vpc1 = ec2.create_vpc(CidrBlock='10.90.0.0/16')
+    vpc2 = ec2.create_vpc(CidrBlock='10.91.0.0/16')
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc2.id, CidrBlock='10.10.0.0/19')
+    vpc3 = ec2.create_vpc(CidrBlock='10.92.0.0/24')
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.1.0/24')
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.2.0/24')
+    vpc3_assoc_response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.3.0/24')
+
+    # Test filters for a cidr-block in all VPCs cidr-block-associations
+    filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.cidr-block',
+                                                   'Values': ['10.10.0.0/19']}]))
+    filtered_vpcs.should.be.length_of(1)
+    filtered_vpcs[0].id.should.equal(vpc2.id)
+
+    # Test filter for association id in VPCs
+    association_id = vpc3_assoc_response['CidrBlockAssociation']['AssociationId']
+    filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.association-id',
+                                                   'Values': [association_id]}]))
+    filtered_vpcs.should.be.length_of(1)
+    filtered_vpcs[0].id.should.equal(vpc3.id)
+
+    # Test filter for a bogus association id - this will never match anything in this test
+    filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.association-id',
+                                                   'Values': ['failing']}]))
+    filtered_vpcs.should.be.length_of(0)
+
+@mock_ec2
+def test_vpc_associate_ipv6_cidr_block():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+    # Test creating a VPC with an Amazon-provided IPv6 CIDR range
+    vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24', AmazonProvidedIpv6CidrBlock=True)
+    ipv6_cidr_block_association_set = next(iter(vpc.ipv6_cidr_block_association_set), None)
+    ipv6_cidr_block_association_set['Ipv6CidrBlockState']['State'].should.equal('associated')
+    ipv6_cidr_block_association_set['Ipv6CidrBlock'].should.contain('::/56')
+    ipv6_cidr_block_association_set['AssociationId'].should.contain('vpc-cidr-assoc')
+
+    # Adding a 2nd IPv6 association must fail - AWS only allows 1 at this time!
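+    # (A note, assuming stock botocore behaviour rather than anything moto-specific:
+    # str(ClientError) renders as
+    #     "An error occurred (<Code>) when calling the <Operation> operation: <Message>"
+    # so the single full-string comparison below pins both the CidrLimitExceeded
+    # code and the backend's message.)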
+    with assert_raises(ClientError) as ex:
+        response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True)
+    str(ex.exception).should.equal(
+        "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock "
+        "operation: This network '{}' has met its maximum number of allowed CIDRs: 1".format(vpc.id))
+
+    # Associate an IPv6 CIDR block after the VPC has been created
+    vpc = ec2.create_vpc(CidrBlock='10.10.50.0/24')
+    response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True)
+    response['Ipv6CidrBlockAssociation']['Ipv6CidrBlockState']['State'].should.equal('associating')
+    response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'].should.contain('::/56')
+    response['Ipv6CidrBlockAssociation']['AssociationId'].should.contain('vpc-cidr-assoc-')
+
+    # Describe the VPC and confirm it has exactly one IPv6 CIDR block association
+    vpc = ec2.Vpc(vpc.id)
+    vpc.ipv6_cidr_block_association_set.should.be.length_of(1)
+
+
+@mock_ec2
+def test_vpc_disassociate_ipv6_cidr_block():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+
+    # Create a VPC with an Amazon-provided IPv6 CIDR range
+    vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24', AmazonProvidedIpv6CidrBlock=True)
+    # Disassociate the only IPv6 CIDR block
+    assoc_id = vpc.ipv6_cidr_block_association_set[0]['AssociationId']
+    response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=assoc_id)
+    response['Ipv6CidrBlockAssociation']['Ipv6CidrBlockState']['State'].should.equal('disassociating')
+    response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'].should.contain('::/56')
+    response['Ipv6CidrBlockAssociation']['AssociationId'].should.equal(assoc_id)
+
+
+@mock_ec2
+def test_ipv6_cidr_block_association_filters():
+    ec2 = boto3.resource('ec2', region_name='us-west-1')
+    vpc1 = ec2.create_vpc(CidrBlock='10.90.0.0/16')
+
+    vpc2 = ec2.create_vpc(CidrBlock='10.91.0.0/16', AmazonProvidedIpv6CidrBlock=True)
+    vpc2_assoc_ipv6_assoc_id = vpc2.ipv6_cidr_block_association_set[0]['AssociationId']
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc2.id, CidrBlock='10.10.0.0/19')
+
+    vpc3 = ec2.create_vpc(CidrBlock='10.92.0.0/24')
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.1.0/24')
+    ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.2.0/24')
+    response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, AmazonProvidedIpv6CidrBlock=True)
+    vpc3_ipv6_cidr_block = response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock']
+
+    vpc4 = ec2.create_vpc(CidrBlock='10.95.0.0/16')  # No IPv6 block; the filters below should never match it
+
+    # Test filters for an ipv6 cidr-block in all VPCs cidr-block-associations
+    filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.ipv6-cidr-block',
+                                                   'Values': [vpc3_ipv6_cidr_block]}]))
+    filtered_vpcs.should.be.length_of(1)
+    filtered_vpcs[0].id.should.equal(vpc3.id)
+
+    # Test filter for association id in VPCs
+    filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.association-id',
+                                                   'Values': [vpc2_assoc_ipv6_assoc_id]}]))
+    filtered_vpcs.should.be.length_of(1)
+    filtered_vpcs[0].id.should.equal(vpc2.id)
+
+    # Test filter for association state - only the two VPCs with IPv6 blocks match
+    filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.state',
+                                                   'Values': ['associated']}]))
+    filtered_vpcs.should.be.length_of(2)  # 2 of 4 VPCs
diff --git a/tests/test_ec2/test_vpn_connections.py b/tests/test_ec2/test_vpn_connections.py
index e95aa76ee..70c3f3e33 100644
--- 
a/tests/test_ec2/test_vpn_connections.py +++ b/tests/test_ec2/test_vpn_connections.py @@ -1,51 +1,51 @@ -from __future__ import unicode_literals -import boto -from nose.tools import assert_raises -import sure # noqa -from boto.exception import EC2ResponseError - -from moto import mock_ec2_deprecated - - -@mock_ec2_deprecated -def test_create_vpn_connections(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpn_connection = conn.create_vpn_connection( - 'ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') - vpn_connection.should_not.be.none - vpn_connection.id.should.match(r'vpn-\w+') - vpn_connection.type.should.equal('ipsec.1') - - -@mock_ec2_deprecated -def test_delete_vpn_connections(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpn_connection = conn.create_vpn_connection( - 'ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') - list_of_vpn_connections = conn.get_all_vpn_connections() - list_of_vpn_connections.should.have.length_of(1) - conn.delete_vpn_connection(vpn_connection.id) - list_of_vpn_connections = conn.get_all_vpn_connections() - list_of_vpn_connections.should.have.length_of(0) - - -@mock_ec2_deprecated -def test_delete_vpn_connections_bad_id(): - conn = boto.connect_vpc('the_key', 'the_secret') - with assert_raises(EC2ResponseError): - conn.delete_vpn_connection('vpn-0123abcd') - - -@mock_ec2_deprecated -def test_describe_vpn_connections(): - conn = boto.connect_vpc('the_key', 'the_secret') - list_of_vpn_connections = conn.get_all_vpn_connections() - list_of_vpn_connections.should.have.length_of(0) - conn.create_vpn_connection('ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') - list_of_vpn_connections = conn.get_all_vpn_connections() - list_of_vpn_connections.should.have.length_of(1) - vpn = conn.create_vpn_connection('ipsec.1', 'vgw-1234abcd', 'cgw-1234abcd') - list_of_vpn_connections = conn.get_all_vpn_connections() - list_of_vpn_connections.should.have.length_of(2) - list_of_vpn_connections = conn.get_all_vpn_connections(vpn.id) - list_of_vpn_connections.should.have.length_of(1) +from __future__ import unicode_literals +import boto +from nose.tools import assert_raises +import sure # noqa +from boto.exception import EC2ResponseError + +from moto import mock_ec2_deprecated + + +@mock_ec2_deprecated +def test_create_vpn_connections(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpn_connection = conn.create_vpn_connection( + 'ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') + vpn_connection.should_not.be.none + vpn_connection.id.should.match(r'vpn-\w+') + vpn_connection.type.should.equal('ipsec.1') + + +@mock_ec2_deprecated +def test_delete_vpn_connections(): + conn = boto.connect_vpc('the_key', 'the_secret') + vpn_connection = conn.create_vpn_connection( + 'ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') + list_of_vpn_connections = conn.get_all_vpn_connections() + list_of_vpn_connections.should.have.length_of(1) + conn.delete_vpn_connection(vpn_connection.id) + list_of_vpn_connections = conn.get_all_vpn_connections() + list_of_vpn_connections.should.have.length_of(0) + + +@mock_ec2_deprecated +def test_delete_vpn_connections_bad_id(): + conn = boto.connect_vpc('the_key', 'the_secret') + with assert_raises(EC2ResponseError): + conn.delete_vpn_connection('vpn-0123abcd') + + +@mock_ec2_deprecated +def test_describe_vpn_connections(): + conn = boto.connect_vpc('the_key', 'the_secret') + list_of_vpn_connections = conn.get_all_vpn_connections() + list_of_vpn_connections.should.have.length_of(0) + conn.create_vpn_connection('ipsec.1', 'vgw-0123abcd', 'cgw-0123abcd') + list_of_vpn_connections = 
conn.get_all_vpn_connections() + list_of_vpn_connections.should.have.length_of(1) + vpn = conn.create_vpn_connection('ipsec.1', 'vgw-1234abcd', 'cgw-1234abcd') + list_of_vpn_connections = conn.get_all_vpn_connections() + list_of_vpn_connections.should.have.length_of(2) + list_of_vpn_connections = conn.get_all_vpn_connections(vpn.id) + list_of_vpn_connections.should.have.length_of(1) diff --git a/tests/test_ec2/test_windows.py b/tests/test_ec2/test_windows.py index 364ac2f8a..ae2f7b29a 100644 --- a/tests/test_ec2/test_windows.py +++ b/tests/test_ec2/test_windows.py @@ -1,10 +1,10 @@ -from __future__ import unicode_literals -import boto -import sure # noqa - -from moto import mock_ec2 - - -@mock_ec2 -def test_windows(): - pass +from __future__ import unicode_literals +import boto +import sure # noqa + +from moto import mock_ec2 + + +@mock_ec2 +def test_windows(): + pass diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index c0cef81a9..3ce48d87d 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -1,697 +1,697 @@ -from __future__ import unicode_literals - -import hashlib -import json -from datetime import datetime -from random import random - -import re -import sure # noqa - -import boto3 -from botocore.exceptions import ClientError, ParamValidationError -from dateutil.tz import tzlocal - -from moto import mock_ecr - - -def _create_image_digest(contents=None): - if not contents: - contents = 'docker_image{0}'.format(int(random() * 10 ** 6)) - return "sha256:%s" % hashlib.sha256(contents.encode('utf-8')).hexdigest() - - -def _create_image_manifest(): - return { - "schemaVersion": 2, - "mediaType": "application/vnd.docker.distribution.manifest.v2+json", - "config": - { - "mediaType": "application/vnd.docker.container.image.v1+json", - "size": 7023, - "digest": _create_image_digest("config") - }, - "layers": [ - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 32654, - "digest": _create_image_digest("layer1") - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 16724, - "digest": _create_image_digest("layer2") - }, - { - "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", - "size": 73109, - # randomize image digest - "digest": _create_image_digest() - } - ] - } - - -@mock_ecr -def test_create_repository(): - client = boto3.client('ecr', region_name='us-east-1') - response = client.create_repository( - repositoryName='test_ecr_repository' - ) - response['repository']['repositoryName'].should.equal('test_ecr_repository') - response['repository']['repositoryArn'].should.equal( - 'arn:aws:ecr:us-east-1:012345678910:repository/test_ecr_repository') - response['repository']['registryId'].should.equal('012345678910') - response['repository']['repositoryUri'].should.equal( - '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_ecr_repository') - # response['repository']['createdAt'].should.equal(0) - - -@mock_ecr -def test_describe_repositories(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository1' - ) - _ = client.create_repository( - repositoryName='test_repository0' - ) - response = client.describe_repositories() - len(response['repositories']).should.equal(2) - - respository_arns = ['arn:aws:ecr:us-east-1:012345678910:repository/test_repository1', - 'arn:aws:ecr:us-east-1:012345678910:repository/test_repository0'] - set([response['repositories'][0]['repositoryArn'], - 
response['repositories'][1]['repositoryArn']]).should.equal(set(respository_arns)) - - respository_uris = ['012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1', - '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository0'] - set([response['repositories'][0]['repositoryUri'], - response['repositories'][1]['repositoryUri']]).should.equal(set(respository_uris)) - - -@mock_ecr -def test_describe_repositories_1(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository1' - ) - _ = client.create_repository( - repositoryName='test_repository0' - ) - response = client.describe_repositories(registryId='012345678910') - len(response['repositories']).should.equal(2) - - respository_arns = ['arn:aws:ecr:us-east-1:012345678910:repository/test_repository1', - 'arn:aws:ecr:us-east-1:012345678910:repository/test_repository0'] - set([response['repositories'][0]['repositoryArn'], - response['repositories'][1]['repositoryArn']]).should.equal(set(respository_arns)) - - respository_uris = ['012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1', - '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository0'] - set([response['repositories'][0]['repositoryUri'], - response['repositories'][1]['repositoryUri']]).should.equal(set(respository_uris)) - - -@mock_ecr -def test_describe_repositories_2(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository1' - ) - _ = client.create_repository( - repositoryName='test_repository0' - ) - response = client.describe_repositories(registryId='109876543210') - len(response['repositories']).should.equal(0) - - -@mock_ecr -def test_describe_repositories_3(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository1' - ) - _ = client.create_repository( - repositoryName='test_repository0' - ) - response = client.describe_repositories(repositoryNames=['test_repository1']) - len(response['repositories']).should.equal(1) - respository_arn = 'arn:aws:ecr:us-east-1:012345678910:repository/test_repository1' - response['repositories'][0]['repositoryArn'].should.equal(respository_arn) - - respository_uri = '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1' - response['repositories'][0]['repositoryUri'].should.equal(respository_uri) - - -@mock_ecr -def test_describe_repositories_with_image(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='latest' - ) - - response = client.describe_repositories(repositoryNames=['test_repository']) - len(response['repositories']).should.equal(1) - - -@mock_ecr -def test_delete_repository(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - response = client.delete_repository(repositoryName='test_repository') - response['repository']['repositoryName'].should.equal('test_repository') - response['repository']['repositoryArn'].should.equal( - 'arn:aws:ecr:us-east-1:012345678910:repository/test_repository') - response['repository']['registryId'].should.equal('012345678910') - response['repository']['repositoryUri'].should.equal( - '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository') - # 
response['repository']['createdAt'].should.equal(0) - - response = client.describe_repositories() - len(response['repositories']).should.equal(0) - - -@mock_ecr -def test_put_image(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - response = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='latest' - ) - - response['image']['imageId']['imageTag'].should.equal('latest') - response['image']['imageId']['imageDigest'].should.contain("sha") - response['image']['repositoryName'].should.equal('test_repository') - response['image']['registryId'].should.equal('012345678910') - -@mock_ecr -def test_put_image_with_multiple_tags(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - manifest = _create_image_manifest() - response = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(manifest), - imageTag='v1' - ) - - response['image']['imageId']['imageTag'].should.equal('v1') - response['image']['imageId']['imageDigest'].should.contain("sha") - response['image']['repositoryName'].should.equal('test_repository') - response['image']['registryId'].should.equal('012345678910') - - response1 = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(manifest), - imageTag='latest' - ) - - response1['image']['imageId']['imageTag'].should.equal('latest') - response1['image']['imageId']['imageDigest'].should.contain("sha") - response1['image']['repositoryName'].should.equal('test_repository') - response1['image']['registryId'].should.equal('012345678910') - - response2 = client.describe_images(repositoryName='test_repository') - type(response2['imageDetails']).should.be(list) - len(response2['imageDetails']).should.be(1) - - response2['imageDetails'][0]['imageDigest'].should.contain("sha") - - response2['imageDetails'][0]['registryId'].should.equal("012345678910") - - response2['imageDetails'][0]['repositoryName'].should.equal("test_repository") - - len(response2['imageDetails'][0]['imageTags']).should.be(2) - response2['imageDetails'][0]['imageTags'].should.be.equal(['v1', 'latest']) - -@mock_ecr -def test_list_images(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository_1' - ) - - _ = client.create_repository( - repositoryName='test_repository_2' - ) - - _ = client.put_image( - repositoryName='test_repository_1', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='latest' - ) - - _ = client.put_image( - repositoryName='test_repository_1', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='v1' - ) - - _ = client.put_image( - repositoryName='test_repository_1', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='v2' - ) - - _ = client.put_image( - repositoryName='test_repository_2', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='oldest' - ) - - response = client.list_images(repositoryName='test_repository_1') - type(response['imageIds']).should.be(list) - len(response['imageIds']).should.be(3) - - image_tags = ['latest', 'v1', 'v2'] - set([response['imageIds'][0]['imageTag'], - response['imageIds'][1]['imageTag'], - response['imageIds'][2]['imageTag']]).should.equal(set(image_tags)) - - response = client.list_images(repositoryName='test_repository_2') - type(response['imageIds']).should.be(list) - 
len(response['imageIds']).should.be(1) - response['imageIds'][0]['imageTag'].should.equal('oldest') - - -@mock_ecr -def test_list_images_from_repository_that_doesnt_exist(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository_1' - ) - - # non existing repo - error_msg = re.compile( - r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", - re.MULTILINE) - client.list_images.when.called_with( - repositoryName='repo-that-doesnt-exist', - registryId='123', - ).should.throw(Exception, error_msg) - - # repo does not exist in specified registry - error_msg = re.compile( - r".*The repository with name 'test_repository_1' does not exist in the registry with id '222'.*", - re.MULTILINE) - client.list_images.when.called_with( - repositoryName='test_repository_1', - registryId='222', - ).should.throw(Exception, error_msg) - - -@mock_ecr -def test_describe_images(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()) - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='latest' - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='v1' - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='v2' - ) - - response = client.describe_images(repositoryName='test_repository') - type(response['imageDetails']).should.be(list) - len(response['imageDetails']).should.be(4) - - response['imageDetails'][0]['imageDigest'].should.contain("sha") - response['imageDetails'][1]['imageDigest'].should.contain("sha") - response['imageDetails'][2]['imageDigest'].should.contain("sha") - response['imageDetails'][3]['imageDigest'].should.contain("sha") - - response['imageDetails'][0]['registryId'].should.equal("012345678910") - response['imageDetails'][1]['registryId'].should.equal("012345678910") - response['imageDetails'][2]['registryId'].should.equal("012345678910") - response['imageDetails'][3]['registryId'].should.equal("012345678910") - - response['imageDetails'][0]['repositoryName'].should.equal("test_repository") - response['imageDetails'][1]['repositoryName'].should.equal("test_repository") - response['imageDetails'][2]['repositoryName'].should.equal("test_repository") - response['imageDetails'][3]['repositoryName'].should.equal("test_repository") - - response['imageDetails'][0].should_not.have.key('imageTags') - len(response['imageDetails'][1]['imageTags']).should.be(1) - len(response['imageDetails'][2]['imageTags']).should.be(1) - len(response['imageDetails'][3]['imageTags']).should.be(1) - - image_tags = ['latest', 'v1', 'v2'] - set([response['imageDetails'][1]['imageTags'][0], - response['imageDetails'][2]['imageTags'][0], - response['imageDetails'][3]['imageTags'][0]]).should.equal(set(image_tags)) - - response['imageDetails'][0]['imageSizeInBytes'].should.equal(52428800) - response['imageDetails'][1]['imageSizeInBytes'].should.equal(52428800) - response['imageDetails'][2]['imageSizeInBytes'].should.equal(52428800) - response['imageDetails'][3]['imageSizeInBytes'].should.equal(52428800) - - -@mock_ecr -def test_describe_images_by_tag(): - client = boto3.client('ecr', 
region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - tag_map = {} - for tag in ['latest', 'v1', 'v2']: - put_response = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag=tag - ) - tag_map[tag] = put_response['image'] - - for tag, put_response in tag_map.items(): - response = client.describe_images(repositoryName='test_repository', imageIds=[{'imageTag': tag}]) - len(response['imageDetails']).should.be(1) - image_detail = response['imageDetails'][0] - image_detail['registryId'].should.equal("012345678910") - image_detail['repositoryName'].should.equal("test_repository") - image_detail['imageTags'].should.equal([put_response['imageId']['imageTag']]) - image_detail['imageDigest'].should.equal(put_response['imageId']['imageDigest']) - - -@mock_ecr -def test_describe_images_tags_should_not_contain_empty_tag1(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - manifest = _create_image_manifest() - client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(manifest) - ) - - tags = ['v1', 'v2', 'latest'] - for tag in tags: - client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(manifest), - imageTag=tag - ) - - response = client.describe_images(repositoryName='test_repository', imageIds=[{'imageTag': tag}]) - len(response['imageDetails']).should.be(1) - image_detail = response['imageDetails'][0] - len(image_detail['imageTags']).should.equal(3) - image_detail['imageTags'].should.be.equal(tags) - - -@mock_ecr -def test_describe_images_tags_should_not_contain_empty_tag2(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - manifest = _create_image_manifest() - tags = ['v1', 'v2'] - for tag in tags: - client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(manifest), - imageTag=tag - ) - - client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(manifest) - ) - - client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(manifest), - imageTag='latest' - ) - - response = client.describe_images(repositoryName='test_repository', imageIds=[{'imageTag': tag}]) - len(response['imageDetails']).should.be(1) - image_detail = response['imageDetails'][0] - len(image_detail['imageTags']).should.equal(3) - image_detail['imageTags'].should.be.equal(['v1', 'v2', 'latest']) - - -@mock_ecr -def test_describe_repository_that_doesnt_exist(): - client = boto3.client('ecr', region_name='us-east-1') - - error_msg = re.compile( - r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", - re.MULTILINE) - client.describe_repositories.when.called_with( - repositoryNames=['repo-that-doesnt-exist'], - registryId='123', - ).should.throw(ClientError, error_msg) - -@mock_ecr -def test_describe_image_that_doesnt_exist(): - client = boto3.client('ecr', region_name='us-east-1') - client.create_repository(repositoryName='test_repository') - - error_msg1 = re.compile( - r".*The image with imageId {imageDigest:'null', imageTag:'testtag'} does not exist within " - r"the repository with name 'test_repository' in the registry with id '123'.*", - re.MULTILINE) - - client.describe_images.when.called_with( - repositoryName='test_repository', imageIds=[{'imageTag': 'testtag'}], registryId='123', - 
).should.throw(ClientError, error_msg1) - - error_msg2 = re.compile( - r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", - re.MULTILINE) - client.describe_images.when.called_with( - repositoryName='repo-that-doesnt-exist', imageIds=[{'imageTag': 'testtag'}], registryId='123', - ).should.throw(ClientError, error_msg2) - - -@mock_ecr -def test_delete_repository_that_doesnt_exist(): - client = boto3.client('ecr', region_name='us-east-1') - - error_msg = re.compile( - r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", - re.MULTILINE) - - client.delete_repository.when.called_with( - repositoryName='repo-that-doesnt-exist', - registryId='123').should.throw( - ClientError, error_msg) - - -@mock_ecr -def test_describe_images_by_digest(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - tags = ['latest', 'v1', 'v2'] - digest_map = {} - for tag in tags: - put_response = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag=tag - ) - digest_map[put_response['image']['imageId']['imageDigest']] = put_response['image'] - - for digest, put_response in digest_map.items(): - response = client.describe_images(repositoryName='test_repository', - imageIds=[{'imageDigest': digest}]) - len(response['imageDetails']).should.be(1) - image_detail = response['imageDetails'][0] - image_detail['registryId'].should.equal("012345678910") - image_detail['repositoryName'].should.equal("test_repository") - image_detail['imageTags'].should.equal([put_response['imageId']['imageTag']]) - image_detail['imageDigest'].should.equal(digest) - - -@mock_ecr -def test_get_authorization_token_assume_region(): - client = boto3.client('ecr', region_name='us-east-1') - auth_token_response = client.get_authorization_token() - - auth_token_response.should.contain('authorizationData') - auth_token_response.should.contain('ResponseMetadata') - auth_token_response['authorizationData'].should.equal([ - { - 'authorizationToken': 'QVdTOjAxMjM0NTY3ODkxMC1hdXRoLXRva2Vu', - 'proxyEndpoint': 'https://012345678910.dkr.ecr.us-east-1.amazonaws.com', - 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()) - }, - ]) - - -@mock_ecr -def test_get_authorization_token_explicit_regions(): - client = boto3.client('ecr', region_name='us-east-1') - auth_token_response = client.get_authorization_token(registryIds=['10987654321', '878787878787']) - - auth_token_response.should.contain('authorizationData') - auth_token_response.should.contain('ResponseMetadata') - auth_token_response['authorizationData'].should.equal([ - { - 'authorizationToken': 'QVdTOjEwOTg3NjU0MzIxLWF1dGgtdG9rZW4=', - 'proxyEndpoint': 'https://10987654321.dkr.ecr.us-east-1.amazonaws.com', - 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()), - }, - { - 'authorizationToken': 'QVdTOjg3ODc4Nzg3ODc4Ny1hdXRoLXRva2Vu', - 'proxyEndpoint': 'https://878787878787.dkr.ecr.us-east-1.amazonaws.com', - 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()) - - } - ]) - - -@mock_ecr -def test_batch_get_image(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='latest' - ) - - _ = client.put_image( - repositoryName='test_repository', - 
imageManifest=json.dumps(_create_image_manifest()), - imageTag='v1' - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='v2' - ) - - response = client.batch_get_image( - repositoryName='test_repository', - imageIds=[ - { - 'imageTag': 'v2' - }, - ], - ) - - type(response['images']).should.be(list) - len(response['images']).should.be(1) - - response['images'][0]['imageManifest'].should.contain("vnd.docker.distribution.manifest.v2+json") - response['images'][0]['registryId'].should.equal("012345678910") - response['images'][0]['repositoryName'].should.equal("test_repository") - - response['images'][0]['imageId']['imageTag'].should.equal("v2") - response['images'][0]['imageId']['imageDigest'].should.contain("sha") - - type(response['failures']).should.be(list) - len(response['failures']).should.be(0) - - -@mock_ecr -def test_batch_get_image_that_doesnt_exist(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='latest' - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='v1' - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='v2' - ) - - response = client.batch_get_image( - repositoryName='test_repository', - imageIds=[ - { - 'imageTag': 'v5' - }, - ], - ) - - type(response['images']).should.be(list) - len(response['images']).should.be(0) - - type(response['failures']).should.be(list) - len(response['failures']).should.be(1) - response['failures'][0]['failureReason'].should.equal("Requested image not found") - response['failures'][0]['failureCode'].should.equal("ImageNotFound") - response['failures'][0]['imageId']['imageTag'].should.equal("v5") - - -@mock_ecr -def test_batch_get_image_no_tags(): - client = boto3.client('ecr', region_name='us-east-1') - _ = client.create_repository( - repositoryName='test_repository' - ) - - _ = client.put_image( - repositoryName='test_repository', - imageManifest=json.dumps(_create_image_manifest()), - imageTag='latest' - ) - - error_msg = re.compile( - r".*Missing required parameter in input: \"imageIds\".*", - re.MULTILINE) - - client.batch_get_image.when.called_with( - repositoryName='test_repository').should.throw( - ParamValidationError, error_msg) +from __future__ import unicode_literals + +import hashlib +import json +from datetime import datetime +from random import random + +import re +import sure # noqa + +import boto3 +from botocore.exceptions import ClientError, ParamValidationError +from dateutil.tz import tzlocal + +from moto import mock_ecr + + +def _create_image_digest(contents=None): + if not contents: + contents = 'docker_image{0}'.format(int(random() * 10 ** 6)) + return "sha256:%s" % hashlib.sha256(contents.encode('utf-8')).hexdigest() + + +def _create_image_manifest(): + return { + "schemaVersion": 2, + "mediaType": "application/vnd.docker.distribution.manifest.v2+json", + "config": + { + "mediaType": "application/vnd.docker.container.image.v1+json", + "size": 7023, + "digest": _create_image_digest("config") + }, + "layers": [ + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 32654, + "digest": _create_image_digest("layer1") + }, + { + "mediaType": 
"application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 16724, + "digest": _create_image_digest("layer2") + }, + { + "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", + "size": 73109, + # randomize image digest + "digest": _create_image_digest() + } + ] + } + + +@mock_ecr +def test_create_repository(): + client = boto3.client('ecr', region_name='us-east-1') + response = client.create_repository( + repositoryName='test_ecr_repository' + ) + response['repository']['repositoryName'].should.equal('test_ecr_repository') + response['repository']['repositoryArn'].should.equal( + 'arn:aws:ecr:us-east-1:012345678910:repository/test_ecr_repository') + response['repository']['registryId'].should.equal('012345678910') + response['repository']['repositoryUri'].should.equal( + '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_ecr_repository') + # response['repository']['createdAt'].should.equal(0) + + +@mock_ecr +def test_describe_repositories(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository1' + ) + _ = client.create_repository( + repositoryName='test_repository0' + ) + response = client.describe_repositories() + len(response['repositories']).should.equal(2) + + respository_arns = ['arn:aws:ecr:us-east-1:012345678910:repository/test_repository1', + 'arn:aws:ecr:us-east-1:012345678910:repository/test_repository0'] + set([response['repositories'][0]['repositoryArn'], + response['repositories'][1]['repositoryArn']]).should.equal(set(respository_arns)) + + respository_uris = ['012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1', + '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository0'] + set([response['repositories'][0]['repositoryUri'], + response['repositories'][1]['repositoryUri']]).should.equal(set(respository_uris)) + + +@mock_ecr +def test_describe_repositories_1(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository1' + ) + _ = client.create_repository( + repositoryName='test_repository0' + ) + response = client.describe_repositories(registryId='012345678910') + len(response['repositories']).should.equal(2) + + respository_arns = ['arn:aws:ecr:us-east-1:012345678910:repository/test_repository1', + 'arn:aws:ecr:us-east-1:012345678910:repository/test_repository0'] + set([response['repositories'][0]['repositoryArn'], + response['repositories'][1]['repositoryArn']]).should.equal(set(respository_arns)) + + respository_uris = ['012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1', + '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository0'] + set([response['repositories'][0]['repositoryUri'], + response['repositories'][1]['repositoryUri']]).should.equal(set(respository_uris)) + + +@mock_ecr +def test_describe_repositories_2(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository1' + ) + _ = client.create_repository( + repositoryName='test_repository0' + ) + response = client.describe_repositories(registryId='109876543210') + len(response['repositories']).should.equal(0) + + +@mock_ecr +def test_describe_repositories_3(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository1' + ) + _ = client.create_repository( + repositoryName='test_repository0' + ) + response = client.describe_repositories(repositoryNames=['test_repository1']) + 
len(response['repositories']).should.equal(1) + respository_arn = 'arn:aws:ecr:us-east-1:012345678910:repository/test_repository1' + response['repositories'][0]['repositoryArn'].should.equal(respository_arn) + + respository_uri = '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1' + response['repositories'][0]['repositoryUri'].should.equal(respository_uri) + + +@mock_ecr +def test_describe_repositories_with_image(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + response = client.describe_repositories(repositoryNames=['test_repository']) + len(response['repositories']).should.equal(1) + + +@mock_ecr +def test_delete_repository(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + response = client.delete_repository(repositoryName='test_repository') + response['repository']['repositoryName'].should.equal('test_repository') + response['repository']['repositoryArn'].should.equal( + 'arn:aws:ecr:us-east-1:012345678910:repository/test_repository') + response['repository']['registryId'].should.equal('012345678910') + response['repository']['repositoryUri'].should.equal( + '012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository') + # response['repository']['createdAt'].should.equal(0) + + response = client.describe_repositories() + len(response['repositories']).should.equal(0) + + +@mock_ecr +def test_put_image(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + response = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + response['image']['imageId']['imageTag'].should.equal('latest') + response['image']['imageId']['imageDigest'].should.contain("sha") + response['image']['repositoryName'].should.equal('test_repository') + response['image']['registryId'].should.equal('012345678910') + +@mock_ecr +def test_put_image_with_multiple_tags(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + manifest = _create_image_manifest() + response = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag='v1' + ) + + response['image']['imageId']['imageTag'].should.equal('v1') + response['image']['imageId']['imageDigest'].should.contain("sha") + response['image']['repositoryName'].should.equal('test_repository') + response['image']['registryId'].should.equal('012345678910') + + response1 = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag='latest' + ) + + response1['image']['imageId']['imageTag'].should.equal('latest') + response1['image']['imageId']['imageDigest'].should.contain("sha") + response1['image']['repositoryName'].should.equal('test_repository') + response1['image']['registryId'].should.equal('012345678910') + + response2 = client.describe_images(repositoryName='test_repository') + type(response2['imageDetails']).should.be(list) + len(response2['imageDetails']).should.be(1) + + response2['imageDetails'][0]['imageDigest'].should.contain("sha") + + response2['imageDetails'][0]['registryId'].should.equal("012345678910") + + 
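# Why the single-image/two-tags outcome asserted just below holds: both
# put_image calls reuse one manifest object, so they serialise to the
# same JSON and hash to the same digest, and the tags accumulate on that
# one image. The digest identity, standalone:
import hashlib
import json

manifest = {'schemaVersion': 2, 'layers': []}  # stand-in manifest; any dict works

def digest_of(m):
    # same bytes in, same sha256 out
    return 'sha256:' + hashlib.sha256(json.dumps(m).encode('utf-8')).hexdigest()

assert digest_of(manifest) == digest_of(manifest)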
response2['imageDetails'][0]['repositoryName'].should.equal("test_repository") + + len(response2['imageDetails'][0]['imageTags']).should.be(2) + response2['imageDetails'][0]['imageTags'].should.be.equal(['v1', 'latest']) + +@mock_ecr +def test_list_images(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository_1' + ) + + _ = client.create_repository( + repositoryName='test_repository_2' + ) + + _ = client.put_image( + repositoryName='test_repository_1', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + _ = client.put_image( + repositoryName='test_repository_1', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v1' + ) + + _ = client.put_image( + repositoryName='test_repository_1', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v2' + ) + + _ = client.put_image( + repositoryName='test_repository_2', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='oldest' + ) + + response = client.list_images(repositoryName='test_repository_1') + type(response['imageIds']).should.be(list) + len(response['imageIds']).should.be(3) + + image_tags = ['latest', 'v1', 'v2'] + set([response['imageIds'][0]['imageTag'], + response['imageIds'][1]['imageTag'], + response['imageIds'][2]['imageTag']]).should.equal(set(image_tags)) + + response = client.list_images(repositoryName='test_repository_2') + type(response['imageIds']).should.be(list) + len(response['imageIds']).should.be(1) + response['imageIds'][0]['imageTag'].should.equal('oldest') + + +@mock_ecr +def test_list_images_from_repository_that_doesnt_exist(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository_1' + ) + + # non existing repo + error_msg = re.compile( + r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", + re.MULTILINE) + client.list_images.when.called_with( + repositoryName='repo-that-doesnt-exist', + registryId='123', + ).should.throw(Exception, error_msg) + + # repo does not exist in specified registry + error_msg = re.compile( + r".*The repository with name 'test_repository_1' does not exist in the registry with id '222'.*", + re.MULTILINE) + client.list_images.when.called_with( + repositoryName='test_repository_1', + registryId='222', + ).should.throw(Exception, error_msg) + + +@mock_ecr +def test_describe_images(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()) + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v1' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v2' + ) + + response = client.describe_images(repositoryName='test_repository') + type(response['imageDetails']).should.be(list) + len(response['imageDetails']).should.be(4) + + response['imageDetails'][0]['imageDigest'].should.contain("sha") + response['imageDetails'][1]['imageDigest'].should.contain("sha") + response['imageDetails'][2]['imageDigest'].should.contain("sha") + 
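# Two easy-to-miss details in this test: the tag-less put_image call at
# the top still creates its own entry (each _create_image_manifest call
# randomises a layer digest, so all four images are distinct), and an
# untagged entry carries no 'imageTags' key at all rather than an empty
# list -- hence the should_not.have.key assertion below. Checks that hold
# at this point ('response' is the describe_images result bound above):
untagged = [d for d in response['imageDetails'] if 'imageTags' not in d]
assert len(untagged) == 1

# The imageSizeInBytes asserted below is a fixed figure, not the sum of
# the manifest's 32654 + 16724 + 73109 layer bytes:
assert 52428800 == 50 * 1024 * 1024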
response['imageDetails'][3]['imageDigest'].should.contain("sha") + + response['imageDetails'][0]['registryId'].should.equal("012345678910") + response['imageDetails'][1]['registryId'].should.equal("012345678910") + response['imageDetails'][2]['registryId'].should.equal("012345678910") + response['imageDetails'][3]['registryId'].should.equal("012345678910") + + response['imageDetails'][0]['repositoryName'].should.equal("test_repository") + response['imageDetails'][1]['repositoryName'].should.equal("test_repository") + response['imageDetails'][2]['repositoryName'].should.equal("test_repository") + response['imageDetails'][3]['repositoryName'].should.equal("test_repository") + + response['imageDetails'][0].should_not.have.key('imageTags') + len(response['imageDetails'][1]['imageTags']).should.be(1) + len(response['imageDetails'][2]['imageTags']).should.be(1) + len(response['imageDetails'][3]['imageTags']).should.be(1) + + image_tags = ['latest', 'v1', 'v2'] + set([response['imageDetails'][1]['imageTags'][0], + response['imageDetails'][2]['imageTags'][0], + response['imageDetails'][3]['imageTags'][0]]).should.equal(set(image_tags)) + + response['imageDetails'][0]['imageSizeInBytes'].should.equal(52428800) + response['imageDetails'][1]['imageSizeInBytes'].should.equal(52428800) + response['imageDetails'][2]['imageSizeInBytes'].should.equal(52428800) + response['imageDetails'][3]['imageSizeInBytes'].should.equal(52428800) + + +@mock_ecr +def test_describe_images_by_tag(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + tag_map = {} + for tag in ['latest', 'v1', 'v2']: + put_response = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag=tag + ) + tag_map[tag] = put_response['image'] + + for tag, put_response in tag_map.items(): + response = client.describe_images(repositoryName='test_repository', imageIds=[{'imageTag': tag}]) + len(response['imageDetails']).should.be(1) + image_detail = response['imageDetails'][0] + image_detail['registryId'].should.equal("012345678910") + image_detail['repositoryName'].should.equal("test_repository") + image_detail['imageTags'].should.equal([put_response['imageId']['imageTag']]) + image_detail['imageDigest'].should.equal(put_response['imageId']['imageDigest']) + + +@mock_ecr +def test_describe_images_tags_should_not_contain_empty_tag1(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + manifest = _create_image_manifest() + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest) + ) + + tags = ['v1', 'v2', 'latest'] + for tag in tags: + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag=tag + ) + + response = client.describe_images(repositoryName='test_repository', imageIds=[{'imageTag': tag}]) + len(response['imageDetails']).should.be(1) + image_detail = response['imageDetails'][0] + len(image_detail['imageTags']).should.equal(3) + image_detail['imageTags'].should.be.equal(tags) + + +@mock_ecr +def test_describe_images_tags_should_not_contain_empty_tag2(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + manifest = _create_image_manifest() + tags = ['v1', 'v2'] + for tag in tags: + client.put_image( + repositoryName='test_repository', + 
imageManifest=json.dumps(manifest), + imageTag=tag + ) + + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest) + ) + + client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(manifest), + imageTag='latest' + ) + + response = client.describe_images(repositoryName='test_repository', imageIds=[{'imageTag': tag}]) + len(response['imageDetails']).should.be(1) + image_detail = response['imageDetails'][0] + len(image_detail['imageTags']).should.equal(3) + image_detail['imageTags'].should.be.equal(['v1', 'v2', 'latest']) + + +@mock_ecr +def test_describe_repository_that_doesnt_exist(): + client = boto3.client('ecr', region_name='us-east-1') + + error_msg = re.compile( + r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", + re.MULTILINE) + client.describe_repositories.when.called_with( + repositoryNames=['repo-that-doesnt-exist'], + registryId='123', + ).should.throw(ClientError, error_msg) + +@mock_ecr +def test_describe_image_that_doesnt_exist(): + client = boto3.client('ecr', region_name='us-east-1') + client.create_repository(repositoryName='test_repository') + + error_msg1 = re.compile( + r".*The image with imageId {imageDigest:'null', imageTag:'testtag'} does not exist within " + r"the repository with name 'test_repository' in the registry with id '123'.*", + re.MULTILINE) + + client.describe_images.when.called_with( + repositoryName='test_repository', imageIds=[{'imageTag': 'testtag'}], registryId='123', + ).should.throw(ClientError, error_msg1) + + error_msg2 = re.compile( + r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", + re.MULTILINE) + client.describe_images.when.called_with( + repositoryName='repo-that-doesnt-exist', imageIds=[{'imageTag': 'testtag'}], registryId='123', + ).should.throw(ClientError, error_msg2) + + +@mock_ecr +def test_delete_repository_that_doesnt_exist(): + client = boto3.client('ecr', region_name='us-east-1') + + error_msg = re.compile( + r".*The repository with name 'repo-that-doesnt-exist' does not exist in the registry with id '123'.*", + re.MULTILINE) + + client.delete_repository.when.called_with( + repositoryName='repo-that-doesnt-exist', + registryId='123').should.throw( + ClientError, error_msg) + + +@mock_ecr +def test_describe_images_by_digest(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + tags = ['latest', 'v1', 'v2'] + digest_map = {} + for tag in tags: + put_response = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag=tag + ) + digest_map[put_response['image']['imageId']['imageDigest']] = put_response['image'] + + for digest, put_response in digest_map.items(): + response = client.describe_images(repositoryName='test_repository', + imageIds=[{'imageDigest': digest}]) + len(response['imageDetails']).should.be(1) + image_detail = response['imageDetails'][0] + image_detail['registryId'].should.equal("012345678910") + image_detail['repositoryName'].should.equal("test_repository") + image_detail['imageTags'].should.equal([put_response['imageId']['imageTag']]) + image_detail['imageDigest'].should.equal(digest) + + +@mock_ecr +def test_get_authorization_token_assume_region(): + client = boto3.client('ecr', region_name='us-east-1') + auth_token_response = client.get_authorization_token() + + 
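# The authorizationData fixture asserted below is not opaque: the token
# is plain base64 over 'AWS:<registry id>-auth-token', mirroring the
# real service's 'AWS:<password>' convention. Decoded standalone:
import base64

token = 'QVdTOjAxMjM0NTY3ODkxMC1hdXRoLXRva2Vu'  # value from the assertion below
decoded = base64.b64decode(token).decode('utf-8')
assert decoded == 'AWS:012345678910-auth-token'

# docker login splits the decoded pair on ':' into user and password
username, password = decoded.split(':', 1)
assert username == 'AWS'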
auth_token_response.should.contain('authorizationData') + auth_token_response.should.contain('ResponseMetadata') + auth_token_response['authorizationData'].should.equal([ + { + 'authorizationToken': 'QVdTOjAxMjM0NTY3ODkxMC1hdXRoLXRva2Vu', + 'proxyEndpoint': 'https://012345678910.dkr.ecr.us-east-1.amazonaws.com', + 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()) + }, + ]) + + +@mock_ecr +def test_get_authorization_token_explicit_regions(): + client = boto3.client('ecr', region_name='us-east-1') + auth_token_response = client.get_authorization_token(registryIds=['10987654321', '878787878787']) + + auth_token_response.should.contain('authorizationData') + auth_token_response.should.contain('ResponseMetadata') + auth_token_response['authorizationData'].should.equal([ + { + 'authorizationToken': 'QVdTOjEwOTg3NjU0MzIxLWF1dGgtdG9rZW4=', + 'proxyEndpoint': 'https://10987654321.dkr.ecr.us-east-1.amazonaws.com', + 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()), + }, + { + 'authorizationToken': 'QVdTOjg3ODc4Nzg3ODc4Ny1hdXRoLXRva2Vu', + 'proxyEndpoint': 'https://878787878787.dkr.ecr.us-east-1.amazonaws.com', + 'expiresAt': datetime(2015, 1, 1, tzinfo=tzlocal()) + + } + ]) + + +@mock_ecr +def test_batch_get_image(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v1' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v2' + ) + + response = client.batch_get_image( + repositoryName='test_repository', + imageIds=[ + { + 'imageTag': 'v2' + }, + ], + ) + + type(response['images']).should.be(list) + len(response['images']).should.be(1) + + response['images'][0]['imageManifest'].should.contain("vnd.docker.distribution.manifest.v2+json") + response['images'][0]['registryId'].should.equal("012345678910") + response['images'][0]['repositoryName'].should.equal("test_repository") + + response['images'][0]['imageId']['imageTag'].should.equal("v2") + response['images'][0]['imageId']['imageDigest'].should.contain("sha") + + type(response['failures']).should.be(list) + len(response['failures']).should.be(0) + + +@mock_ecr +def test_batch_get_image_that_doesnt_exist(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v1' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v2' + ) + + response = client.batch_get_image( + repositoryName='test_repository', + imageIds=[ + { + 'imageTag': 'v5' + }, + ], + ) + + type(response['images']).should.be(list) + len(response['images']).should.be(0) + + type(response['failures']).should.be(list) + len(response['failures']).should.be(1) + response['failures'][0]['failureReason'].should.equal("Requested image not found") + response['failures'][0]['failureCode'].should.equal("ImageNotFound") + 
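# As these assertions show, batch_get_image is non-throwing for unknown
# ids: misses land in 'failures' with a failureCode, a failureReason and
# the offending imageId echoed back (its tag is re-checked just below).
# A hypothetical helper a caller could build on that contract:
def split_batch_get(response):
    # partition a batch_get_image response into hits keyed by tag and
    # the tags that were not found
    found = {img['imageId'].get('imageTag'): img for img in response['images']}
    missing = [f['imageId'].get('imageTag') for f in response['failures']]
    return found, missing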
response['failures'][0]['imageId']['imageTag'].should.equal("v5") + + +@mock_ecr +def test_batch_get_image_no_tags(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + error_msg = re.compile( + r".*Missing required parameter in input: \"imageIds\".*", + re.MULTILINE) + + client.batch_get_image.when.called_with( + repositoryName='test_repository').should.throw( + ParamValidationError, error_msg) diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 70c1463ee..4bdba40d0 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -1,2214 +1,2214 @@ -from __future__ import unicode_literals - -from copy import deepcopy - -from botocore.exceptions import ClientError -import boto3 -import sure # noqa -import json -from moto.ec2 import utils as ec2_utils -from uuid import UUID - -from moto import mock_cloudformation, mock_elbv2 -from moto import mock_ecs -from moto import mock_ec2 -from nose.tools import assert_raises - - -@mock_ecs -def test_create_cluster(): - client = boto3.client('ecs', region_name='us-east-1') - response = client.create_cluster( - clusterName='test_ecs_cluster' - ) - response['cluster']['clusterName'].should.equal('test_ecs_cluster') - response['cluster']['clusterArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['cluster']['status'].should.equal('ACTIVE') - response['cluster']['registeredContainerInstancesCount'].should.equal(0) - response['cluster']['runningTasksCount'].should.equal(0) - response['cluster']['pendingTasksCount'].should.equal(0) - response['cluster']['activeServicesCount'].should.equal(0) - - -@mock_ecs -def test_list_clusters(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.create_cluster( - clusterName='test_cluster0' - ) - _ = client.create_cluster( - clusterName='test_cluster1' - ) - response = client.list_clusters() - response['clusterArns'].should.contain( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster0') - response['clusterArns'].should.contain( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster1') - - -@mock_ecs -def test_delete_cluster(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.create_cluster( - clusterName='test_ecs_cluster' - ) - response = client.delete_cluster(cluster='test_ecs_cluster') - response['cluster']['clusterName'].should.equal('test_ecs_cluster') - response['cluster']['clusterArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['cluster']['status'].should.equal('ACTIVE') - response['cluster']['registeredContainerInstancesCount'].should.equal(0) - response['cluster']['runningTasksCount'].should.equal(0) - response['cluster']['pendingTasksCount'].should.equal(0) - response['cluster']['activeServicesCount'].should.equal(0) - - response = client.list_clusters() - len(response['clusterArns']).should.equal(0) - - -@mock_ecs -def test_register_task_definition(): - client = boto3.client('ecs', region_name='us-east-1') - response = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' 
- }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - type(response['taskDefinition']).should.be(dict) - response['taskDefinition']['revision'].should.equal(1) - response['taskDefinition']['taskDefinitionArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['taskDefinition']['containerDefinitions'][ - 0]['name'].should.equal('hello_world') - response['taskDefinition']['containerDefinitions'][0][ - 'image'].should.equal('docker/hello-world:latest') - response['taskDefinition']['containerDefinitions'][ - 0]['cpu'].should.equal(1024) - response['taskDefinition']['containerDefinitions'][ - 0]['memory'].should.equal(400) - response['taskDefinition']['containerDefinitions'][ - 0]['essential'].should.equal(True) - response['taskDefinition']['containerDefinitions'][0][ - 'environment'][0]['name'].should.equal('AWS_ACCESS_KEY_ID') - response['taskDefinition']['containerDefinitions'][0][ - 'environment'][0]['value'].should.equal('SOME_ACCESS_KEY') - response['taskDefinition']['containerDefinitions'][0][ - 'logConfiguration']['logDriver'].should.equal('json-file') - - -@mock_ecs -def test_list_task_definitions(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world2', - 'image': 'docker/hello-world2:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY2' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - response = client.list_task_definitions() - len(response['taskDefinitionArns']).should.equal(2) - response['taskDefinitionArns'][0].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['taskDefinitionArns'][1].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2') - - -@mock_ecs -def test_describe_task_definition(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world2', - 'image': 'docker/hello-world2:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY2' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world3', - 'image': 'docker/hello-world3:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY3' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - response = 
client.describe_task_definition(taskDefinition='test_ecs_task') - response['taskDefinition']['taskDefinitionArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:3') - - response = client.describe_task_definition( - taskDefinition='test_ecs_task:2') - response['taskDefinition']['taskDefinitionArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2') - - -@mock_ecs -def test_deregister_task_definition(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - response = client.deregister_task_definition( - taskDefinition='test_ecs_task:1' - ) - type(response['taskDefinition']).should.be(dict) - response['taskDefinition']['taskDefinitionArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['taskDefinition']['containerDefinitions'][ - 0]['name'].should.equal('hello_world') - response['taskDefinition']['containerDefinitions'][0][ - 'image'].should.equal('docker/hello-world:latest') - response['taskDefinition']['containerDefinitions'][ - 0]['cpu'].should.equal(1024) - response['taskDefinition']['containerDefinitions'][ - 0]['memory'].should.equal(400) - response['taskDefinition']['containerDefinitions'][ - 0]['essential'].should.equal(True) - response['taskDefinition']['containerDefinitions'][0][ - 'environment'][0]['name'].should.equal('AWS_ACCESS_KEY_ID') - response['taskDefinition']['containerDefinitions'][0][ - 'environment'][0]['value'].should.equal('SOME_ACCESS_KEY') - response['taskDefinition']['containerDefinitions'][0][ - 'logConfiguration']['logDriver'].should.equal('json-file') - - -@mock_ecs -def test_create_service(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.create_cluster( - clusterName='test_ecs_cluster' - ) - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - response = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service', - taskDefinition='test_ecs_task', - desiredCount=2 - ) - response['service']['clusterArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['service']['desiredCount'].should.equal(2) - len(response['service']['events']).should.equal(0) - len(response['service']['loadBalancers']).should.equal(0) - response['service']['pendingCount'].should.equal(0) - response['service']['runningCount'].should.equal(0) - response['service']['serviceArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') - response['service']['serviceName'].should.equal('test_ecs_service') - response['service']['status'].should.equal('ACTIVE') - response['service']['taskDefinition'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['service']['schedulingStrategy'].should.equal('REPLICA') - -@mock_ecs -def test_create_service_scheduling_strategy(): - 
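# Context for this test: test_create_service above never passes
# schedulingStrategy and still asserts 'REPLICA', i.e. REPLICA is the
# default; this test pins the explicit 'DAEMON' path. On live ECS a
# DAEMON service places one task on each active container instance and
# derives its counts from the fleet, so desiredCount is not meaningful
# there -- the desiredCount=2 below exercises the mock's handling rather
# than mirroring the real API.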
client = boto3.client('ecs', region_name='us-east-1') - _ = client.create_cluster( - clusterName='test_ecs_cluster' - ) - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - response = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service', - taskDefinition='test_ecs_task', - desiredCount=2, - schedulingStrategy='DAEMON', - ) - response['service']['clusterArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['service']['desiredCount'].should.equal(2) - len(response['service']['events']).should.equal(0) - len(response['service']['loadBalancers']).should.equal(0) - response['service']['pendingCount'].should.equal(0) - response['service']['runningCount'].should.equal(0) - response['service']['serviceArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') - response['service']['serviceName'].should.equal('test_ecs_service') - response['service']['status'].should.equal('ACTIVE') - response['service']['taskDefinition'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['service']['schedulingStrategy'].should.equal('DAEMON') - - -@mock_ecs -def test_list_services(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.create_cluster( - clusterName='test_ecs_cluster' - ) - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - _ = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service1', - taskDefinition='test_ecs_task', - desiredCount=2 - ) - _ = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service2', - taskDefinition='test_ecs_task', - desiredCount=2 - ) - response = client.list_services( - cluster='test_ecs_cluster' - ) - len(response['serviceArns']).should.equal(2) - response['serviceArns'][0].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') - response['serviceArns'][1].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') - - -@mock_ecs -def test_describe_services(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.create_cluster( - clusterName='test_ecs_cluster' - ) - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - _ = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service1', - taskDefinition='test_ecs_task', - desiredCount=2 - ) - _ = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service2', - taskDefinition='test_ecs_task', - desiredCount=2 - ) - _ = client.create_service( - cluster='test_ecs_cluster', - 
serviceName='test_ecs_service3', - taskDefinition='test_ecs_task', - desiredCount=2 - ) - response = client.describe_services( - cluster='test_ecs_cluster', - services=['test_ecs_service1', - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2'] - ) - len(response['services']).should.equal(2) - response['services'][0]['serviceArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') - response['services'][0]['serviceName'].should.equal('test_ecs_service1') - response['services'][1]['serviceArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') - response['services'][1]['serviceName'].should.equal('test_ecs_service2') - - response['services'][0]['deployments'][0]['desiredCount'].should.equal(2) - response['services'][0]['deployments'][0]['pendingCount'].should.equal(2) - response['services'][0]['deployments'][0]['runningCount'].should.equal(0) - response['services'][0]['deployments'][0]['status'].should.equal('PRIMARY') - - -@mock_ecs -def test_describe_services_scheduling_strategy(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.create_cluster( - clusterName='test_ecs_cluster' - ) - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - _ = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service1', - taskDefinition='test_ecs_task', - desiredCount=2 - ) - _ = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service2', - taskDefinition='test_ecs_task', - desiredCount=2, - schedulingStrategy='DAEMON' - ) - _ = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service3', - taskDefinition='test_ecs_task', - desiredCount=2 - ) - response = client.describe_services( - cluster='test_ecs_cluster', - services=['test_ecs_service1', - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2', - 'test_ecs_service3'] - ) - len(response['services']).should.equal(3) - response['services'][0]['serviceArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') - response['services'][0]['serviceName'].should.equal('test_ecs_service1') - response['services'][1]['serviceArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') - response['services'][1]['serviceName'].should.equal('test_ecs_service2') - - response['services'][0]['deployments'][0]['desiredCount'].should.equal(2) - response['services'][0]['deployments'][0]['pendingCount'].should.equal(2) - response['services'][0]['deployments'][0]['runningCount'].should.equal(0) - response['services'][0]['deployments'][0]['status'].should.equal('PRIMARY') - - response['services'][0]['schedulingStrategy'].should.equal('REPLICA') - response['services'][1]['schedulingStrategy'].should.equal('DAEMON') - response['services'][2]['schedulingStrategy'].should.equal('REPLICA') - - -@mock_ecs -def test_update_service(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.create_cluster( - clusterName='test_ecs_cluster' - ) - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': 
True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - response = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service', - taskDefinition='test_ecs_task', - desiredCount=2 - ) - response['service']['desiredCount'].should.equal(2) - - response = client.update_service( - cluster='test_ecs_cluster', - service='test_ecs_service', - taskDefinition='test_ecs_task', - desiredCount=0 - ) - response['service']['desiredCount'].should.equal(0) - response['service']['schedulingStrategy'].should.equal('REPLICA') - - -@mock_ecs -def test_update_missing_service(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.create_cluster( - clusterName='test_ecs_cluster' - ) - - client.update_service.when.called_with( - cluster='test_ecs_cluster', - service='test_ecs_service', - taskDefinition='test_ecs_task', - desiredCount=0 - ).should.throw(ClientError) - - -@mock_ecs -def test_delete_service(): - client = boto3.client('ecs', region_name='us-east-1') - _ = client.create_cluster( - clusterName='test_ecs_cluster' - ) - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - _ = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service', - taskDefinition='test_ecs_task', - desiredCount=2 - ) - _ = client.update_service( - cluster='test_ecs_cluster', - service='test_ecs_service', - desiredCount=0 - ) - response = client.delete_service( - cluster='test_ecs_cluster', - service='test_ecs_service' - ) - response['service']['clusterArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['service']['desiredCount'].should.equal(0) - len(response['service']['events']).should.equal(0) - len(response['service']['loadBalancers']).should.equal(0) - response['service']['pendingCount'].should.equal(0) - response['service']['runningCount'].should.equal(0) - response['service']['serviceArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') - response['service']['serviceName'].should.equal('test_ecs_service') - response['service']['status'].should.equal('ACTIVE') - response['service']['schedulingStrategy'].should.equal('REPLICA') - response['service']['taskDefinition'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - - - -@mock_ec2 -@mock_ecs -def test_register_container_instance(): - ecs_client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = ecs_client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = ecs_client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - response['containerInstance'][ - 'ec2InstanceId'].should.equal(test_instance.id) - full_arn = response['containerInstance']['containerInstanceArn'] - arn_part = full_arn.split('/') - 
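# The split below separates the fixed ARN prefix from the trailing
# instance id; str(UUID(x)) == x doubles as a well-formedness check,
# since it only round-trips for a canonical lower-case UUID.
# Standalone illustration (uuid4 stands in for the id moto generates):
from uuid import UUID, uuid4
sample = 'arn:aws:ecs:us-east-1:012345678910:container-instance/{0}'.format(uuid4())
prefix, _, instance_id = sample.rpartition('/')
assert prefix == 'arn:aws:ecs:us-east-1:012345678910:container-instance'
assert str(UUID(instance_id)) == instance_id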
arn_part[0].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:container-instance') - arn_part[1].should.equal(str(UUID(arn_part[1]))) - response['containerInstance']['status'].should.equal('ACTIVE') - len(response['containerInstance']['registeredResources']).should.equal(4) - len(response['containerInstance']['remainingResources']).should.equal(4) - response['containerInstance']['agentConnected'].should.equal(True) - response['containerInstance']['versionInfo'][ - 'agentVersion'].should.equal('1.0.0') - response['containerInstance']['versionInfo'][ - 'agentHash'].should.equal('4023248') - response['containerInstance']['versionInfo'][ - 'dockerVersion'].should.equal('DockerVersion: 1.5.0') - - -@mock_ec2 -@mock_ecs -def test_deregister_container_instance(): - ecs_client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = ecs_client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = ecs_client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - container_instance_id = response['containerInstance']['containerInstanceArn'] - response = ecs_client.deregister_container_instance( - cluster=test_cluster_name, - containerInstance=container_instance_id - ) - container_instances_response = ecs_client.list_container_instances( - cluster=test_cluster_name - ) - len(container_instances_response['containerInstanceArns']).should.equal(0) - - response = ecs_client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - container_instance_id = response['containerInstance']['containerInstanceArn'] - _ = ecs_client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - - response = ecs_client.start_task( - cluster='test_ecs_cluster', - taskDefinition='test_ecs_task', - overrides={}, - containerInstances=[container_instance_id], - startedBy='moto' - ) - with assert_raises(Exception) as e: - ecs_client.deregister_container_instance( - cluster=test_cluster_name, - containerInstance=container_instance_id - ).should.have.raised(Exception) - container_instances_response = ecs_client.list_container_instances( - cluster=test_cluster_name - ) - len(container_instances_response['containerInstanceArns']).should.equal(1) - ecs_client.deregister_container_instance( - cluster=test_cluster_name, - containerInstance=container_instance_id, - force=True - ) - container_instances_response = ecs_client.list_container_instances( - cluster=test_cluster_name - ) - len(container_instances_response['containerInstanceArns']).should.equal(0) - - -@mock_ec2 -@mock_ecs -def test_list_container_instances(): - ecs_client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - _ = ecs_client.create_cluster( - clusterName=test_cluster_name - ) - - instance_to_create = 3 - test_instance_arns = [] - for i in range(0, 
instance_to_create): - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = ecs_client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document) - - test_instance_arns.append(response['containerInstance'][ - 'containerInstanceArn']) - - response = ecs_client.list_container_instances(cluster=test_cluster_name) - - len(response['containerInstanceArns']).should.equal(instance_to_create) - for arn in test_instance_arns: - response['containerInstanceArns'].should.contain(arn) - - -@mock_ec2 -@mock_ecs -def test_describe_container_instances(): - ecs_client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - _ = ecs_client.create_cluster( - clusterName=test_cluster_name - ) - - instance_to_create = 3 - test_instance_arns = [] - for i in range(0, instance_to_create): - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = ecs_client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document) - - test_instance_arns.append(response['containerInstance'][ - 'containerInstanceArn']) - - test_instance_ids = list( - map((lambda x: x.split('/')[1]), test_instance_arns)) - response = ecs_client.describe_container_instances( - cluster=test_cluster_name, containerInstances=test_instance_ids) - len(response['failures']).should.equal(0) - len(response['containerInstances']).should.equal(instance_to_create) - response_arns = [ci['containerInstanceArn'] - for ci in response['containerInstances']] - for arn in test_instance_arns: - response_arns.should.contain(arn) - for instance in response['containerInstances']: - instance.keys().should.contain('runningTasksCount') - instance.keys().should.contain('pendingTasksCount') - - -@mock_ec2 -@mock_ecs -def test_update_container_instances_state(): - ecs_client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - _ = ecs_client.create_cluster( - clusterName=test_cluster_name - ) - - instance_to_create = 3 - test_instance_arns = [] - for i in range(0, instance_to_create): - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = ecs_client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document) - - test_instance_arns.append(response['containerInstance']['containerInstanceArn']) - - test_instance_ids = list(map((lambda x: x.split('/')[1]), test_instance_arns)) - response = ecs_client.update_container_instances_state(cluster=test_cluster_name, - containerInstances=test_instance_ids, - status='DRAINING') - len(response['failures']).should.equal(0) - len(response['containerInstances']).should.equal(instance_to_create) - response_statuses = [ci['status'] for ci in response['containerInstances']] - for status in response_statuses: - status.should.equal('DRAINING') - response = ecs_client.update_container_instances_state(cluster=test_cluster_name, - 
containerInstances=test_instance_ids, - status='DRAINING') - len(response['failures']).should.equal(0) - len(response['containerInstances']).should.equal(instance_to_create) - response_statuses = [ci['status'] for ci in response['containerInstances']] - for status in response_statuses: - status.should.equal('DRAINING') - response = ecs_client.update_container_instances_state(cluster=test_cluster_name, - containerInstances=test_instance_ids, - status='ACTIVE') - len(response['failures']).should.equal(0) - len(response['containerInstances']).should.equal(instance_to_create) - response_statuses = [ci['status'] for ci in response['containerInstances']] - for status in response_statuses: - status.should.equal('ACTIVE') - ecs_client.update_container_instances_state.when.called_with(cluster=test_cluster_name, - containerInstances=test_instance_ids, - status='test_status').should.throw(Exception) - - -@mock_ec2 -@mock_ecs -def test_run_task(): - client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - response = client.run_task( - cluster='test_ecs_cluster', - overrides={}, - taskDefinition='test_ecs_task', - count=2, - startedBy='moto' - ) - len(response['tasks']).should.equal(2) - response['tasks'][0]['taskArn'].should.contain( - 'arn:aws:ecs:us-east-1:012345678910:task/') - response['tasks'][0]['clusterArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['tasks'][0]['taskDefinitionArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['tasks'][0]['containerInstanceArn'].should.contain( - 'arn:aws:ecs:us-east-1:012345678910:container-instance/') - response['tasks'][0]['overrides'].should.equal({}) - response['tasks'][0]['lastStatus'].should.equal("RUNNING") - response['tasks'][0]['desiredStatus'].should.equal("RUNNING") - response['tasks'][0]['startedBy'].should.equal("moto") - response['tasks'][0]['stoppedReason'].should.equal("") - - -@mock_ec2 -@mock_ecs -def test_start_task(): - client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - container_instances = client.list_container_instances( - cluster=test_cluster_name) - container_instance_id = 
container_instances[ - 'containerInstanceArns'][0].split('/')[-1] - - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - - response = client.start_task( - cluster='test_ecs_cluster', - taskDefinition='test_ecs_task', - overrides={}, - containerInstances=[container_instance_id], - startedBy='moto' - ) - - len(response['tasks']).should.equal(1) - response['tasks'][0]['taskArn'].should.contain( - 'arn:aws:ecs:us-east-1:012345678910:task/') - response['tasks'][0]['clusterArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['tasks'][0]['taskDefinitionArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['tasks'][0]['containerInstanceArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:container-instance/{0}'.format(container_instance_id)) - response['tasks'][0]['overrides'].should.equal({}) - response['tasks'][0]['lastStatus'].should.equal("RUNNING") - response['tasks'][0]['desiredStatus'].should.equal("RUNNING") - response['tasks'][0]['startedBy'].should.equal("moto") - response['tasks'][0]['stoppedReason'].should.equal("") - - -@mock_ec2 -@mock_ecs -def test_list_tasks(): - client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - _ = client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - container_instances = client.list_container_instances( - cluster=test_cluster_name) - container_instance_id = container_instances[ - 'containerInstanceArns'][0].split('/')[-1] - - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - - _ = client.start_task( - cluster='test_ecs_cluster', - taskDefinition='test_ecs_task', - overrides={}, - containerInstances=[container_instance_id], - startedBy='foo' - ) - - _ = client.start_task( - cluster='test_ecs_cluster', - taskDefinition='test_ecs_task', - overrides={}, - containerInstances=[container_instance_id], - startedBy='bar' - ) - - assert len(client.list_tasks()['taskArns']).should.equal(2) - assert len(client.list_tasks(cluster='test_ecs_cluster') - ['taskArns']).should.equal(2) - assert len(client.list_tasks(startedBy='foo')['taskArns']).should.equal(1) - - -@mock_ec2 -@mock_ecs -def test_describe_tasks(): - client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - 
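# Every EC2-backed test in this file repeats the same fixture dance:
# launch an instance, serialise its identity document, register it as a
# container instance. A hypothetical helper that would collapse the
# boilerplate to one call (json and ec2_utils are already imported at
# the top of this module):
def register_test_instance(ecs_client, ec2_resource, cluster_name):
    instance = ec2_resource.create_instances(
        ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0]
    doc = json.dumps(ec2_utils.generate_instance_identity_document(instance))
    return ecs_client.register_container_instance(
        cluster=cluster_name, instanceIdentityDocument=doc)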
instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - tasks_arns = [ - task['taskArn'] for task in client.run_task( - cluster='test_ecs_cluster', - overrides={}, - taskDefinition='test_ecs_task', - count=2, - startedBy='moto' - )['tasks'] - ] - response = client.describe_tasks( - cluster='test_ecs_cluster', - tasks=tasks_arns - ) - - len(response['tasks']).should.equal(2) - set([response['tasks'][0]['taskArn'], response['tasks'] - [1]['taskArn']]).should.equal(set(tasks_arns)) - - # Test we can pass task ids instead of ARNs - response = client.describe_tasks( - cluster='test_ecs_cluster', - tasks=[tasks_arns[0].split("/")[-1]] - ) - len(response['tasks']).should.equal(1) - - -@mock_ecs -def describe_task_definition(): - client = boto3.client('ecs', region_name='us-east-1') - container_definition = { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - task_definition = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[container_definition] - ) - family = task_definition['family'] - task = client.describe_task_definition(taskDefinition=family) - task['containerDefinitions'][0].should.equal(container_definition) - task['taskDefinitionArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task2:1') - task['volumes'].should.equal([]) - - -@mock_ec2 -@mock_ecs -def test_stop_task(): - client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - _ = client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - run_response = client.run_task( - cluster='test_ecs_cluster', - overrides={}, - taskDefinition='test_ecs_task', - count=1, - startedBy='moto' - ) - stop_response = client.stop_task( - cluster='test_ecs_cluster', - task=run_response['tasks'][0].get('taskArn'), - reason='moto testing' - ) - - stop_response['task']['taskArn'].should.equal( - run_response['tasks'][0].get('taskArn')) - stop_response['task']['lastStatus'].should.equal('STOPPED') - stop_response['task']['desiredStatus'].should.equal('STOPPED') - 
stop_response['task']['stoppedReason'].should.equal('moto testing') - - -@mock_ec2 -@mock_ecs -def test_resource_reservation_and_release(): - client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - _ = client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'}, - 'portMappings': [ - { - 'hostPort': 80, - 'containerPort': 8080 - } - ] - } - ] - ) - run_response = client.run_task( - cluster='test_ecs_cluster', - overrides={}, - taskDefinition='test_ecs_task', - count=1, - startedBy='moto' - ) - container_instance_arn = run_response['tasks'][0].get('containerInstanceArn') - container_instance_description = client.describe_container_instances( - cluster='test_ecs_cluster', - containerInstances=[container_instance_arn] - )['containerInstances'][0] - remaining_resources, registered_resources = _fetch_container_instance_resources( - container_instance_description) - remaining_resources['CPU'].should.equal(registered_resources['CPU'] - 1024) - remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY'] - 400) - registered_resources['PORTS'].append('80') - remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) - container_instance_description['runningTasksCount'].should.equal(1) - client.stop_task( - cluster='test_ecs_cluster', - task=run_response['tasks'][0].get('taskArn'), - reason='moto testing' - ) - container_instance_description = client.describe_container_instances( - cluster='test_ecs_cluster', - containerInstances=[container_instance_arn] - )['containerInstances'][0] - remaining_resources, registered_resources = _fetch_container_instance_resources( - container_instance_description) - remaining_resources['CPU'].should.equal(registered_resources['CPU']) - remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY']) - remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) - container_instance_description['runningTasksCount'].should.equal(0) - -@mock_ec2 -@mock_ecs -def test_resource_reservation_and_release_memory_reservation(): - client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - _ = client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'memoryReservation': 400, - 
'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'}, - 'portMappings': [ - { - 'containerPort': 8080 - } - ] - } - ] - ) - run_response = client.run_task( - cluster='test_ecs_cluster', - overrides={}, - taskDefinition='test_ecs_task', - count=1, - startedBy='moto' - ) - container_instance_arn = run_response['tasks'][0].get('containerInstanceArn') - container_instance_description = client.describe_container_instances( - cluster='test_ecs_cluster', - containerInstances=[container_instance_arn] - )['containerInstances'][0] - remaining_resources, registered_resources = _fetch_container_instance_resources(container_instance_description) - remaining_resources['CPU'].should.equal(registered_resources['CPU']) - remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY'] - 400) - remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) - container_instance_description['runningTasksCount'].should.equal(1) - client.stop_task( - cluster='test_ecs_cluster', - task=run_response['tasks'][0].get('taskArn'), - reason='moto testing' - ) - container_instance_description = client.describe_container_instances( - cluster='test_ecs_cluster', - containerInstances=[container_instance_arn] - )['containerInstances'][0] - remaining_resources, registered_resources = _fetch_container_instance_resources(container_instance_description) - remaining_resources['CPU'].should.equal(registered_resources['CPU']) - remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY']) - remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) - container_instance_description['runningTasksCount'].should.equal(0) - - - -@mock_ecs -@mock_cloudformation -def test_create_cluster_through_cloudformation(): - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testCluster": { - "Type": "AWS::ECS::Cluster", - "Properties": { - "ClusterName": "testcluster" - } - } - } - } - template_json = json.dumps(template) - - ecs_conn = boto3.client('ecs', region_name='us-west-1') - resp = ecs_conn.list_clusters() - len(resp['clusterArns']).should.equal(0) - - cfn_conn = boto3.client('cloudformation', region_name='us-west-1') - cfn_conn.create_stack( - StackName="test_stack", - TemplateBody=template_json, - ) - - resp = ecs_conn.list_clusters() - len(resp['clusterArns']).should.equal(1) - - -@mock_ecs -@mock_cloudformation -def test_create_cluster_through_cloudformation_no_name(): - # cloudformation should create a cluster name for you if you do not provide it - # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-cluster.html#cfn-ecs-cluster-clustername - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testCluster": { - "Type": "AWS::ECS::Cluster", - } - } - } - template_json = json.dumps(template) - cfn_conn = boto3.client('cloudformation', region_name='us-west-1') - cfn_conn.create_stack( - StackName="test_stack", - TemplateBody=template_json, - ) - - ecs_conn = boto3.client('ecs', region_name='us-west-1') - resp = ecs_conn.list_clusters() - len(resp['clusterArns']).should.equal(1) - - -@mock_ecs -@mock_cloudformation -def test_update_cluster_name_through_cloudformation_should_trigger_a_replacement(): - template1 = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - 
"Resources": { - "testCluster": { - "Type": "AWS::ECS::Cluster", - "Properties": { - "ClusterName": "testcluster1" - } - } - } - } - template2 = deepcopy(template1) - template2['Resources']['testCluster'][ - 'Properties']['ClusterName'] = 'testcluster2' - template1_json = json.dumps(template1) - cfn_conn = boto3.client('cloudformation', region_name='us-west-1') - stack_resp = cfn_conn.create_stack( - StackName="test_stack", - TemplateBody=template1_json, - ) - - template2_json = json.dumps(template2) - cfn_conn.update_stack( - StackName=stack_resp['StackId'], - TemplateBody=template2_json - ) - ecs_conn = boto3.client('ecs', region_name='us-west-1') - resp = ecs_conn.list_clusters() - len(resp['clusterArns']).should.equal(1) - resp['clusterArns'][0].endswith('testcluster2').should.be.true - - -@mock_ecs -@mock_cloudformation -def test_create_task_definition_through_cloudformation(): - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testTaskDefinition": { - "Type": "AWS::ECS::TaskDefinition", - "Properties": { - "ContainerDefinitions": [ - { - "Name": "ecs-sample", - "Image": "amazon/amazon-ecs-sample", - "Cpu": "200", - "Memory": "500", - "Essential": "true" - } - ], - "Volumes": [], - } - } - } - } - template_json = json.dumps(template) - cfn_conn = boto3.client('cloudformation', region_name='us-west-1') - stack_name = 'test_stack' - cfn_conn.create_stack( - StackName=stack_name, - TemplateBody=template_json, - ) - - ecs_conn = boto3.client('ecs', region_name='us-west-1') - resp = ecs_conn.list_task_definitions() - len(resp['taskDefinitionArns']).should.equal(1) - task_definition_arn = resp['taskDefinitionArns'][0] - - task_definition_details = cfn_conn.describe_stack_resource( - StackName=stack_name,LogicalResourceId='testTaskDefinition')['StackResourceDetail'] - task_definition_details['PhysicalResourceId'].should.equal(task_definition_arn) - -@mock_ec2 -@mock_ecs -def test_task_definitions_unable_to_be_placed(): - client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 5000, - 'memory': 40000, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - response = client.run_task( - cluster='test_ecs_cluster', - overrides={}, - taskDefinition='test_ecs_task', - count=2, - startedBy='moto' - ) - len(response['tasks']).should.equal(0) - - -@mock_ec2 -@mock_ecs -def test_task_definitions_with_port_clash(): - client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - 
instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - _ = client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 256, - 'memory': 512, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'}, - 'portMappings': [ - { - 'hostPort': 80, - 'containerPort': 8080 - } - ] - } - ] - ) - response = client.run_task( - cluster='test_ecs_cluster', - overrides={}, - taskDefinition='test_ecs_task', - count=2, - startedBy='moto' - ) - len(response['tasks']).should.equal(1) - response['tasks'][0]['taskArn'].should.contain( - 'arn:aws:ecs:us-east-1:012345678910:task/') - response['tasks'][0]['clusterArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['tasks'][0]['taskDefinitionArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') - response['tasks'][0]['containerInstanceArn'].should.contain( - 'arn:aws:ecs:us-east-1:012345678910:container-instance/') - response['tasks'][0]['overrides'].should.equal({}) - response['tasks'][0]['lastStatus'].should.equal("RUNNING") - response['tasks'][0]['desiredStatus'].should.equal("RUNNING") - response['tasks'][0]['startedBy'].should.equal("moto") - response['tasks'][0]['stoppedReason'].should.equal("") - - -@mock_ecs -@mock_cloudformation -def test_update_task_definition_family_through_cloudformation_should_trigger_a_replacement(): - template1 = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testTaskDefinition": { - "Type": "AWS::ECS::TaskDefinition", - "Properties": { - "Family": "testTaskDefinition1", - "ContainerDefinitions": [ - { - "Name": "ecs-sample", - "Image": "amazon/amazon-ecs-sample", - "Cpu": "200", - "Memory": "500", - "Essential": "true" - } - ], - "Volumes": [], - } - } - } - } - template1_json = json.dumps(template1) - cfn_conn = boto3.client('cloudformation', region_name='us-west-1') - cfn_conn.create_stack( - StackName="test_stack", - TemplateBody=template1_json, - ) - - template2 = deepcopy(template1) - template2['Resources']['testTaskDefinition'][ - 'Properties']['Family'] = 'testTaskDefinition2' - template2_json = json.dumps(template2) - cfn_conn.update_stack( - StackName="test_stack", - TemplateBody=template2_json, - ) - - ecs_conn = boto3.client('ecs', region_name='us-west-1') - resp = ecs_conn.list_task_definitions(familyPrefix='testTaskDefinition') - len(resp['taskDefinitionArns']).should.equal(1) - resp['taskDefinitionArns'][0].endswith( - 'testTaskDefinition2:1').should.be.true - - -@mock_ecs -@mock_cloudformation -def test_create_service_through_cloudformation(): - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testCluster": { - "Type": "AWS::ECS::Cluster", - "Properties": { - "ClusterName": "testcluster" - } - }, - "testTaskDefinition": { - "Type": "AWS::ECS::TaskDefinition", - "Properties": { - "ContainerDefinitions": [ - { - "Name": "ecs-sample", - "Image": "amazon/amazon-ecs-sample", - "Cpu": "200", - "Memory": "500", - "Essential": "true" - } - ], - "Volumes": [], - } - }, - "testService": { - "Type": "AWS::ECS::Service", - 
"Properties": { - "Cluster": {"Ref": "testCluster"}, - "DesiredCount": 10, - "TaskDefinition": {"Ref": "testTaskDefinition"}, - } - } - } - } - template_json = json.dumps(template) - cfn_conn = boto3.client('cloudformation', region_name='us-west-1') - cfn_conn.create_stack( - StackName="test_stack", - TemplateBody=template_json, - ) - - ecs_conn = boto3.client('ecs', region_name='us-west-1') - resp = ecs_conn.list_services(cluster='testcluster') - len(resp['serviceArns']).should.equal(1) - - -@mock_ecs -@mock_cloudformation -def test_update_service_through_cloudformation_should_trigger_replacement(): - template1 = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testCluster": { - "Type": "AWS::ECS::Cluster", - "Properties": { - "ClusterName": "testcluster" - } - }, - "testTaskDefinition": { - "Type": "AWS::ECS::TaskDefinition", - "Properties": { - "ContainerDefinitions": [ - { - "Name": "ecs-sample", - "Image": "amazon/amazon-ecs-sample", - "Cpu": "200", - "Memory": "500", - "Essential": "true" - } - ], - "Volumes": [], - } - }, - "testService": { - "Type": "AWS::ECS::Service", - "Properties": { - "Cluster": {"Ref": "testCluster"}, - "TaskDefinition": {"Ref": "testTaskDefinition"}, - "DesiredCount": 10, - } - } - } - } - template_json1 = json.dumps(template1) - cfn_conn = boto3.client('cloudformation', region_name='us-west-1') - cfn_conn.create_stack( - StackName="test_stack", - TemplateBody=template_json1, - ) - template2 = deepcopy(template1) - template2['Resources']['testService']['Properties']['DesiredCount'] = 5 - template2_json = json.dumps(template2) - cfn_conn.update_stack( - StackName="test_stack", - TemplateBody=template2_json, - ) - - ecs_conn = boto3.client('ecs', region_name='us-west-1') - resp = ecs_conn.list_services(cluster='testcluster') - len(resp['serviceArns']).should.equal(1) - - -@mock_ec2 -@mock_ecs -def test_attributes(): - # Combined put, list delete attributes into the same test due to the amount of setup - ecs_client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - _ = ecs_client.create_cluster( - clusterName=test_cluster_name - ) - - instances = [] - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - instances.append(test_instance) - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = ecs_client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - response['containerInstance'][ - 'ec2InstanceId'].should.equal(test_instance.id) - full_arn1 = response['containerInstance']['containerInstanceArn'] - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - instances.append(test_instance) - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - response = ecs_client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - response['containerInstance'][ - 'ec2InstanceId'].should.equal(test_instance.id) - full_arn2 = response['containerInstance']['containerInstanceArn'] - partial_arn2 = full_arn2.rsplit('/', 1)[-1] - - full_arn2.should_not.equal(full_arn1) # uuid1 isnt unique enough when the pc is fast ;-) - - # Ok set instance 1 with 1 attribute, instance 2 with another, and 
all of them with a 3rd. - ecs_client.put_attributes( - cluster=test_cluster_name, - attributes=[ - {'name': 'env', 'value': 'prod'}, - {'name': 'attr1', 'value': 'instance1', 'targetId': full_arn1}, - {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, - 'targetType': 'container-instance'} - ] - ) - - resp = ecs_client.list_attributes( - cluster=test_cluster_name, - targetType='container-instance' - ) - attrs = resp['attributes'] - - NUM_CUSTOM_ATTRIBUTES = 4 # 2 specific to individual machines and 1 global, going to both machines (2 + 1*2) - NUM_DEFAULT_ATTRIBUTES = 4 - len(attrs).should.equal(NUM_CUSTOM_ATTRIBUTES + (NUM_DEFAULT_ATTRIBUTES * len(instances))) - - # Tests that the attrs have been set properly - len(list(filter(lambda item: item['name'] == 'env', attrs))).should.equal(2) - len(list( - filter(lambda item: item['name'] == 'attr1' and item['value'] == 'instance1', attrs))).should.equal(1) - - ecs_client.delete_attributes( - cluster=test_cluster_name, - attributes=[ - {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, - 'targetType': 'container-instance'} - ] - ) - NUM_CUSTOM_ATTRIBUTES -= 1 - - resp = ecs_client.list_attributes( - cluster=test_cluster_name, - targetType='container-instance' - ) - attrs = resp['attributes'] - len(attrs).should.equal(NUM_CUSTOM_ATTRIBUTES + (NUM_DEFAULT_ATTRIBUTES * len(instances))) - - -@mock_ecs -def test_poll_endpoint(): - # Combined put, list delete attributes into the same test due to the amount of setup - ecs_client = boto3.client('ecs', region_name='us-east-1') - - # Just a placeholder until someone actually wants useless data, just testing it doesnt raise an exception - resp = ecs_client.discover_poll_endpoint(cluster='blah', containerInstance='blah') - resp.should.contain('endpoint') - resp.should.contain('telemetryEndpoint') - - -@mock_ecs -def test_list_task_definition_families(): - client = boto3.client('ecs', region_name='us-east-1') - client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - client.register_task_definition( - family='alt_test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - - resp1 = client.list_task_definition_families() - resp2 = client.list_task_definition_families(familyPrefix='alt') - - len(resp1['families']).should.equal(2) - len(resp2['families']).should.equal(1) - - -@mock_ec2 -@mock_ecs -def test_default_container_instance_attributes(): - ecs_client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - # Create cluster and EC2 instance - _ = ecs_client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - # Register container instance - response = ecs_client.register_container_instance( - cluster=test_cluster_name, - 
instanceIdentityDocument=instance_id_document - ) - - response['containerInstance'][ - 'ec2InstanceId'].should.equal(test_instance.id) - full_arn = response['containerInstance']['containerInstanceArn'] - container_instance_id = full_arn.rsplit('/', 1)[-1] - - default_attributes = response['containerInstance']['attributes'] - assert len(default_attributes) == 4 - expected_result = [ - {'name': 'ecs.availability-zone', 'value': test_instance.placement['AvailabilityZone']}, - {'name': 'ecs.ami-id', 'value': test_instance.image_id}, - {'name': 'ecs.instance-type', 'value': test_instance.instance_type}, - {'name': 'ecs.os-type', 'value': test_instance.platform or 'linux'} - ] - assert sorted(default_attributes, key=lambda item: item['name']) == sorted(expected_result, - key=lambda item: item['name']) - - -@mock_ec2 -@mock_ecs -def test_describe_container_instances_with_attributes(): - ecs_client = boto3.client('ecs', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - test_cluster_name = 'test_ecs_cluster' - - # Create cluster and EC2 instance - _ = ecs_client.create_cluster( - clusterName=test_cluster_name - ) - - test_instance = ec2.create_instances( - ImageId="ami-1234abcd", - MinCount=1, - MaxCount=1, - )[0] - - instance_id_document = json.dumps( - ec2_utils.generate_instance_identity_document(test_instance) - ) - - # Register container instance - response = ecs_client.register_container_instance( - cluster=test_cluster_name, - instanceIdentityDocument=instance_id_document - ) - - response['containerInstance'][ - 'ec2InstanceId'].should.equal(test_instance.id) - full_arn = response['containerInstance']['containerInstanceArn'] - container_instance_id = full_arn.rsplit('/', 1)[-1] - default_attributes = response['containerInstance']['attributes'] - - # Set attributes on container instance, one without a value - attributes = [ - {'name': 'env', 'value': 'prod'}, - {'name': 'attr1', 'value': 'instance1', 'targetId': container_instance_id, - 'targetType': 'container-instance'}, - {'name': 'attr_without_value'} - ] - ecs_client.put_attributes( - cluster=test_cluster_name, - attributes=attributes - ) - - # Describe container instance, should have attributes previously set - described_instance = ecs_client.describe_container_instances(cluster=test_cluster_name, - containerInstances=[container_instance_id]) - - assert len(described_instance['containerInstances']) == 1 - assert isinstance(described_instance['containerInstances'][0]['attributes'], list) - - # Remove additional info passed to put_attributes - cleaned_attributes = [] - for attribute in attributes: - attribute.pop('targetId', None) - attribute.pop('targetType', None) - cleaned_attributes.append(attribute) - described_attributes = sorted(described_instance['containerInstances'][0]['attributes'], - key=lambda item: item['name']) - expected_attributes = sorted(default_attributes + cleaned_attributes, key=lambda item: item['name']) - assert described_attributes == expected_attributes - - -def _fetch_container_instance_resources(container_instance_description): - remaining_resources = {} - registered_resources = {} - remaining_resources_list = container_instance_description['remainingResources'] - registered_resources_list = container_instance_description['registeredResources'] - remaining_resources['CPU'] = [x['integerValue'] for x in remaining_resources_list if x['name'] == 'CPU'][ - 0] - remaining_resources['MEMORY'] = \ - [x['integerValue'] for x in remaining_resources_list if x['name'] == 'MEMORY'][0] - 
remaining_resources['PORTS'] = \ - [x['stringSetValue'] for x in remaining_resources_list if x['name'] == 'PORTS'][0] - registered_resources['CPU'] = \ - [x['integerValue'] for x in registered_resources_list if x['name'] == 'CPU'][0] - registered_resources['MEMORY'] = \ - [x['integerValue'] for x in registered_resources_list if x['name'] == 'MEMORY'][0] - registered_resources['PORTS'] = \ - [x['stringSetValue'] for x in registered_resources_list if x['name'] == 'PORTS'][0] - return remaining_resources, registered_resources - - -@mock_ecs -def test_create_service_load_balancing(): - client = boto3.client('ecs', region_name='us-east-1') - client.create_cluster( - clusterName='test_ecs_cluster' - ) - client.register_task_definition( - family='test_ecs_task', - containerDefinitions=[ - { - 'name': 'hello_world', - 'image': 'docker/hello-world:latest', - 'cpu': 1024, - 'memory': 400, - 'essential': True, - 'environment': [{ - 'name': 'AWS_ACCESS_KEY_ID', - 'value': 'SOME_ACCESS_KEY' - }], - 'logConfiguration': {'logDriver': 'json-file'} - } - ] - ) - response = client.create_service( - cluster='test_ecs_cluster', - serviceName='test_ecs_service', - taskDefinition='test_ecs_task', - desiredCount=2, - loadBalancers=[ - { - 'targetGroupArn': 'test_target_group_arn', - 'loadBalancerName': 'test_load_balancer_name', - 'containerName': 'test_container_name', - 'containerPort': 123 - } - ] - ) - response['service']['clusterArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') - response['service']['desiredCount'].should.equal(2) - len(response['service']['events']).should.equal(0) - len(response['service']['loadBalancers']).should.equal(1) - response['service']['loadBalancers'][0]['targetGroupArn'].should.equal( - 'test_target_group_arn') - response['service']['loadBalancers'][0]['loadBalancerName'].should.equal( - 'test_load_balancer_name') - response['service']['loadBalancers'][0]['containerName'].should.equal( - 'test_container_name') - response['service']['loadBalancers'][0]['containerPort'].should.equal(123) - response['service']['pendingCount'].should.equal(0) - response['service']['runningCount'].should.equal(0) - response['service']['serviceArn'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') - response['service']['serviceName'].should.equal('test_ecs_service') - response['service']['status'].should.equal('ACTIVE') - response['service']['taskDefinition'].should.equal( - 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') +from __future__ import unicode_literals + +from copy import deepcopy + +from botocore.exceptions import ClientError +import boto3 +import sure # noqa +import json +from moto.ec2 import utils as ec2_utils +from uuid import UUID + +from moto import mock_cloudformation, mock_elbv2 +from moto import mock_ecs +from moto import mock_ec2 +from nose.tools import assert_raises + + +@mock_ecs +def test_create_cluster(): + client = boto3.client('ecs', region_name='us-east-1') + response = client.create_cluster( + clusterName='test_ecs_cluster' + ) + response['cluster']['clusterName'].should.equal('test_ecs_cluster') + response['cluster']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['cluster']['status'].should.equal('ACTIVE') + response['cluster']['registeredContainerInstancesCount'].should.equal(0) + response['cluster']['runningTasksCount'].should.equal(0) + response['cluster']['pendingTasksCount'].should.equal(0) + 
response['cluster']['activeServicesCount'].should.equal(0) + + +@mock_ecs +def test_list_clusters(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_cluster0' + ) + _ = client.create_cluster( + clusterName='test_cluster1' + ) + response = client.list_clusters() + response['clusterArns'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster0') + response['clusterArns'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_cluster1') + + +@mock_ecs +def test_delete_cluster(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + response = client.delete_cluster(cluster='test_ecs_cluster') + response['cluster']['clusterName'].should.equal('test_ecs_cluster') + response['cluster']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['cluster']['status'].should.equal('ACTIVE') + response['cluster']['registeredContainerInstancesCount'].should.equal(0) + response['cluster']['runningTasksCount'].should.equal(0) + response['cluster']['pendingTasksCount'].should.equal(0) + response['cluster']['activeServicesCount'].should.equal(0) + + response = client.list_clusters() + len(response['clusterArns']).should.equal(0) + + +@mock_ecs +def test_register_task_definition(): + client = boto3.client('ecs', region_name='us-east-1') + response = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + type(response['taskDefinition']).should.be(dict) + response['taskDefinition']['revision'].should.equal(1) + response['taskDefinition']['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['taskDefinition']['containerDefinitions'][ + 0]['name'].should.equal('hello_world') + response['taskDefinition']['containerDefinitions'][0][ + 'image'].should.equal('docker/hello-world:latest') + response['taskDefinition']['containerDefinitions'][ + 0]['cpu'].should.equal(1024) + response['taskDefinition']['containerDefinitions'][ + 0]['memory'].should.equal(400) + response['taskDefinition']['containerDefinitions'][ + 0]['essential'].should.equal(True) + response['taskDefinition']['containerDefinitions'][0][ + 'environment'][0]['name'].should.equal('AWS_ACCESS_KEY_ID') + response['taskDefinition']['containerDefinitions'][0][ + 'environment'][0]['value'].should.equal('SOME_ACCESS_KEY') + response['taskDefinition']['containerDefinitions'][0][ + 'logConfiguration']['logDriver'].should.equal('json-file') + + +@mock_ecs +def test_list_task_definitions(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world2', + 'image': 'docker/hello-world2:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, 
+ 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY2' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.list_task_definitions() + len(response['taskDefinitionArns']).should.equal(2) + response['taskDefinitionArns'][0].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['taskDefinitionArns'][1].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2') + + +@mock_ecs +def test_describe_task_definition(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world2', + 'image': 'docker/hello-world2:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY2' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world3', + 'image': 'docker/hello-world3:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY3' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.describe_task_definition(taskDefinition='test_ecs_task') + response['taskDefinition']['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:3') + + response = client.describe_task_definition( + taskDefinition='test_ecs_task:2') + response['taskDefinition']['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:2') + + +@mock_ecs +def test_deregister_task_definition(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.deregister_task_definition( + taskDefinition='test_ecs_task:1' + ) + type(response['taskDefinition']).should.be(dict) + response['taskDefinition']['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['taskDefinition']['containerDefinitions'][ + 0]['name'].should.equal('hello_world') + response['taskDefinition']['containerDefinitions'][0][ + 'image'].should.equal('docker/hello-world:latest') + response['taskDefinition']['containerDefinitions'][ + 0]['cpu'].should.equal(1024) + response['taskDefinition']['containerDefinitions'][ + 0]['memory'].should.equal(400) + response['taskDefinition']['containerDefinitions'][ + 0]['essential'].should.equal(True) + response['taskDefinition']['containerDefinitions'][0][ + 'environment'][0]['name'].should.equal('AWS_ACCESS_KEY_ID') + response['taskDefinition']['containerDefinitions'][0][ + 
'environment'][0]['value'].should.equal('SOME_ACCESS_KEY') + response['taskDefinition']['containerDefinitions'][0][ + 'logConfiguration']['logDriver'].should.equal('json-file') + + +@mock_ecs +def test_create_service(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + response['service']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['service']['desiredCount'].should.equal(2) + len(response['service']['events']).should.equal(0) + len(response['service']['loadBalancers']).should.equal(0) + response['service']['pendingCount'].should.equal(0) + response['service']['runningCount'].should.equal(0) + response['service']['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') + response['service']['serviceName'].should.equal('test_ecs_service') + response['service']['status'].should.equal('ACTIVE') + response['service']['taskDefinition'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['service']['schedulingStrategy'].should.equal('REPLICA') + +@mock_ecs +def test_create_service_scheduling_strategy(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service', + taskDefinition='test_ecs_task', + desiredCount=2, + schedulingStrategy='DAEMON', + ) + response['service']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['service']['desiredCount'].should.equal(2) + len(response['service']['events']).should.equal(0) + len(response['service']['loadBalancers']).should.equal(0) + response['service']['pendingCount'].should.equal(0) + response['service']['runningCount'].should.equal(0) + response['service']['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') + response['service']['serviceName'].should.equal('test_ecs_service') + response['service']['status'].should.equal('ACTIVE') + response['service']['taskDefinition'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['service']['schedulingStrategy'].should.equal('DAEMON') + + +@mock_ecs +def test_list_services(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', 
+ 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service1', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service2', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + response = client.list_services( + cluster='test_ecs_cluster' + ) + len(response['serviceArns']).should.equal(2) + response['serviceArns'][0].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') + response['serviceArns'][1].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') + + +@mock_ecs +def test_describe_services(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service1', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service2', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service3', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + response = client.describe_services( + cluster='test_ecs_cluster', + services=['test_ecs_service1', + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2'] + ) + len(response['services']).should.equal(2) + response['services'][0]['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') + response['services'][0]['serviceName'].should.equal('test_ecs_service1') + response['services'][1]['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') + response['services'][1]['serviceName'].should.equal('test_ecs_service2') + + response['services'][0]['deployments'][0]['desiredCount'].should.equal(2) + response['services'][0]['deployments'][0]['pendingCount'].should.equal(2) + response['services'][0]['deployments'][0]['runningCount'].should.equal(0) + response['services'][0]['deployments'][0]['status'].should.equal('PRIMARY') + + +@mock_ecs +def test_describe_services_scheduling_strategy(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service1', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service2', + taskDefinition='test_ecs_task', + 
desiredCount=2, + schedulingStrategy='DAEMON' + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service3', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + response = client.describe_services( + cluster='test_ecs_cluster', + services=['test_ecs_service1', + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2', + 'test_ecs_service3'] + ) + len(response['services']).should.equal(3) + response['services'][0]['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service1') + response['services'][0]['serviceName'].should.equal('test_ecs_service1') + response['services'][1]['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service2') + response['services'][1]['serviceName'].should.equal('test_ecs_service2') + + response['services'][0]['deployments'][0]['desiredCount'].should.equal(2) + response['services'][0]['deployments'][0]['pendingCount'].should.equal(2) + response['services'][0]['deployments'][0]['runningCount'].should.equal(0) + response['services'][0]['deployments'][0]['status'].should.equal('PRIMARY') + + response['services'][0]['schedulingStrategy'].should.equal('REPLICA') + response['services'][1]['schedulingStrategy'].should.equal('DAEMON') + response['services'][2]['schedulingStrategy'].should.equal('REPLICA') + + +@mock_ecs +def test_update_service(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + response['service']['desiredCount'].should.equal(2) + + response = client.update_service( + cluster='test_ecs_cluster', + service='test_ecs_service', + taskDefinition='test_ecs_task', + desiredCount=0 + ) + response['service']['desiredCount'].should.equal(0) + response['service']['schedulingStrategy'].should.equal('REPLICA') + + +@mock_ecs +def test_update_missing_service(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + + client.update_service.when.called_with( + cluster='test_ecs_cluster', + service='test_ecs_service', + taskDefinition='test_ecs_task', + desiredCount=0 + ).should.throw(ClientError) + + +@mock_ecs +def test_delete_service(): + client = boto3.client('ecs', region_name='us-east-1') + _ = client.create_cluster( + clusterName='test_ecs_cluster' + ) + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + _ = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service', + taskDefinition='test_ecs_task', + desiredCount=2 + ) + _ = client.update_service( + cluster='test_ecs_cluster', + service='test_ecs_service', + desiredCount=0 + ) + response = client.delete_service( + 
cluster='test_ecs_cluster', + service='test_ecs_service' + ) + response['service']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['service']['desiredCount'].should.equal(0) + len(response['service']['events']).should.equal(0) + len(response['service']['loadBalancers']).should.equal(0) + response['service']['pendingCount'].should.equal(0) + response['service']['runningCount'].should.equal(0) + response['service']['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') + response['service']['serviceName'].should.equal('test_ecs_service') + response['service']['status'].should.equal('ACTIVE') + response['service']['schedulingStrategy'].should.equal('REPLICA') + response['service']['taskDefinition'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + + + +@mock_ec2 +@mock_ecs +def test_register_container_instance(): + ecs_client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = ecs_client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + response['containerInstance'][ + 'ec2InstanceId'].should.equal(test_instance.id) + full_arn = response['containerInstance']['containerInstanceArn'] + arn_part = full_arn.split('/') + arn_part[0].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:container-instance') + arn_part[1].should.equal(str(UUID(arn_part[1]))) + response['containerInstance']['status'].should.equal('ACTIVE') + len(response['containerInstance']['registeredResources']).should.equal(4) + len(response['containerInstance']['remainingResources']).should.equal(4) + response['containerInstance']['agentConnected'].should.equal(True) + response['containerInstance']['versionInfo'][ + 'agentVersion'].should.equal('1.0.0') + response['containerInstance']['versionInfo'][ + 'agentHash'].should.equal('4023248') + response['containerInstance']['versionInfo'][ + 'dockerVersion'].should.equal('DockerVersion: 1.5.0') + + +@mock_ec2 +@mock_ecs +def test_deregister_container_instance(): + ecs_client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = ecs_client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + container_instance_id = response['containerInstance']['containerInstanceArn'] + response = ecs_client.deregister_container_instance( + cluster=test_cluster_name, + containerInstance=container_instance_id + ) + container_instances_response = ecs_client.list_container_instances( + cluster=test_cluster_name + ) + len(container_instances_response['containerInstanceArns']).should.equal(0) + + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + 
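+        # the identity document ties this container instance back to its EC2 instance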
instanceIdentityDocument=instance_id_document
+    )
+    container_instance_id = response['containerInstance']['containerInstanceArn']
+    _ = ecs_client.register_task_definition(
+        family='test_ecs_task',
+        containerDefinitions=[
+            {
+                'name': 'hello_world',
+                'image': 'docker/hello-world:latest',
+                'cpu': 1024,
+                'memory': 400,
+                'essential': True,
+                'environment': [{
+                    'name': 'AWS_ACCESS_KEY_ID',
+                    'value': 'SOME_ACCESS_KEY'
+                }],
+                'logConfiguration': {'logDriver': 'json-file'}
+            }
+        ]
+    )
+
+    response = ecs_client.start_task(
+        cluster='test_ecs_cluster',
+        taskDefinition='test_ecs_task',
+        overrides={},
+        containerInstances=[container_instance_id],
+        startedBy='moto'
+    )
+    # deregistration must fail while a task is still running on the instance;
+    # assert_raises alone captures the error, so no sure chain is needed inside the block
+    with assert_raises(Exception):
+        ecs_client.deregister_container_instance(
+            cluster=test_cluster_name,
+            containerInstance=container_instance_id
+        )
+    container_instances_response = ecs_client.list_container_instances(
+        cluster=test_cluster_name
+    )
+    len(container_instances_response['containerInstanceArns']).should.equal(1)
+    ecs_client.deregister_container_instance(
+        cluster=test_cluster_name,
+        containerInstance=container_instance_id,
+        force=True
+    )
+    container_instances_response = ecs_client.list_container_instances(
+        cluster=test_cluster_name
+    )
+    len(container_instances_response['containerInstanceArns']).should.equal(0)
+
+
+@mock_ec2
+@mock_ecs
+def test_list_container_instances():
+    ecs_client = boto3.client('ecs', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    test_cluster_name = 'test_ecs_cluster'
+    _ = ecs_client.create_cluster(
+        clusterName=test_cluster_name
+    )
+
+    instance_to_create = 3
+    test_instance_arns = []
+    for i in range(0, instance_to_create):
+        test_instance = ec2.create_instances(
+            ImageId="ami-1234abcd",
+            MinCount=1,
+            MaxCount=1,
+        )[0]
+
+        instance_id_document = json.dumps(
+            ec2_utils.generate_instance_identity_document(test_instance)
+        )
+
+        response = ecs_client.register_container_instance(
+            cluster=test_cluster_name,
+            instanceIdentityDocument=instance_id_document)
+
+        test_instance_arns.append(response['containerInstance'][
+            'containerInstanceArn'])
+
+    response = ecs_client.list_container_instances(cluster=test_cluster_name)
+
+    len(response['containerInstanceArns']).should.equal(instance_to_create)
+    for arn in test_instance_arns:
+        response['containerInstanceArns'].should.contain(arn)
+
+
+@mock_ec2
+@mock_ecs
+def test_describe_container_instances():
+    ecs_client = boto3.client('ecs', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    test_cluster_name = 'test_ecs_cluster'
+    _ = ecs_client.create_cluster(
+        clusterName=test_cluster_name
+    )
+
+    instance_to_create = 3
+    test_instance_arns = []
+    for i in range(0, instance_to_create):
+        test_instance = ec2.create_instances(
+            ImageId="ami-1234abcd",
+            MinCount=1,
+            MaxCount=1,
+        )[0]
+
+        instance_id_document = json.dumps(
+            ec2_utils.generate_instance_identity_document(test_instance)
+        )
+
+        response = ecs_client.register_container_instance(
+            cluster=test_cluster_name,
+            instanceIdentityDocument=instance_id_document)
+
+        test_instance_arns.append(response['containerInstance'][
+            'containerInstanceArn'])
+
+    test_instance_ids = list(
+        map((lambda x: x.split('/')[1]), test_instance_arns))
+    response = ecs_client.describe_container_instances(
+        cluster=test_cluster_name, containerInstances=test_instance_ids)
+    len(response['failures']).should.equal(0)
+    len(response['containerInstances']).should.equal(instance_to_create)
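+    # every described instance should correspond to an ARN registered in the loop above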
response_arns = [ci['containerInstanceArn'] + for ci in response['containerInstances']] + for arn in test_instance_arns: + response_arns.should.contain(arn) + for instance in response['containerInstances']: + instance.keys().should.contain('runningTasksCount') + instance.keys().should.contain('pendingTasksCount') + + +@mock_ec2 +@mock_ecs +def test_update_container_instances_state(): + ecs_client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + _ = ecs_client.create_cluster( + clusterName=test_cluster_name + ) + + instance_to_create = 3 + test_instance_arns = [] + for i in range(0, instance_to_create): + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document) + + test_instance_arns.append(response['containerInstance']['containerInstanceArn']) + + test_instance_ids = list(map((lambda x: x.split('/')[1]), test_instance_arns)) + response = ecs_client.update_container_instances_state(cluster=test_cluster_name, + containerInstances=test_instance_ids, + status='DRAINING') + len(response['failures']).should.equal(0) + len(response['containerInstances']).should.equal(instance_to_create) + response_statuses = [ci['status'] for ci in response['containerInstances']] + for status in response_statuses: + status.should.equal('DRAINING') + response = ecs_client.update_container_instances_state(cluster=test_cluster_name, + containerInstances=test_instance_ids, + status='DRAINING') + len(response['failures']).should.equal(0) + len(response['containerInstances']).should.equal(instance_to_create) + response_statuses = [ci['status'] for ci in response['containerInstances']] + for status in response_statuses: + status.should.equal('DRAINING') + response = ecs_client.update_container_instances_state(cluster=test_cluster_name, + containerInstances=test_instance_ids, + status='ACTIVE') + len(response['failures']).should.equal(0) + len(response['containerInstances']).should.equal(instance_to_create) + response_statuses = [ci['status'] for ci in response['containerInstances']] + for status in response_statuses: + status.should.equal('ACTIVE') + ecs_client.update_container_instances_state.when.called_with(cluster=test_cluster_name, + containerInstances=test_instance_ids, + status='test_status').should.throw(Exception) + + +@mock_ec2 +@mock_ecs +def test_run_task(): + client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': 
{'logDriver': 'json-file'} + } + ] + ) + response = client.run_task( + cluster='test_ecs_cluster', + overrides={}, + taskDefinition='test_ecs_task', + count=2, + startedBy='moto' + ) + len(response['tasks']).should.equal(2) + response['tasks'][0]['taskArn'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:task/') + response['tasks'][0]['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['tasks'][0]['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['tasks'][0]['containerInstanceArn'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:container-instance/') + response['tasks'][0]['overrides'].should.equal({}) + response['tasks'][0]['lastStatus'].should.equal("RUNNING") + response['tasks'][0]['desiredStatus'].should.equal("RUNNING") + response['tasks'][0]['startedBy'].should.equal("moto") + response['tasks'][0]['stoppedReason'].should.equal("") + + +@mock_ec2 +@mock_ecs +def test_start_task(): + client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + container_instances = client.list_container_instances( + cluster=test_cluster_name) + container_instance_id = container_instances[ + 'containerInstanceArns'][0].split('/')[-1] + + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + + response = client.start_task( + cluster='test_ecs_cluster', + taskDefinition='test_ecs_task', + overrides={}, + containerInstances=[container_instance_id], + startedBy='moto' + ) + + len(response['tasks']).should.equal(1) + response['tasks'][0]['taskArn'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:task/') + response['tasks'][0]['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['tasks'][0]['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['tasks'][0]['containerInstanceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:container-instance/{0}'.format(container_instance_id)) + response['tasks'][0]['overrides'].should.equal({}) + response['tasks'][0]['lastStatus'].should.equal("RUNNING") + response['tasks'][0]['desiredStatus'].should.equal("RUNNING") + response['tasks'][0]['startedBy'].should.equal("moto") + response['tasks'][0]['stoppedReason'].should.equal("") + + +@mock_ec2 +@mock_ecs +def test_list_tasks(): + client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = 
json.dumps(
+        ec2_utils.generate_instance_identity_document(test_instance)
+    )
+
+    _ = client.register_container_instance(
+        cluster=test_cluster_name,
+        instanceIdentityDocument=instance_id_document
+    )
+
+    container_instances = client.list_container_instances(
+        cluster=test_cluster_name)
+    container_instance_id = container_instances[
+        'containerInstanceArns'][0].split('/')[-1]
+
+    _ = client.register_task_definition(
+        family='test_ecs_task',
+        containerDefinitions=[
+            {
+                'name': 'hello_world',
+                'image': 'docker/hello-world:latest',
+                'cpu': 1024,
+                'memory': 400,
+                'essential': True,
+                'environment': [{
+                    'name': 'AWS_ACCESS_KEY_ID',
+                    'value': 'SOME_ACCESS_KEY'
+                }],
+                'logConfiguration': {'logDriver': 'json-file'}
+            }
+        ]
+    )
+
+    _ = client.start_task(
+        cluster='test_ecs_cluster',
+        taskDefinition='test_ecs_task',
+        overrides={},
+        containerInstances=[container_instance_id],
+        startedBy='foo'
+    )
+
+    _ = client.start_task(
+        cluster='test_ecs_cluster',
+        taskDefinition='test_ecs_task',
+        overrides={},
+        containerInstances=[container_instance_id],
+        startedBy='bar'
+    )
+
+    len(client.list_tasks()['taskArns']).should.equal(2)
+    len(client.list_tasks(cluster='test_ecs_cluster')
+        ['taskArns']).should.equal(2)
+    len(client.list_tasks(startedBy='foo')['taskArns']).should.equal(1)
+
+
+@mock_ec2
+@mock_ecs
+def test_describe_tasks():
+    client = boto3.client('ecs', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    test_cluster_name = 'test_ecs_cluster'
+
+    _ = client.create_cluster(
+        clusterName=test_cluster_name
+    )
+
+    test_instance = ec2.create_instances(
+        ImageId="ami-1234abcd",
+        MinCount=1,
+        MaxCount=1,
+    )[0]
+
+    instance_id_document = json.dumps(
+        ec2_utils.generate_instance_identity_document(test_instance)
+    )
+
+    response = client.register_container_instance(
+        cluster=test_cluster_name,
+        instanceIdentityDocument=instance_id_document
+    )
+
+    _ = client.register_task_definition(
+        family='test_ecs_task',
+        containerDefinitions=[
+            {
+                'name': 'hello_world',
+                'image': 'docker/hello-world:latest',
+                'cpu': 1024,
+                'memory': 400,
+                'essential': True,
+                'environment': [{
+                    'name': 'AWS_ACCESS_KEY_ID',
+                    'value': 'SOME_ACCESS_KEY'
+                }],
+                'logConfiguration': {'logDriver': 'json-file'}
+            }
+        ]
+    )
+    tasks_arns = [
+        task['taskArn'] for task in client.run_task(
+            cluster='test_ecs_cluster',
+            overrides={},
+            taskDefinition='test_ecs_task',
+            count=2,
+            startedBy='moto'
+        )['tasks']
+    ]
+    response = client.describe_tasks(
+        cluster='test_ecs_cluster',
+        tasks=tasks_arns
+    )
+
+    len(response['tasks']).should.equal(2)
+    set([response['tasks'][0]['taskArn'], response['tasks']
+         [1]['taskArn']]).should.equal(set(tasks_arns))
+
+    # Test we can pass task ids instead of ARNs
+    response = client.describe_tasks(
+        cluster='test_ecs_cluster',
+        tasks=[tasks_arns[0].split("/")[-1]]
+    )
+    len(response['tasks']).should.equal(1)
+
+
+@mock_ecs
+def test_describe_task_definition():
+    client = boto3.client('ecs', region_name='us-east-1')
+    container_definition = {
+        'name': 'hello_world',
+        'image': 'docker/hello-world:latest',
+        'cpu': 1024,
+        'memory': 400,
+        'essential': True,
+        'environment': [{
+            'name': 'AWS_ACCESS_KEY_ID',
+            'value': 'SOME_ACCESS_KEY'
+        }],
+        'logConfiguration': {'logDriver': 'json-file'}
+    }
+    task_definition = client.register_task_definition(
+        family='test_ecs_task',
+        containerDefinitions=[container_definition]
+    )
+    family = task_definition['taskDefinition']['family']
+    # Describing by bare family name should return the latest revision.
+    task = client.describe_task_definition(
+        taskDefinition=family)['taskDefinition']
+    task['containerDefinitions'][0].should.equal(container_definition)
+    task['taskDefinitionArn'].should.equal(
+        'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1')
+    task['volumes'].should.equal([])
+
+
+@mock_ec2
+@mock_ecs
+def test_stop_task():
+    client = boto3.client('ecs', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    test_cluster_name = 'test_ecs_cluster'
+
+    _ = client.create_cluster(
+        clusterName=test_cluster_name
+    )
+
+    test_instance = ec2.create_instances(
+        ImageId="ami-1234abcd",
+        MinCount=1,
+        MaxCount=1,
+    )[0]
+
+    instance_id_document = json.dumps(
+        ec2_utils.generate_instance_identity_document(test_instance)
+    )
+
+    _ = client.register_container_instance(
+        cluster=test_cluster_name,
+        instanceIdentityDocument=instance_id_document
+    )
+
+    _ = client.register_task_definition(
+        family='test_ecs_task',
+        containerDefinitions=[
+            {
+                'name': 'hello_world',
+                'image': 'docker/hello-world:latest',
+                'cpu': 1024,
+                'memory': 400,
+                'essential': True,
+                'environment': [{
+                    'name': 'AWS_ACCESS_KEY_ID',
+                    'value': 'SOME_ACCESS_KEY'
+                }],
+                'logConfiguration': {'logDriver': 'json-file'}
+            }
+        ]
+    )
+    run_response = client.run_task(
+        cluster='test_ecs_cluster',
+        overrides={},
+        taskDefinition='test_ecs_task',
+        count=1,
+        startedBy='moto'
+    )
+    stop_response = client.stop_task(
+        cluster='test_ecs_cluster',
+        task=run_response['tasks'][0].get('taskArn'),
+        reason='moto testing'
+    )
+
+    stop_response['task']['taskArn'].should.equal(
+        run_response['tasks'][0].get('taskArn'))
+    stop_response['task']['lastStatus'].should.equal('STOPPED')
+    stop_response['task']['desiredStatus'].should.equal('STOPPED')
+    stop_response['task']['stoppedReason'].should.equal('moto testing')
+
+
+@mock_ec2
+@mock_ecs
+def test_resource_reservation_and_release():
+    client = boto3.client('ecs', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    test_cluster_name = 'test_ecs_cluster'
+
+    _ = client.create_cluster(
+        clusterName=test_cluster_name
+    )
+
+    test_instance = ec2.create_instances(
+        ImageId="ami-1234abcd",
+        MinCount=1,
+        MaxCount=1,
+    )[0]
+
+    instance_id_document = json.dumps(
+        ec2_utils.generate_instance_identity_document(test_instance)
+    )
+
+    _ = client.register_container_instance(
+        cluster=test_cluster_name,
+        instanceIdentityDocument=instance_id_document
+    )
+
+    _ = client.register_task_definition(
+        family='test_ecs_task',
+        containerDefinitions=[
+            {
+                'name': 'hello_world',
+                'image': 'docker/hello-world:latest',
+                'cpu': 1024,
+                'memory': 400,
+                'essential': True,
+                'environment': [{
+                    'name': 'AWS_ACCESS_KEY_ID',
+                    'value': 'SOME_ACCESS_KEY'
+                }],
+                'logConfiguration': {'logDriver': 'json-file'},
+                'portMappings': [
+                    {
+                        'hostPort': 80,
+                        'containerPort': 8080
+                    }
+                ]
+            }
+        ]
+    )
+    run_response = client.run_task(
+        cluster='test_ecs_cluster',
+        overrides={},
+        taskDefinition='test_ecs_task',
+        count=1,
+        startedBy='moto'
+    )
+    container_instance_arn = run_response['tasks'][0].get('containerInstanceArn')
+    container_instance_description = client.describe_container_instances(
+        cluster='test_ecs_cluster',
+        containerInstances=[container_instance_arn]
+    )['containerInstances'][0]
+    remaining_resources, registered_resources = _fetch_container_instance_resources(
+        container_instance_description)
+    remaining_resources['CPU'].should.equal(registered_resources['CPU'] - 1024)
+    remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY'] - 400)
+    registered_resources['PORTS'].append('80')
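+    # The reserved host port now appears in remainingResources['PORTS'];
+    # appending '80' to the registered list makes the two lists comparable.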
+ remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) + container_instance_description['runningTasksCount'].should.equal(1) + client.stop_task( + cluster='test_ecs_cluster', + task=run_response['tasks'][0].get('taskArn'), + reason='moto testing' + ) + container_instance_description = client.describe_container_instances( + cluster='test_ecs_cluster', + containerInstances=[container_instance_arn] + )['containerInstances'][0] + remaining_resources, registered_resources = _fetch_container_instance_resources( + container_instance_description) + remaining_resources['CPU'].should.equal(registered_resources['CPU']) + remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY']) + remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) + container_instance_description['runningTasksCount'].should.equal(0) + +@mock_ec2 +@mock_ecs +def test_resource_reservation_and_release_memory_reservation(): + client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + _ = client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'memoryReservation': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'}, + 'portMappings': [ + { + 'containerPort': 8080 + } + ] + } + ] + ) + run_response = client.run_task( + cluster='test_ecs_cluster', + overrides={}, + taskDefinition='test_ecs_task', + count=1, + startedBy='moto' + ) + container_instance_arn = run_response['tasks'][0].get('containerInstanceArn') + container_instance_description = client.describe_container_instances( + cluster='test_ecs_cluster', + containerInstances=[container_instance_arn] + )['containerInstances'][0] + remaining_resources, registered_resources = _fetch_container_instance_resources(container_instance_description) + remaining_resources['CPU'].should.equal(registered_resources['CPU']) + remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY'] - 400) + remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) + container_instance_description['runningTasksCount'].should.equal(1) + client.stop_task( + cluster='test_ecs_cluster', + task=run_response['tasks'][0].get('taskArn'), + reason='moto testing' + ) + container_instance_description = client.describe_container_instances( + cluster='test_ecs_cluster', + containerInstances=[container_instance_arn] + )['containerInstances'][0] + remaining_resources, registered_resources = _fetch_container_instance_resources(container_instance_description) + remaining_resources['CPU'].should.equal(registered_resources['CPU']) + remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY']) + remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) + container_instance_description['runningTasksCount'].should.equal(0) + + + +@mock_ecs +@mock_cloudformation +def test_create_cluster_through_cloudformation(): + template = { + 
"AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": { + "ClusterName": "testcluster" + } + } + } + } + template_json = json.dumps(template) + + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_clusters() + len(resp['clusterArns']).should.equal(0) + + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template_json, + ) + + resp = ecs_conn.list_clusters() + len(resp['clusterArns']).should.equal(1) + + +@mock_ecs +@mock_cloudformation +def test_create_cluster_through_cloudformation_no_name(): + # cloudformation should create a cluster name for you if you do not provide it + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-cluster.html#cfn-ecs-cluster-clustername + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + } + } + } + template_json = json.dumps(template) + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template_json, + ) + + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_clusters() + len(resp['clusterArns']).should.equal(1) + + +@mock_ecs +@mock_cloudformation +def test_update_cluster_name_through_cloudformation_should_trigger_a_replacement(): + template1 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": { + "ClusterName": "testcluster1" + } + } + } + } + template2 = deepcopy(template1) + template2['Resources']['testCluster'][ + 'Properties']['ClusterName'] = 'testcluster2' + template1_json = json.dumps(template1) + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + stack_resp = cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template1_json, + ) + + template2_json = json.dumps(template2) + cfn_conn.update_stack( + StackName=stack_resp['StackId'], + TemplateBody=template2_json + ) + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_clusters() + len(resp['clusterArns']).should.equal(1) + resp['clusterArns'][0].endswith('testcluster2').should.be.true + + +@mock_ecs +@mock_cloudformation +def test_create_task_definition_through_cloudformation(): + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testTaskDefinition": { + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "ContainerDefinitions": [ + { + "Name": "ecs-sample", + "Image": "amazon/amazon-ecs-sample", + "Cpu": "200", + "Memory": "500", + "Essential": "true" + } + ], + "Volumes": [], + } + } + } + } + template_json = json.dumps(template) + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + stack_name = 'test_stack' + cfn_conn.create_stack( + StackName=stack_name, + TemplateBody=template_json, + ) + + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_task_definitions() + len(resp['taskDefinitionArns']).should.equal(1) + task_definition_arn = resp['taskDefinitionArns'][0] + + task_definition_details = cfn_conn.describe_stack_resource( + 
StackName=stack_name,LogicalResourceId='testTaskDefinition')['StackResourceDetail'] + task_definition_details['PhysicalResourceId'].should.equal(task_definition_arn) + +@mock_ec2 +@mock_ecs +def test_task_definitions_unable_to_be_placed(): + client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 5000, + 'memory': 40000, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.run_task( + cluster='test_ecs_cluster', + overrides={}, + taskDefinition='test_ecs_task', + count=2, + startedBy='moto' + ) + len(response['tasks']).should.equal(0) + + +@mock_ec2 +@mock_ecs +def test_task_definitions_with_port_clash(): + client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 256, + 'memory': 512, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'}, + 'portMappings': [ + { + 'hostPort': 80, + 'containerPort': 8080 + } + ] + } + ] + ) + response = client.run_task( + cluster='test_ecs_cluster', + overrides={}, + taskDefinition='test_ecs_task', + count=2, + startedBy='moto' + ) + len(response['tasks']).should.equal(1) + response['tasks'][0]['taskArn'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:task/') + response['tasks'][0]['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['tasks'][0]['taskDefinitionArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') + response['tasks'][0]['containerInstanceArn'].should.contain( + 'arn:aws:ecs:us-east-1:012345678910:container-instance/') + response['tasks'][0]['overrides'].should.equal({}) + response['tasks'][0]['lastStatus'].should.equal("RUNNING") + response['tasks'][0]['desiredStatus'].should.equal("RUNNING") + response['tasks'][0]['startedBy'].should.equal("moto") + response['tasks'][0]['stoppedReason'].should.equal("") + + +@mock_ecs +@mock_cloudformation +def test_update_task_definition_family_through_cloudformation_should_trigger_a_replacement(): + template1 = { + "AWSTemplateFormatVersion": "2010-09-09", + 
"Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testTaskDefinition": { + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "Family": "testTaskDefinition1", + "ContainerDefinitions": [ + { + "Name": "ecs-sample", + "Image": "amazon/amazon-ecs-sample", + "Cpu": "200", + "Memory": "500", + "Essential": "true" + } + ], + "Volumes": [], + } + } + } + } + template1_json = json.dumps(template1) + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template1_json, + ) + + template2 = deepcopy(template1) + template2['Resources']['testTaskDefinition'][ + 'Properties']['Family'] = 'testTaskDefinition2' + template2_json = json.dumps(template2) + cfn_conn.update_stack( + StackName="test_stack", + TemplateBody=template2_json, + ) + + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_task_definitions(familyPrefix='testTaskDefinition') + len(resp['taskDefinitionArns']).should.equal(1) + resp['taskDefinitionArns'][0].endswith( + 'testTaskDefinition2:1').should.be.true + + +@mock_ecs +@mock_cloudformation +def test_create_service_through_cloudformation(): + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": { + "ClusterName": "testcluster" + } + }, + "testTaskDefinition": { + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "ContainerDefinitions": [ + { + "Name": "ecs-sample", + "Image": "amazon/amazon-ecs-sample", + "Cpu": "200", + "Memory": "500", + "Essential": "true" + } + ], + "Volumes": [], + } + }, + "testService": { + "Type": "AWS::ECS::Service", + "Properties": { + "Cluster": {"Ref": "testCluster"}, + "DesiredCount": 10, + "TaskDefinition": {"Ref": "testTaskDefinition"}, + } + } + } + } + template_json = json.dumps(template) + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template_json, + ) + + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_services(cluster='testcluster') + len(resp['serviceArns']).should.equal(1) + + +@mock_ecs +@mock_cloudformation +def test_update_service_through_cloudformation_should_trigger_replacement(): + template1 = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + "Properties": { + "ClusterName": "testcluster" + } + }, + "testTaskDefinition": { + "Type": "AWS::ECS::TaskDefinition", + "Properties": { + "ContainerDefinitions": [ + { + "Name": "ecs-sample", + "Image": "amazon/amazon-ecs-sample", + "Cpu": "200", + "Memory": "500", + "Essential": "true" + } + ], + "Volumes": [], + } + }, + "testService": { + "Type": "AWS::ECS::Service", + "Properties": { + "Cluster": {"Ref": "testCluster"}, + "TaskDefinition": {"Ref": "testTaskDefinition"}, + "DesiredCount": 10, + } + } + } + } + template_json1 = json.dumps(template1) + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template_json1, + ) + template2 = deepcopy(template1) + template2['Resources']['testService']['Properties']['DesiredCount'] = 5 + template2_json = json.dumps(template2) + cfn_conn.update_stack( + StackName="test_stack", + TemplateBody=template2_json, + ) + + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp 
= ecs_conn.list_services(cluster='testcluster') + len(resp['serviceArns']).should.equal(1) + + +@mock_ec2 +@mock_ecs +def test_attributes(): + # Combined put, list delete attributes into the same test due to the amount of setup + ecs_client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = ecs_client.create_cluster( + clusterName=test_cluster_name + ) + + instances = [] + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + instances.append(test_instance) + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + response['containerInstance'][ + 'ec2InstanceId'].should.equal(test_instance.id) + full_arn1 = response['containerInstance']['containerInstanceArn'] + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + instances.append(test_instance) + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + response['containerInstance'][ + 'ec2InstanceId'].should.equal(test_instance.id) + full_arn2 = response['containerInstance']['containerInstanceArn'] + partial_arn2 = full_arn2.rsplit('/', 1)[-1] + + full_arn2.should_not.equal(full_arn1) # uuid1 isnt unique enough when the pc is fast ;-) + + # Ok set instance 1 with 1 attribute, instance 2 with another, and all of them with a 3rd. + ecs_client.put_attributes( + cluster=test_cluster_name, + attributes=[ + {'name': 'env', 'value': 'prod'}, + {'name': 'attr1', 'value': 'instance1', 'targetId': full_arn1}, + {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, + 'targetType': 'container-instance'} + ] + ) + + resp = ecs_client.list_attributes( + cluster=test_cluster_name, + targetType='container-instance' + ) + attrs = resp['attributes'] + + NUM_CUSTOM_ATTRIBUTES = 4 # 2 specific to individual machines and 1 global, going to both machines (2 + 1*2) + NUM_DEFAULT_ATTRIBUTES = 4 + len(attrs).should.equal(NUM_CUSTOM_ATTRIBUTES + (NUM_DEFAULT_ATTRIBUTES * len(instances))) + + # Tests that the attrs have been set properly + len(list(filter(lambda item: item['name'] == 'env', attrs))).should.equal(2) + len(list( + filter(lambda item: item['name'] == 'attr1' and item['value'] == 'instance1', attrs))).should.equal(1) + + ecs_client.delete_attributes( + cluster=test_cluster_name, + attributes=[ + {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, + 'targetType': 'container-instance'} + ] + ) + NUM_CUSTOM_ATTRIBUTES -= 1 + + resp = ecs_client.list_attributes( + cluster=test_cluster_name, + targetType='container-instance' + ) + attrs = resp['attributes'] + len(attrs).should.equal(NUM_CUSTOM_ATTRIBUTES + (NUM_DEFAULT_ATTRIBUTES * len(instances))) + + +@mock_ecs +def test_poll_endpoint(): + # Combined put, list delete attributes into the same test due to the amount of setup + ecs_client = boto3.client('ecs', region_name='us-east-1') + + # Just a placeholder until someone actually wants useless data, just testing it doesnt raise an exception + resp = ecs_client.discover_poll_endpoint(cluster='blah', containerInstance='blah') + resp.should.contain('endpoint') + 
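+    # Both the agent poll endpoint and the telemetry endpoint should be
+    # present in the response.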
resp.should.contain('telemetryEndpoint') + + +@mock_ecs +def test_list_task_definition_families(): + client = boto3.client('ecs', region_name='us-east-1') + client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + client.register_task_definition( + family='alt_test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + + resp1 = client.list_task_definition_families() + resp2 = client.list_task_definition_families(familyPrefix='alt') + + len(resp1['families']).should.equal(2) + len(resp2['families']).should.equal(1) + + +@mock_ec2 +@mock_ecs +def test_default_container_instance_attributes(): + ecs_client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + # Create cluster and EC2 instance + _ = ecs_client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + # Register container instance + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + response['containerInstance'][ + 'ec2InstanceId'].should.equal(test_instance.id) + full_arn = response['containerInstance']['containerInstanceArn'] + container_instance_id = full_arn.rsplit('/', 1)[-1] + + default_attributes = response['containerInstance']['attributes'] + assert len(default_attributes) == 4 + expected_result = [ + {'name': 'ecs.availability-zone', 'value': test_instance.placement['AvailabilityZone']}, + {'name': 'ecs.ami-id', 'value': test_instance.image_id}, + {'name': 'ecs.instance-type', 'value': test_instance.instance_type}, + {'name': 'ecs.os-type', 'value': test_instance.platform or 'linux'} + ] + assert sorted(default_attributes, key=lambda item: item['name']) == sorted(expected_result, + key=lambda item: item['name']) + + +@mock_ec2 +@mock_ecs +def test_describe_container_instances_with_attributes(): + ecs_client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + # Create cluster and EC2 instance + _ = ecs_client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + # Register container instance + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + response['containerInstance'][ + 'ec2InstanceId'].should.equal(test_instance.id) + full_arn = response['containerInstance']['containerInstanceArn'] + container_instance_id = full_arn.rsplit('/', 1)[-1] + default_attributes = response['containerInstance']['attributes'] + + # 
Set attributes on container instance, one without a value + attributes = [ + {'name': 'env', 'value': 'prod'}, + {'name': 'attr1', 'value': 'instance1', 'targetId': container_instance_id, + 'targetType': 'container-instance'}, + {'name': 'attr_without_value'} + ] + ecs_client.put_attributes( + cluster=test_cluster_name, + attributes=attributes + ) + + # Describe container instance, should have attributes previously set + described_instance = ecs_client.describe_container_instances(cluster=test_cluster_name, + containerInstances=[container_instance_id]) + + assert len(described_instance['containerInstances']) == 1 + assert isinstance(described_instance['containerInstances'][0]['attributes'], list) + + # Remove additional info passed to put_attributes + cleaned_attributes = [] + for attribute in attributes: + attribute.pop('targetId', None) + attribute.pop('targetType', None) + cleaned_attributes.append(attribute) + described_attributes = sorted(described_instance['containerInstances'][0]['attributes'], + key=lambda item: item['name']) + expected_attributes = sorted(default_attributes + cleaned_attributes, key=lambda item: item['name']) + assert described_attributes == expected_attributes + + +def _fetch_container_instance_resources(container_instance_description): + remaining_resources = {} + registered_resources = {} + remaining_resources_list = container_instance_description['remainingResources'] + registered_resources_list = container_instance_description['registeredResources'] + remaining_resources['CPU'] = [x['integerValue'] for x in remaining_resources_list if x['name'] == 'CPU'][ + 0] + remaining_resources['MEMORY'] = \ + [x['integerValue'] for x in remaining_resources_list if x['name'] == 'MEMORY'][0] + remaining_resources['PORTS'] = \ + [x['stringSetValue'] for x in remaining_resources_list if x['name'] == 'PORTS'][0] + registered_resources['CPU'] = \ + [x['integerValue'] for x in registered_resources_list if x['name'] == 'CPU'][0] + registered_resources['MEMORY'] = \ + [x['integerValue'] for x in registered_resources_list if x['name'] == 'MEMORY'][0] + registered_resources['PORTS'] = \ + [x['stringSetValue'] for x in registered_resources_list if x['name'] == 'PORTS'][0] + return remaining_resources, registered_resources + + +@mock_ecs +def test_create_service_load_balancing(): + client = boto3.client('ecs', region_name='us-east-1') + client.create_cluster( + clusterName='test_ecs_cluster' + ) + client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + response = client.create_service( + cluster='test_ecs_cluster', + serviceName='test_ecs_service', + taskDefinition='test_ecs_task', + desiredCount=2, + loadBalancers=[ + { + 'targetGroupArn': 'test_target_group_arn', + 'loadBalancerName': 'test_load_balancer_name', + 'containerName': 'test_container_name', + 'containerPort': 123 + } + ] + ) + response['service']['clusterArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:cluster/test_ecs_cluster') + response['service']['desiredCount'].should.equal(2) + len(response['service']['events']).should.equal(0) + len(response['service']['loadBalancers']).should.equal(1) + response['service']['loadBalancers'][0]['targetGroupArn'].should.equal( + 'test_target_group_arn') + 
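+    # The remaining load-balancer fields should round-trip unchanged from
+    # the create_service call.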
response['service']['loadBalancers'][0]['loadBalancerName'].should.equal( + 'test_load_balancer_name') + response['service']['loadBalancers'][0]['containerName'].should.equal( + 'test_container_name') + response['service']['loadBalancers'][0]['containerPort'].should.equal(123) + response['service']['pendingCount'].should.equal(0) + response['service']['runningCount'].should.equal(0) + response['service']['serviceArn'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:service/test_ecs_service') + response['service']['serviceName'].should.equal('test_ecs_service') + response['service']['status'].should.equal('ACTIVE') + response['service']['taskDefinition'].should.equal( + 'arn:aws:ecs:us-east-1:012345678910:task-definition/test_ecs_task:1') diff --git a/tests/test_elb/test_elb.py b/tests/test_elb/test_elb.py index a67508430..6c6492894 100644 --- a/tests/test_elb/test_elb.py +++ b/tests/test_elb/test_elb.py @@ -1,982 +1,982 @@ -from __future__ import unicode_literals -import boto3 -import botocore -import boto -import boto.ec2.elb -from boto.ec2.elb import HealthCheck -from boto.ec2.elb.attributes import ( - ConnectionSettingAttribute, - ConnectionDrainingAttribute, - AccessLogAttribute, -) -from botocore.exceptions import ClientError -from boto.exception import BotoServerError -from nose.tools import assert_raises -import sure # noqa - -from moto import mock_elb, mock_ec2, mock_elb_deprecated, mock_ec2_deprecated - - -@mock_elb_deprecated -@mock_ec2_deprecated -def test_create_load_balancer(): - conn = boto.connect_elb() - ec2 = boto.connect_ec2('the_key', 'the_secret') - - security_group = ec2.create_security_group('sg-abc987', 'description') - - zones = ['us-east-1a', 'us-east-1b'] - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - conn.create_load_balancer('my-lb', zones, ports, scheme='internal', security_groups=[security_group.id]) - - balancers = conn.get_all_load_balancers() - balancer = balancers[0] - balancer.name.should.equal("my-lb") - balancer.scheme.should.equal("internal") - list(balancer.security_groups).should.equal([security_group.id]) - set(balancer.availability_zones).should.equal( - set(['us-east-1a', 'us-east-1b'])) - listener1 = balancer.listeners[0] - listener1.load_balancer_port.should.equal(80) - listener1.instance_port.should.equal(8080) - listener1.protocol.should.equal("HTTP") - listener2 = balancer.listeners[1] - listener2.load_balancer_port.should.equal(443) - listener2.instance_port.should.equal(8443) - listener2.protocol.should.equal("TCP") - - -@mock_elb_deprecated -def test_getting_missing_elb(): - conn = boto.connect_elb() - conn.get_all_load_balancers.when.called_with( - load_balancer_names='aaa').should.throw(BotoServerError) - - -@mock_elb_deprecated -def test_create_elb_in_multiple_region(): - zones = ['us-east-1a', 'us-east-1b'] - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - - west1_conn = boto.ec2.elb.connect_to_region("us-west-1") - west1_conn.create_load_balancer('my-lb', zones, ports) - - west2_conn = boto.ec2.elb.connect_to_region("us-west-2") - west2_conn.create_load_balancer('my-lb', zones, ports) - - list(west1_conn.get_all_load_balancers()).should.have.length_of(1) - list(west2_conn.get_all_load_balancers()).should.have.length_of(1) - - -@mock_elb_deprecated -def test_create_load_balancer_with_certificate(): - conn = boto.connect_elb() - - zones = ['us-east-1a'] - ports = [ - (443, 8443, 'https', 'arn:aws:iam:123456789012:server-certificate/test-cert')] - conn.create_load_balancer('my-lb', zones, ports) - - balancers = 
conn.get_all_load_balancers() - balancer = balancers[0] - balancer.name.should.equal("my-lb") - balancer.scheme.should.equal("internet-facing") - set(balancer.availability_zones).should.equal(set(['us-east-1a'])) - listener = balancer.listeners[0] - listener.load_balancer_port.should.equal(443) - listener.instance_port.should.equal(8443) - listener.protocol.should.equal("HTTPS") - listener.ssl_certificate_id.should.equal( - 'arn:aws:iam:123456789012:server-certificate/test-cert') - - -@mock_elb -def test_create_and_delete_boto3_support(): - client = boto3.client('elb', region_name='us-east-1') - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - list(client.describe_load_balancers()[ - 'LoadBalancerDescriptions']).should.have.length_of(1) - - client.delete_load_balancer( - LoadBalancerName='my-lb' - ) - list(client.describe_load_balancers()[ - 'LoadBalancerDescriptions']).should.have.length_of(0) - - -@mock_elb -def test_create_load_balancer_with_no_listeners_defined(): - client = boto3.client('elb', region_name='us-east-1') - - with assert_raises(ClientError): - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - - -@mock_elb -def test_describe_paginated_balancers(): - client = boto3.client('elb', region_name='us-east-1') - - for i in range(51): - client.create_load_balancer( - LoadBalancerName='my-lb%d' % i, - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - - resp = client.describe_load_balancers() - resp['LoadBalancerDescriptions'].should.have.length_of(50) - resp['NextMarker'].should.equal(resp['LoadBalancerDescriptions'][-1]['LoadBalancerName']) - resp2 = client.describe_load_balancers(Marker=resp['NextMarker']) - resp2['LoadBalancerDescriptions'].should.have.length_of(1) - assert 'NextToken' not in resp2.keys() - - -@mock_elb -@mock_ec2 -def test_apply_security_groups_to_load_balancer(): - client = boto3.client('elb', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - security_group = ec2.create_security_group( - GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id) - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - - response = client.apply_security_groups_to_load_balancer( - LoadBalancerName='my-lb', - SecurityGroups=[security_group.id]) - - assert response['SecurityGroups'] == [security_group.id] - balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - assert balancer['SecurityGroups'] == [security_group.id] - - # Using a not-real security group raises an error - with assert_raises(ClientError) as error: - response = client.apply_security_groups_to_load_balancer( - LoadBalancerName='my-lb', - SecurityGroups=['not-really-a-security-group']) - assert "One or more of the specified security groups do not exist." 
in str(error.exception) - - -@mock_elb_deprecated -def test_add_listener(): - conn = boto.connect_elb() - zones = ['us-east-1a', 'us-east-1b'] - ports = [(80, 8080, 'http')] - conn.create_load_balancer('my-lb', zones, ports) - new_listener = (443, 8443, 'tcp') - conn.create_load_balancer_listeners('my-lb', [new_listener]) - balancers = conn.get_all_load_balancers() - balancer = balancers[0] - listener1 = balancer.listeners[0] - listener1.load_balancer_port.should.equal(80) - listener1.instance_port.should.equal(8080) - listener1.protocol.should.equal("HTTP") - listener2 = balancer.listeners[1] - listener2.load_balancer_port.should.equal(443) - listener2.instance_port.should.equal(8443) - listener2.protocol.should.equal("TCP") - - -@mock_elb_deprecated -def test_delete_listener(): - conn = boto.connect_elb() - - zones = ['us-east-1a', 'us-east-1b'] - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - conn.create_load_balancer('my-lb', zones, ports) - conn.delete_load_balancer_listeners('my-lb', [443]) - balancers = conn.get_all_load_balancers() - balancer = balancers[0] - listener1 = balancer.listeners[0] - listener1.load_balancer_port.should.equal(80) - listener1.instance_port.should.equal(8080) - listener1.protocol.should.equal("HTTP") - balancer.listeners.should.have.length_of(1) - - -@mock_elb -def test_create_and_delete_listener_boto3_support(): - client = boto3.client('elb', region_name='us-east-1') - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[{'Protocol': 'http', - 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - list(client.describe_load_balancers()[ - 'LoadBalancerDescriptions']).should.have.length_of(1) - - client.create_load_balancer_listeners( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 443, 'InstancePort': 8443}] - ) - balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - list(balancer['ListenerDescriptions']).should.have.length_of(2) - balancer['ListenerDescriptions'][0][ - 'Listener']['Protocol'].should.equal('HTTP') - balancer['ListenerDescriptions'][0]['Listener'][ - 'LoadBalancerPort'].should.equal(80) - balancer['ListenerDescriptions'][0]['Listener'][ - 'InstancePort'].should.equal(8080) - balancer['ListenerDescriptions'][1][ - 'Listener']['Protocol'].should.equal('TCP') - balancer['ListenerDescriptions'][1]['Listener'][ - 'LoadBalancerPort'].should.equal(443) - balancer['ListenerDescriptions'][1]['Listener'][ - 'InstancePort'].should.equal(8443) - - # Creating this listener with an conflicting definition throws error - with assert_raises(ClientError): - client.create_load_balancer_listeners( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 443, 'InstancePort': 1234}] - ) - - client.delete_load_balancer_listeners( - LoadBalancerName='my-lb', - LoadBalancerPorts=[443]) - - balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - list(balancer['ListenerDescriptions']).should.have.length_of(1) - - -@mock_elb_deprecated -def test_set_sslcertificate(): - conn = boto.connect_elb() - - zones = ['us-east-1a', 'us-east-1b'] - ports = [(443, 8443, 'tcp')] - conn.create_load_balancer('my-lb', zones, ports) - conn.set_lb_listener_SSL_certificate('my-lb', '443', 'arn:certificate') - balancers = conn.get_all_load_balancers() - balancer = balancers[0] - listener1 = balancer.listeners[0] - listener1.load_balancer_port.should.equal(443) - listener1.instance_port.should.equal(8443) - 
listener1.protocol.should.equal("TCP") - listener1.ssl_certificate_id.should.equal("arn:certificate") - - -@mock_elb_deprecated -def test_get_load_balancers_by_name(): - conn = boto.connect_elb() - - zones = ['us-east-1a', 'us-east-1b'] - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - conn.create_load_balancer('my-lb1', zones, ports) - conn.create_load_balancer('my-lb2', zones, ports) - conn.create_load_balancer('my-lb3', zones, ports) - - conn.get_all_load_balancers().should.have.length_of(3) - conn.get_all_load_balancers( - load_balancer_names=['my-lb1']).should.have.length_of(1) - conn.get_all_load_balancers( - load_balancer_names=['my-lb1', 'my-lb2']).should.have.length_of(2) - - -@mock_elb_deprecated -def test_delete_load_balancer(): - conn = boto.connect_elb() - - zones = ['us-east-1a'] - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - conn.create_load_balancer('my-lb', zones, ports) - - balancers = conn.get_all_load_balancers() - balancers.should.have.length_of(1) - - conn.delete_load_balancer("my-lb") - balancers = conn.get_all_load_balancers() - balancers.should.have.length_of(0) - - -@mock_elb_deprecated -def test_create_health_check(): - conn = boto.connect_elb() - - hc = HealthCheck( - interval=20, - healthy_threshold=3, - unhealthy_threshold=5, - target='HTTP:8080/health', - timeout=23, - ) - - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - lb.configure_health_check(hc) - - balancer = conn.get_all_load_balancers()[0] - health_check = balancer.health_check - health_check.interval.should.equal(20) - health_check.healthy_threshold.should.equal(3) - health_check.unhealthy_threshold.should.equal(5) - health_check.target.should.equal('HTTP:8080/health') - health_check.timeout.should.equal(23) - - -@mock_elb -def test_create_health_check_boto3(): - client = boto3.client('elb', region_name='us-east-1') - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[{'Protocol': 'http', - 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - client.configure_health_check( - LoadBalancerName='my-lb', - HealthCheck={ - 'Target': 'HTTP:8080/health', - 'Interval': 20, - 'Timeout': 23, - 'HealthyThreshold': 3, - 'UnhealthyThreshold': 5 - } - ) - - balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - balancer['HealthCheck']['Target'].should.equal('HTTP:8080/health') - balancer['HealthCheck']['Interval'].should.equal(20) - balancer['HealthCheck']['Timeout'].should.equal(23) - balancer['HealthCheck']['HealthyThreshold'].should.equal(3) - balancer['HealthCheck']['UnhealthyThreshold'].should.equal(5) - - -@mock_ec2_deprecated -@mock_elb_deprecated -def test_register_instances(): - ec2_conn = boto.connect_ec2() - reservation = ec2_conn.run_instances('ami-1234abcd', 2) - instance_id1 = reservation.instances[0].id - instance_id2 = reservation.instances[1].id - - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - - lb.register_instances([instance_id1, instance_id2]) - - balancer = conn.get_all_load_balancers()[0] - instance_ids = [instance.id for instance in balancer.instances] - set(instance_ids).should.equal(set([instance_id1, instance_id2])) - - -@mock_ec2 -@mock_elb -def test_register_instances_boto3(): - ec2 = boto3.resource('ec2', region_name='us-east-1') - response = ec2.create_instances( - ImageId='ami-1234abcd', MinCount=2, MaxCount=2) - instance_id1 = response[0].id 
- instance_id2 = response[1].id - - client = boto3.client('elb', region_name='us-east-1') - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[{'Protocol': 'http', - 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - client.register_instances_with_load_balancer( - LoadBalancerName='my-lb', - Instances=[ - {'InstanceId': instance_id1}, - {'InstanceId': instance_id2} - ] - ) - balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - instance_ids = [instance['InstanceId'] - for instance in balancer['Instances']] - set(instance_ids).should.equal(set([instance_id1, instance_id2])) - - -@mock_ec2_deprecated -@mock_elb_deprecated -def test_deregister_instances(): - ec2_conn = boto.connect_ec2() - reservation = ec2_conn.run_instances('ami-1234abcd', 2) - instance_id1 = reservation.instances[0].id - instance_id2 = reservation.instances[1].id - - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - - lb.register_instances([instance_id1, instance_id2]) - - balancer = conn.get_all_load_balancers()[0] - balancer.instances.should.have.length_of(2) - balancer.deregister_instances([instance_id1]) - - balancer.instances.should.have.length_of(1) - balancer.instances[0].id.should.equal(instance_id2) - - -@mock_ec2 -@mock_elb -def test_deregister_instances_boto3(): - ec2 = boto3.resource('ec2', region_name='us-east-1') - response = ec2.create_instances( - ImageId='ami-1234abcd', MinCount=2, MaxCount=2) - instance_id1 = response[0].id - instance_id2 = response[1].id - - client = boto3.client('elb', region_name='us-east-1') - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[{'Protocol': 'http', - 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - client.register_instances_with_load_balancer( - LoadBalancerName='my-lb', - Instances=[ - {'InstanceId': instance_id1}, - {'InstanceId': instance_id2} - ] - ) - - balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - balancer['Instances'].should.have.length_of(2) - - client.deregister_instances_from_load_balancer( - LoadBalancerName='my-lb', - Instances=[ - {'InstanceId': instance_id1} - ] - ) - - balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - balancer['Instances'].should.have.length_of(1) - balancer['Instances'][0]['InstanceId'].should.equal(instance_id2) - - -@mock_elb_deprecated -def test_default_attributes(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - attributes = lb.get_attributes() - - attributes.cross_zone_load_balancing.enabled.should.be.false - attributes.connection_draining.enabled.should.be.false - attributes.access_log.enabled.should.be.false - attributes.connecting_settings.idle_timeout.should.equal(60) - - -@mock_elb_deprecated -def test_cross_zone_load_balancing_attribute(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - - conn.modify_lb_attribute("my-lb", "CrossZoneLoadBalancing", True) - attributes = lb.get_attributes(force=True) - attributes.cross_zone_load_balancing.enabled.should.be.true - - conn.modify_lb_attribute("my-lb", "CrossZoneLoadBalancing", False) - attributes = lb.get_attributes(force=True) - attributes.cross_zone_load_balancing.enabled.should.be.false - - 
-@mock_elb_deprecated -def test_connection_draining_attribute(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - - connection_draining = ConnectionDrainingAttribute() - connection_draining.enabled = True - connection_draining.timeout = 60 - - conn.modify_lb_attribute( - "my-lb", "ConnectionDraining", connection_draining) - attributes = lb.get_attributes(force=True) - attributes.connection_draining.enabled.should.be.true - attributes.connection_draining.timeout.should.equal(60) - - connection_draining.timeout = 30 - conn.modify_lb_attribute( - "my-lb", "ConnectionDraining", connection_draining) - attributes = lb.get_attributes(force=True) - attributes.connection_draining.timeout.should.equal(30) - - connection_draining.enabled = False - conn.modify_lb_attribute( - "my-lb", "ConnectionDraining", connection_draining) - attributes = lb.get_attributes(force=True) - attributes.connection_draining.enabled.should.be.false - - -@mock_elb_deprecated -def test_access_log_attribute(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - - access_log = AccessLogAttribute() - access_log.enabled = True - access_log.s3_bucket_name = 'bucket' - access_log.s3_bucket_prefix = 'prefix' - access_log.emit_interval = 60 - - conn.modify_lb_attribute("my-lb", "AccessLog", access_log) - attributes = lb.get_attributes(force=True) - attributes.access_log.enabled.should.be.true - attributes.access_log.s3_bucket_name.should.equal("bucket") - attributes.access_log.s3_bucket_prefix.should.equal("prefix") - attributes.access_log.emit_interval.should.equal(60) - - access_log.enabled = False - conn.modify_lb_attribute("my-lb", "AccessLog", access_log) - attributes = lb.get_attributes(force=True) - attributes.access_log.enabled.should.be.false - - -@mock_elb_deprecated -def test_connection_settings_attribute(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - - connection_settings = ConnectionSettingAttribute(conn) - connection_settings.idle_timeout = 120 - - conn.modify_lb_attribute( - "my-lb", "ConnectingSettings", connection_settings) - attributes = lb.get_attributes(force=True) - attributes.connecting_settings.idle_timeout.should.equal(120) - - connection_settings.idle_timeout = 60 - conn.modify_lb_attribute( - "my-lb", "ConnectingSettings", connection_settings) - attributes = lb.get_attributes(force=True) - attributes.connecting_settings.idle_timeout.should.equal(60) - - -@mock_elb_deprecated -def test_create_lb_cookie_stickiness_policy(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - cookie_expiration_period = 60 - policy_name = "LBCookieStickinessPolicy" - - lb.create_cookie_stickiness_policy(cookie_expiration_period, policy_name) - - lb = conn.get_all_load_balancers()[0] - # There appears to be a quirk about boto, whereby it returns a unicode - # string for cookie_expiration_period, despite being stated in - # documentation to be a long numeric. - # - # To work around that, this value is converted to an int and checked. 
- cookie_expiration_period_response_str = lb.policies.lb_cookie_stickiness_policies[ - 0].cookie_expiration_period - int(cookie_expiration_period_response_str).should.equal( - cookie_expiration_period) - lb.policies.lb_cookie_stickiness_policies[ - 0].policy_name.should.equal(policy_name) - - -@mock_elb_deprecated -def test_create_lb_cookie_stickiness_policy_no_expiry(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - policy_name = "LBCookieStickinessPolicy" - - lb.create_cookie_stickiness_policy(None, policy_name) - - lb = conn.get_all_load_balancers()[0] - lb.policies.lb_cookie_stickiness_policies[ - 0].cookie_expiration_period.should.be.none - lb.policies.lb_cookie_stickiness_policies[ - 0].policy_name.should.equal(policy_name) - - -@mock_elb_deprecated -def test_create_app_cookie_stickiness_policy(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - cookie_name = "my-stickiness-policy" - policy_name = "AppCookieStickinessPolicy" - - lb.create_app_cookie_stickiness_policy(cookie_name, policy_name) - - lb = conn.get_all_load_balancers()[0] - lb.policies.app_cookie_stickiness_policies[ - 0].cookie_name.should.equal(cookie_name) - lb.policies.app_cookie_stickiness_policies[ - 0].policy_name.should.equal(policy_name) - - -@mock_elb_deprecated -def test_create_lb_policy(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - policy_name = "ProxyPolicy" - - lb.create_lb_policy(policy_name, 'ProxyProtocolPolicyType', { - 'ProxyProtocol': True}) - - lb = conn.get_all_load_balancers()[0] - lb.policies.other_policies[0].policy_name.should.equal(policy_name) - - -@mock_elb_deprecated -def test_set_policies_of_listener(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - listener_port = 80 - policy_name = "my-stickiness-policy" - - # boto docs currently state that zero or one policy may be associated - # with a given listener - - # in a real flow, it is necessary first to create a policy, - # then to set that policy to the listener - lb.create_cookie_stickiness_policy(None, policy_name) - lb.set_policies_of_listener(listener_port, [policy_name]) - - lb = conn.get_all_load_balancers()[0] - listener = lb.listeners[0] - listener.load_balancer_port.should.equal(listener_port) - # by contrast to a backend, a listener stores only policy name strings - listener.policy_names[0].should.equal(policy_name) - - -@mock_elb_deprecated -def test_set_policies_of_backend_server(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', [], ports) - instance_port = 8080 - policy_name = "ProxyPolicy" - - # in a real flow, it is necessary first to create a policy, - # then to set that policy to the backend - lb.create_lb_policy(policy_name, 'ProxyProtocolPolicyType', { - 'ProxyProtocol': True}) - lb.set_policies_of_backend_server(instance_port, [policy_name]) - - lb = conn.get_all_load_balancers()[0] - backend = lb.backends[0] - backend.instance_port.should.equal(instance_port) - # by contrast to a listener, a backend stores OtherPolicy objects - backend.policies[0].policy_name.should.equal(policy_name) - - -@mock_ec2_deprecated -@mock_elb_deprecated -def test_describe_instance_health(): - ec2_conn = boto.connect_ec2() - 
reservation = ec2_conn.run_instances('ami-1234abcd', 2) - instance_id1 = reservation.instances[0].id - instance_id2 = reservation.instances[1].id - - conn = boto.connect_elb() - zones = ['us-east-1a', 'us-east-1b'] - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - lb = conn.create_load_balancer('my-lb', zones, ports) - - instances_health = conn.describe_instance_health('my-lb') - instances_health.should.be.empty - - lb.register_instances([instance_id1, instance_id2]) - - instances_health = conn.describe_instance_health('my-lb') - instances_health.should.have.length_of(2) - for instance_health in instances_health: - instance_health.instance_id.should.be.within( - [instance_id1, instance_id2]) - instance_health.state.should.equal('InService') - - instances_health = conn.describe_instance_health('my-lb', [instance_id1]) - instances_health.should.have.length_of(1) - instances_health[0].instance_id.should.equal(instance_id1) - instances_health[0].state.should.equal('InService') - - -@mock_ec2 -@mock_elb -def test_describe_instance_health_boto3(): - elb = boto3.client('elb', region_name="us-east-1") - ec2 = boto3.client('ec2', region_name="us-east-1") - instances = ec2.run_instances(MinCount=2, MaxCount=2)['Instances'] - lb_name = "my_load_balancer" - elb.create_load_balancer( - Listeners=[{ - 'InstancePort': 80, - 'LoadBalancerPort': 8080, - 'Protocol': 'HTTP' - }], - LoadBalancerName=lb_name, - ) - elb.register_instances_with_load_balancer( - LoadBalancerName=lb_name, - Instances=[{'InstanceId': instances[0]['InstanceId']}] - ) - instances_health = elb.describe_instance_health( - LoadBalancerName=lb_name, - Instances=[{'InstanceId': instance['InstanceId']} for instance in instances] - ) - instances_health['InstanceStates'].should.have.length_of(2) - instances_health['InstanceStates'][0]['InstanceId'].\ - should.equal(instances[0]['InstanceId']) - instances_health['InstanceStates'][0]['State'].\ - should.equal('InService') - instances_health['InstanceStates'][1]['InstanceId'].\ - should.equal(instances[1]['InstanceId']) - instances_health['InstanceStates'][1]['State'].\ - should.equal('Unknown') - - -@mock_elb -def test_add_remove_tags(): - client = boto3.client('elb', region_name='us-east-1') - - client.add_tags.when.called_with(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'a', - 'Value': 'b' - }]).should.throw(botocore.exceptions.ClientError) - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - - list(client.describe_load_balancers()[ - 'LoadBalancerDescriptions']).should.have.length_of(1) - - client.add_tags(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'a', - 'Value': 'b' - }]) - - tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( - LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) - tags.should.have.key('a').which.should.equal('b') - - client.add_tags(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'a', - 'Value': 'b' - }, { - 'Key': 'b', - 'Value': 'b' - }, { - 'Key': 'c', - 'Value': 'b' - }, { - 'Key': 'd', - 'Value': 'b' - }, { - 'Key': 'e', - 'Value': 'b' - }, { - 'Key': 'f', - 'Value': 'b' - }, { - 'Key': 'g', - 'Value': 'b' - }, { - 'Key': 'h', - 'Value': 'b' - }, { - 'Key': 'i', - 'Value': 'b' - }, { - 'Key': 'j', - 'Value': 'b' - }]) - - client.add_tags.when.called_with(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'k', - 'Value': 'b' - }]).should.throw(botocore.exceptions.ClientError) - - 
client.add_tags(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'j', - 'Value': 'c' - }]) - - tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( - LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) - - tags.should.have.key('a').which.should.equal('b') - tags.should.have.key('b').which.should.equal('b') - tags.should.have.key('c').which.should.equal('b') - tags.should.have.key('d').which.should.equal('b') - tags.should.have.key('e').which.should.equal('b') - tags.should.have.key('f').which.should.equal('b') - tags.should.have.key('g').which.should.equal('b') - tags.should.have.key('h').which.should.equal('b') - tags.should.have.key('i').which.should.equal('b') - tags.should.have.key('j').which.should.equal('c') - tags.shouldnt.have.key('k') - - client.remove_tags(LoadBalancerNames=['my-lb'], - Tags=[{ - 'Key': 'a' - }]) - - tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( - LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) - - tags.shouldnt.have.key('a') - tags.should.have.key('b').which.should.equal('b') - tags.should.have.key('c').which.should.equal('b') - tags.should.have.key('d').which.should.equal('b') - tags.should.have.key('e').which.should.equal('b') - tags.should.have.key('f').which.should.equal('b') - tags.should.have.key('g').which.should.equal('b') - tags.should.have.key('h').which.should.equal('b') - tags.should.have.key('i').which.should.equal('b') - tags.should.have.key('j').which.should.equal('c') - - client.create_load_balancer( - LoadBalancerName='other-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 433, 'InstancePort': 8433}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - - client.add_tags(LoadBalancerNames=['other-lb'], - Tags=[{ - 'Key': 'other', - 'Value': 'something' - }]) - - lb_tags = dict([(l['LoadBalancerName'], dict([(d['Key'], d['Value']) for d in l['Tags']])) - for l in client.describe_tags(LoadBalancerNames=['my-lb', 'other-lb'])['TagDescriptions']]) - - lb_tags.should.have.key('my-lb') - lb_tags.should.have.key('other-lb') - - lb_tags['my-lb'].shouldnt.have.key('other') - lb_tags[ - 'other-lb'].should.have.key('other').which.should.equal('something') - - -@mock_elb -def test_create_with_tags(): - client = boto3.client('elb', region_name='us-east-1') - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'], - Tags=[{ - 'Key': 'k', - 'Value': 'v' - }] - ) - - tags = dict((d['Key'], d['Value']) for d in client.describe_tags( - LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']) - tags.should.have.key('k').which.should.equal('v') - - -@mock_elb -def test_modify_attributes(): - client = boto3.client('elb', region_name='us-east-1') - - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[{'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - AvailabilityZones=['us-east-1a', 'us-east-1b'] - ) - - # Default ConnectionDraining timeout of 300 seconds - client.modify_load_balancer_attributes( - LoadBalancerName='my-lb', - LoadBalancerAttributes={ - 'ConnectionDraining': {'Enabled': True}, - } - ) - lb_attrs = client.describe_load_balancer_attributes(LoadBalancerName='my-lb') - lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Enabled'].should.equal(True) - lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Timeout'].should.equal(300) - - # specify a custom ConnectionDraining timeout - 
client.modify_load_balancer_attributes( - LoadBalancerName='my-lb', - LoadBalancerAttributes={ - 'ConnectionDraining': { - 'Enabled': True, - 'Timeout': 45, - }, - } - ) - lb_attrs = client.describe_load_balancer_attributes(LoadBalancerName='my-lb') - lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Enabled'].should.equal(True) - lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Timeout'].should.equal(45) - - -@mock_ec2 -@mock_elb -def test_subnets(): - ec2 = boto3.resource('ec2', region_name='us-east-1') - vpc = ec2.create_vpc( - CidrBlock='172.28.7.0/24', - InstanceTenancy='default' - ) - subnet = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26' - ) - client = boto3.client('elb', region_name='us-east-1') - client.create_load_balancer( - LoadBalancerName='my-lb', - Listeners=[ - {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], - Subnets=[subnet.id] - ) - - lb = client.describe_load_balancers()['LoadBalancerDescriptions'][0] - lb.should.have.key('Subnets').which.should.have.length_of(1) - lb['Subnets'][0].should.equal(subnet.id) - - lb.should.have.key('VPCId').which.should.equal(vpc.id) - - -@mock_elb_deprecated -def test_create_load_balancer_duplicate(): - conn = boto.connect_elb() - ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] - conn.create_load_balancer('my-lb', [], ports) - conn.create_load_balancer.when.called_with( - 'my-lb', [], ports).should.throw(BotoServerError) +from __future__ import unicode_literals +import boto3 +import botocore +import boto +import boto.ec2.elb +from boto.ec2.elb import HealthCheck +from boto.ec2.elb.attributes import ( + ConnectionSettingAttribute, + ConnectionDrainingAttribute, + AccessLogAttribute, +) +from botocore.exceptions import ClientError +from boto.exception import BotoServerError +from nose.tools import assert_raises +import sure # noqa + +from moto import mock_elb, mock_ec2, mock_elb_deprecated, mock_ec2_deprecated + + +@mock_elb_deprecated +@mock_ec2_deprecated +def test_create_load_balancer(): + conn = boto.connect_elb() + ec2 = boto.connect_ec2('the_key', 'the_secret') + + security_group = ec2.create_security_group('sg-abc987', 'description') + + zones = ['us-east-1a', 'us-east-1b'] + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + conn.create_load_balancer('my-lb', zones, ports, scheme='internal', security_groups=[security_group.id]) + + balancers = conn.get_all_load_balancers() + balancer = balancers[0] + balancer.name.should.equal("my-lb") + balancer.scheme.should.equal("internal") + list(balancer.security_groups).should.equal([security_group.id]) + set(balancer.availability_zones).should.equal( + set(['us-east-1a', 'us-east-1b'])) + listener1 = balancer.listeners[0] + listener1.load_balancer_port.should.equal(80) + listener1.instance_port.should.equal(8080) + listener1.protocol.should.equal("HTTP") + listener2 = balancer.listeners[1] + listener2.load_balancer_port.should.equal(443) + listener2.instance_port.should.equal(8443) + listener2.protocol.should.equal("TCP") + + +@mock_elb_deprecated +def test_getting_missing_elb(): + conn = boto.connect_elb() + conn.get_all_load_balancers.when.called_with( + load_balancer_names='aaa').should.throw(BotoServerError) + + +@mock_elb_deprecated +def test_create_elb_in_multiple_region(): + zones = ['us-east-1a', 'us-east-1b'] + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + + west1_conn = boto.ec2.elb.connect_to_region("us-west-1") + west1_conn.create_load_balancer('my-lb', zones, ports) + + west2_conn = 
boto.ec2.elb.connect_to_region("us-west-2")
+    west2_conn.create_load_balancer('my-lb', zones, ports)
+
+    list(west1_conn.get_all_load_balancers()).should.have.length_of(1)
+    list(west2_conn.get_all_load_balancers()).should.have.length_of(1)
+
+
+@mock_elb_deprecated
+def test_create_load_balancer_with_certificate():
+    conn = boto.connect_elb()
+
+    zones = ['us-east-1a']
+    ports = [
+        (443, 8443, 'https', 'arn:aws:iam:123456789012:server-certificate/test-cert')]
+    conn.create_load_balancer('my-lb', zones, ports)
+
+    balancers = conn.get_all_load_balancers()
+    balancer = balancers[0]
+    balancer.name.should.equal("my-lb")
+    balancer.scheme.should.equal("internet-facing")
+    set(balancer.availability_zones).should.equal(set(['us-east-1a']))
+    listener = balancer.listeners[0]
+    listener.load_balancer_port.should.equal(443)
+    listener.instance_port.should.equal(8443)
+    listener.protocol.should.equal("HTTPS")
+    listener.ssl_certificate_id.should.equal(
+        'arn:aws:iam:123456789012:server-certificate/test-cert')
+
+
+@mock_elb
+def test_create_and_delete_boto3_support():
+    client = boto3.client('elb', region_name='us-east-1')
+
+    client.create_load_balancer(
+        LoadBalancerName='my-lb',
+        Listeners=[
+            {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}],
+        AvailabilityZones=['us-east-1a', 'us-east-1b']
+    )
+    list(client.describe_load_balancers()[
+        'LoadBalancerDescriptions']).should.have.length_of(1)
+
+    client.delete_load_balancer(
+        LoadBalancerName='my-lb'
+    )
+    list(client.describe_load_balancers()[
+        'LoadBalancerDescriptions']).should.have.length_of(0)
+
+
+@mock_elb
+def test_create_load_balancer_with_no_listeners_defined():
+    client = boto3.client('elb', region_name='us-east-1')
+
+    with assert_raises(ClientError):
+        client.create_load_balancer(
+            LoadBalancerName='my-lb',
+            Listeners=[],
+            AvailabilityZones=['us-east-1a', 'us-east-1b']
+        )
+
+
+@mock_elb
+def test_describe_paginated_balancers():
+    client = boto3.client('elb', region_name='us-east-1')
+
+    for i in range(51):
+        client.create_load_balancer(
+            LoadBalancerName='my-lb%d' % i,
+            Listeners=[
+                {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}],
+            AvailabilityZones=['us-east-1a', 'us-east-1b']
+        )
+
+    resp = client.describe_load_balancers()
+    resp['LoadBalancerDescriptions'].should.have.length_of(50)
+    resp['NextMarker'].should.equal(resp['LoadBalancerDescriptions'][-1]['LoadBalancerName'])
+    resp2 = client.describe_load_balancers(Marker=resp['NextMarker'])
+    resp2['LoadBalancerDescriptions'].should.have.length_of(1)
+    assert 'NextToken' not in resp2.keys()
+
+
+@mock_elb
+@mock_ec2
+def test_apply_security_groups_to_load_balancer():
+    client = boto3.client('elb', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
+    security_group = ec2.create_security_group(
+        GroupName='sg01', Description='Test security group sg01', VpcId=vpc.id)
+
+    client.create_load_balancer(
+        LoadBalancerName='my-lb',
+        Listeners=[
+            {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}],
+        AvailabilityZones=['us-east-1a', 'us-east-1b']
+    )
+
+    response = client.apply_security_groups_to_load_balancer(
+        LoadBalancerName='my-lb',
+        SecurityGroups=[security_group.id])
+
+    assert response['SecurityGroups'] == [security_group.id]
+    balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0]
+    assert balancer['SecurityGroups'] == [security_group.id]
+
+    # Using a nonexistent security group raises an error
+    with assert_raises(ClientError) as error:
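+        # An unknown security group id makes the mock raise ClientError
+        # with the AWS-style message asserted below.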
+        response = client.apply_security_groups_to_load_balancer(
+            LoadBalancerName='my-lb',
+            SecurityGroups=['not-really-a-security-group'])
+    assert "One or more of the specified security groups do not exist." in str(error.exception)
+
+
+@mock_elb_deprecated
+def test_add_listener():
+    conn = boto.connect_elb()
+    zones = ['us-east-1a', 'us-east-1b']
+    ports = [(80, 8080, 'http')]
+    conn.create_load_balancer('my-lb', zones, ports)
+    new_listener = (443, 8443, 'tcp')
+    conn.create_load_balancer_listeners('my-lb', [new_listener])
+    balancers = conn.get_all_load_balancers()
+    balancer = balancers[0]
+    listener1 = balancer.listeners[0]
+    listener1.load_balancer_port.should.equal(80)
+    listener1.instance_port.should.equal(8080)
+    listener1.protocol.should.equal("HTTP")
+    listener2 = balancer.listeners[1]
+    listener2.load_balancer_port.should.equal(443)
+    listener2.instance_port.should.equal(8443)
+    listener2.protocol.should.equal("TCP")
+
+
+@mock_elb_deprecated
+def test_delete_listener():
+    conn = boto.connect_elb()
+
+    zones = ['us-east-1a', 'us-east-1b']
+    ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
+    conn.create_load_balancer('my-lb', zones, ports)
+    conn.delete_load_balancer_listeners('my-lb', [443])
+    balancers = conn.get_all_load_balancers()
+    balancer = balancers[0]
+    listener1 = balancer.listeners[0]
+    listener1.load_balancer_port.should.equal(80)
+    listener1.instance_port.should.equal(8080)
+    listener1.protocol.should.equal("HTTP")
+    balancer.listeners.should.have.length_of(1)
+
+
+@mock_elb
+def test_create_and_delete_listener_boto3_support():
+    client = boto3.client('elb', region_name='us-east-1')
+
+    client.create_load_balancer(
+        LoadBalancerName='my-lb',
+        Listeners=[{'Protocol': 'http',
+                    'LoadBalancerPort': 80, 'InstancePort': 8080}],
+        AvailabilityZones=['us-east-1a', 'us-east-1b']
+    )
+    list(client.describe_load_balancers()[
+        'LoadBalancerDescriptions']).should.have.length_of(1)
+
+    client.create_load_balancer_listeners(
+        LoadBalancerName='my-lb',
+        Listeners=[
+            {'Protocol': 'tcp', 'LoadBalancerPort': 443, 'InstancePort': 8443}]
+    )
+    balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0]
+    list(balancer['ListenerDescriptions']).should.have.length_of(2)
+    balancer['ListenerDescriptions'][0][
+        'Listener']['Protocol'].should.equal('HTTP')
+    balancer['ListenerDescriptions'][0]['Listener'][
+        'LoadBalancerPort'].should.equal(80)
+    balancer['ListenerDescriptions'][0]['Listener'][
+        'InstancePort'].should.equal(8080)
+    balancer['ListenerDescriptions'][1][
+        'Listener']['Protocol'].should.equal('TCP')
+    balancer['ListenerDescriptions'][1]['Listener'][
+        'LoadBalancerPort'].should.equal(443)
+    balancer['ListenerDescriptions'][1]['Listener'][
+        'InstancePort'].should.equal(8443)
+
+    # Creating this listener with a conflicting definition throws an error
+    with assert_raises(ClientError):
+        client.create_load_balancer_listeners(
+            LoadBalancerName='my-lb',
+            Listeners=[
+                {'Protocol': 'tcp', 'LoadBalancerPort': 443, 'InstancePort': 1234}]
+        )
+
+    client.delete_load_balancer_listeners(
+        LoadBalancerName='my-lb',
+        LoadBalancerPorts=[443])
+
+    balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0]
+    list(balancer['ListenerDescriptions']).should.have.length_of(1)
+
+
+@mock_elb_deprecated
+def test_set_sslcertificate():
+    conn = boto.connect_elb()
+
+    zones = ['us-east-1a', 'us-east-1b']
+    ports = [(443, 8443, 'tcp')]
+    conn.create_load_balancer('my-lb', zones, ports)
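+    # Note that boto passes the listener port as a string here; the mock
+    # stores whatever certificate ARN it is given on the matching listener.
+    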
conn.set_lb_listener_SSL_certificate('my-lb', '443', 'arn:certificate') + balancers = conn.get_all_load_balancers() + balancer = balancers[0] + listener1 = balancer.listeners[0] + listener1.load_balancer_port.should.equal(443) + listener1.instance_port.should.equal(8443) + listener1.protocol.should.equal("TCP") + listener1.ssl_certificate_id.should.equal("arn:certificate") + + +@mock_elb_deprecated +def test_get_load_balancers_by_name(): + conn = boto.connect_elb() + + zones = ['us-east-1a', 'us-east-1b'] + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + conn.create_load_balancer('my-lb1', zones, ports) + conn.create_load_balancer('my-lb2', zones, ports) + conn.create_load_balancer('my-lb3', zones, ports) + + conn.get_all_load_balancers().should.have.length_of(3) + conn.get_all_load_balancers( + load_balancer_names=['my-lb1']).should.have.length_of(1) + conn.get_all_load_balancers( + load_balancer_names=['my-lb1', 'my-lb2']).should.have.length_of(2) + + +@mock_elb_deprecated +def test_delete_load_balancer(): + conn = boto.connect_elb() + + zones = ['us-east-1a'] + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + conn.create_load_balancer('my-lb', zones, ports) + + balancers = conn.get_all_load_balancers() + balancers.should.have.length_of(1) + + conn.delete_load_balancer("my-lb") + balancers = conn.get_all_load_balancers() + balancers.should.have.length_of(0) + + +@mock_elb_deprecated +def test_create_health_check(): + conn = boto.connect_elb() + + hc = HealthCheck( + interval=20, + healthy_threshold=3, + unhealthy_threshold=5, + target='HTTP:8080/health', + timeout=23, + ) + + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + lb.configure_health_check(hc) + + balancer = conn.get_all_load_balancers()[0] + health_check = balancer.health_check + health_check.interval.should.equal(20) + health_check.healthy_threshold.should.equal(3) + health_check.unhealthy_threshold.should.equal(5) + health_check.target.should.equal('HTTP:8080/health') + health_check.timeout.should.equal(23) + + +@mock_elb +def test_create_health_check_boto3(): + client = boto3.client('elb', region_name='us-east-1') + + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[{'Protocol': 'http', + 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + client.configure_health_check( + LoadBalancerName='my-lb', + HealthCheck={ + 'Target': 'HTTP:8080/health', + 'Interval': 20, + 'Timeout': 23, + 'HealthyThreshold': 3, + 'UnhealthyThreshold': 5 + } + ) + + balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] + balancer['HealthCheck']['Target'].should.equal('HTTP:8080/health') + balancer['HealthCheck']['Interval'].should.equal(20) + balancer['HealthCheck']['Timeout'].should.equal(23) + balancer['HealthCheck']['HealthyThreshold'].should.equal(3) + balancer['HealthCheck']['UnhealthyThreshold'].should.equal(5) + + +@mock_ec2_deprecated +@mock_elb_deprecated +def test_register_instances(): + ec2_conn = boto.connect_ec2() + reservation = ec2_conn.run_instances('ami-1234abcd', 2) + instance_id1 = reservation.instances[0].id + instance_id2 = reservation.instances[1].id + + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + + lb.register_instances([instance_id1, instance_id2]) + + balancer = conn.get_all_load_balancers()[0] + instance_ids = [instance.id for instance in balancer.instances] + 
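# Compare as sets, since the order of registered instances is not
+    # guaranteed by the backend.
+    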
set(instance_ids).should.equal(set([instance_id1, instance_id2])) + + +@mock_ec2 +@mock_elb +def test_register_instances_boto3(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + response = ec2.create_instances( + ImageId='ami-1234abcd', MinCount=2, MaxCount=2) + instance_id1 = response[0].id + instance_id2 = response[1].id + + client = boto3.client('elb', region_name='us-east-1') + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[{'Protocol': 'http', + 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + client.register_instances_with_load_balancer( + LoadBalancerName='my-lb', + Instances=[ + {'InstanceId': instance_id1}, + {'InstanceId': instance_id2} + ] + ) + balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] + instance_ids = [instance['InstanceId'] + for instance in balancer['Instances']] + set(instance_ids).should.equal(set([instance_id1, instance_id2])) + + +@mock_ec2_deprecated +@mock_elb_deprecated +def test_deregister_instances(): + ec2_conn = boto.connect_ec2() + reservation = ec2_conn.run_instances('ami-1234abcd', 2) + instance_id1 = reservation.instances[0].id + instance_id2 = reservation.instances[1].id + + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + + lb.register_instances([instance_id1, instance_id2]) + + balancer = conn.get_all_load_balancers()[0] + balancer.instances.should.have.length_of(2) + balancer.deregister_instances([instance_id1]) + + balancer.instances.should.have.length_of(1) + balancer.instances[0].id.should.equal(instance_id2) + + +@mock_ec2 +@mock_elb +def test_deregister_instances_boto3(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + response = ec2.create_instances( + ImageId='ami-1234abcd', MinCount=2, MaxCount=2) + instance_id1 = response[0].id + instance_id2 = response[1].id + + client = boto3.client('elb', region_name='us-east-1') + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[{'Protocol': 'http', + 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + client.register_instances_with_load_balancer( + LoadBalancerName='my-lb', + Instances=[ + {'InstanceId': instance_id1}, + {'InstanceId': instance_id2} + ] + ) + + balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] + balancer['Instances'].should.have.length_of(2) + + client.deregister_instances_from_load_balancer( + LoadBalancerName='my-lb', + Instances=[ + {'InstanceId': instance_id1} + ] + ) + + balancer = client.describe_load_balancers()['LoadBalancerDescriptions'][0] + balancer['Instances'].should.have.length_of(1) + balancer['Instances'][0]['InstanceId'].should.equal(instance_id2) + + +@mock_elb_deprecated +def test_default_attributes(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + attributes = lb.get_attributes() + + attributes.cross_zone_load_balancing.enabled.should.be.false + attributes.connection_draining.enabled.should.be.false + attributes.access_log.enabled.should.be.false + attributes.connecting_settings.idle_timeout.should.equal(60) + + +@mock_elb_deprecated +def test_cross_zone_load_balancing_attribute(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + + conn.modify_lb_attribute("my-lb", "CrossZoneLoadBalancing", True) + 
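# force=True makes boto re-fetch the attributes rather than return its
+    # locally cached copy, so the assertion sees the modified value.
+    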
attributes = lb.get_attributes(force=True)
+    attributes.cross_zone_load_balancing.enabled.should.be.true
+
+    conn.modify_lb_attribute("my-lb", "CrossZoneLoadBalancing", False)
+    attributes = lb.get_attributes(force=True)
+    attributes.cross_zone_load_balancing.enabled.should.be.false
+
+
+@mock_elb_deprecated
+def test_connection_draining_attribute():
+    conn = boto.connect_elb()
+    ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
+    lb = conn.create_load_balancer('my-lb', [], ports)
+
+    connection_draining = ConnectionDrainingAttribute()
+    connection_draining.enabled = True
+    connection_draining.timeout = 60
+
+    conn.modify_lb_attribute(
+        "my-lb", "ConnectionDraining", connection_draining)
+    attributes = lb.get_attributes(force=True)
+    attributes.connection_draining.enabled.should.be.true
+    attributes.connection_draining.timeout.should.equal(60)
+
+    connection_draining.timeout = 30
+    conn.modify_lb_attribute(
+        "my-lb", "ConnectionDraining", connection_draining)
+    attributes = lb.get_attributes(force=True)
+    attributes.connection_draining.timeout.should.equal(30)
+
+    connection_draining.enabled = False
+    conn.modify_lb_attribute(
+        "my-lb", "ConnectionDraining", connection_draining)
+    attributes = lb.get_attributes(force=True)
+    attributes.connection_draining.enabled.should.be.false
+
+
+@mock_elb_deprecated
+def test_access_log_attribute():
+    conn = boto.connect_elb()
+    ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
+    lb = conn.create_load_balancer('my-lb', [], ports)
+
+    access_log = AccessLogAttribute()
+    access_log.enabled = True
+    access_log.s3_bucket_name = 'bucket'
+    access_log.s3_bucket_prefix = 'prefix'
+    access_log.emit_interval = 60
+
+    conn.modify_lb_attribute("my-lb", "AccessLog", access_log)
+    attributes = lb.get_attributes(force=True)
+    attributes.access_log.enabled.should.be.true
+    attributes.access_log.s3_bucket_name.should.equal("bucket")
+    attributes.access_log.s3_bucket_prefix.should.equal("prefix")
+    attributes.access_log.emit_interval.should.equal(60)
+
+    access_log.enabled = False
+    conn.modify_lb_attribute("my-lb", "AccessLog", access_log)
+    attributes = lb.get_attributes(force=True)
+    attributes.access_log.enabled.should.be.false
+
+
+@mock_elb_deprecated
+def test_connection_settings_attribute():
+    conn = boto.connect_elb()
+    ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
+    lb = conn.create_load_balancer('my-lb', [], ports)
+
+    connection_settings = ConnectionSettingAttribute(conn)
+    connection_settings.idle_timeout = 120
+
+    conn.modify_lb_attribute(
+        "my-lb", "ConnectingSettings", connection_settings)
+    attributes = lb.get_attributes(force=True)
+    attributes.connecting_settings.idle_timeout.should.equal(120)
+
+    connection_settings.idle_timeout = 60
+    conn.modify_lb_attribute(
+        "my-lb", "ConnectingSettings", connection_settings)
+    attributes = lb.get_attributes(force=True)
+    attributes.connecting_settings.idle_timeout.should.equal(60)
+
+
+@mock_elb_deprecated
+def test_create_lb_cookie_stickiness_policy():
+    conn = boto.connect_elb()
+    ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
+    lb = conn.create_load_balancer('my-lb', [], ports)
+    cookie_expiration_period = 60
+    policy_name = "LBCookieStickinessPolicy"
+
+    lb.create_cookie_stickiness_policy(cookie_expiration_period, policy_name)
+
+    lb = conn.get_all_load_balancers()[0]
+    # There appears to be a quirk in boto, whereby it returns a unicode
+    # string for cookie_expiration_period, despite the documentation
+    # describing it as a long numeric.
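+    # (The value is taken straight from the parsed XML response, where
+    # everything is text.)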
+ # + # To work around that, this value is converted to an int and checked. + cookie_expiration_period_response_str = lb.policies.lb_cookie_stickiness_policies[ + 0].cookie_expiration_period + int(cookie_expiration_period_response_str).should.equal( + cookie_expiration_period) + lb.policies.lb_cookie_stickiness_policies[ + 0].policy_name.should.equal(policy_name) + + +@mock_elb_deprecated +def test_create_lb_cookie_stickiness_policy_no_expiry(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + policy_name = "LBCookieStickinessPolicy" + + lb.create_cookie_stickiness_policy(None, policy_name) + + lb = conn.get_all_load_balancers()[0] + lb.policies.lb_cookie_stickiness_policies[ + 0].cookie_expiration_period.should.be.none + lb.policies.lb_cookie_stickiness_policies[ + 0].policy_name.should.equal(policy_name) + + +@mock_elb_deprecated +def test_create_app_cookie_stickiness_policy(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + cookie_name = "my-stickiness-policy" + policy_name = "AppCookieStickinessPolicy" + + lb.create_app_cookie_stickiness_policy(cookie_name, policy_name) + + lb = conn.get_all_load_balancers()[0] + lb.policies.app_cookie_stickiness_policies[ + 0].cookie_name.should.equal(cookie_name) + lb.policies.app_cookie_stickiness_policies[ + 0].policy_name.should.equal(policy_name) + + +@mock_elb_deprecated +def test_create_lb_policy(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + policy_name = "ProxyPolicy" + + lb.create_lb_policy(policy_name, 'ProxyProtocolPolicyType', { + 'ProxyProtocol': True}) + + lb = conn.get_all_load_balancers()[0] + lb.policies.other_policies[0].policy_name.should.equal(policy_name) + + +@mock_elb_deprecated +def test_set_policies_of_listener(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + listener_port = 80 + policy_name = "my-stickiness-policy" + + # boto docs currently state that zero or one policy may be associated + # with a given listener + + # in a real flow, it is necessary first to create a policy, + # then to set that policy to the listener + lb.create_cookie_stickiness_policy(None, policy_name) + lb.set_policies_of_listener(listener_port, [policy_name]) + + lb = conn.get_all_load_balancers()[0] + listener = lb.listeners[0] + listener.load_balancer_port.should.equal(listener_port) + # by contrast to a backend, a listener stores only policy name strings + listener.policy_names[0].should.equal(policy_name) + + +@mock_elb_deprecated +def test_set_policies_of_backend_server(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', [], ports) + instance_port = 8080 + policy_name = "ProxyPolicy" + + # in a real flow, it is necessary first to create a policy, + # then to set that policy to the backend + lb.create_lb_policy(policy_name, 'ProxyProtocolPolicyType', { + 'ProxyProtocol': True}) + lb.set_policies_of_backend_server(instance_port, [policy_name]) + + lb = conn.get_all_load_balancers()[0] + backend = lb.backends[0] + backend.instance_port.should.equal(instance_port) + # by contrast to a listener, a backend stores OtherPolicy objects + backend.policies[0].policy_name.should.equal(policy_name) + + +@mock_ec2_deprecated +@mock_elb_deprecated 
+def test_describe_instance_health(): + ec2_conn = boto.connect_ec2() + reservation = ec2_conn.run_instances('ami-1234abcd', 2) + instance_id1 = reservation.instances[0].id + instance_id2 = reservation.instances[1].id + + conn = boto.connect_elb() + zones = ['us-east-1a', 'us-east-1b'] + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + lb = conn.create_load_balancer('my-lb', zones, ports) + + instances_health = conn.describe_instance_health('my-lb') + instances_health.should.be.empty + + lb.register_instances([instance_id1, instance_id2]) + + instances_health = conn.describe_instance_health('my-lb') + instances_health.should.have.length_of(2) + for instance_health in instances_health: + instance_health.instance_id.should.be.within( + [instance_id1, instance_id2]) + instance_health.state.should.equal('InService') + + instances_health = conn.describe_instance_health('my-lb', [instance_id1]) + instances_health.should.have.length_of(1) + instances_health[0].instance_id.should.equal(instance_id1) + instances_health[0].state.should.equal('InService') + + +@mock_ec2 +@mock_elb +def test_describe_instance_health_boto3(): + elb = boto3.client('elb', region_name="us-east-1") + ec2 = boto3.client('ec2', region_name="us-east-1") + instances = ec2.run_instances(MinCount=2, MaxCount=2)['Instances'] + lb_name = "my_load_balancer" + elb.create_load_balancer( + Listeners=[{ + 'InstancePort': 80, + 'LoadBalancerPort': 8080, + 'Protocol': 'HTTP' + }], + LoadBalancerName=lb_name, + ) + elb.register_instances_with_load_balancer( + LoadBalancerName=lb_name, + Instances=[{'InstanceId': instances[0]['InstanceId']}] + ) + instances_health = elb.describe_instance_health( + LoadBalancerName=lb_name, + Instances=[{'InstanceId': instance['InstanceId']} for instance in instances] + ) + instances_health['InstanceStates'].should.have.length_of(2) + instances_health['InstanceStates'][0]['InstanceId'].\ + should.equal(instances[0]['InstanceId']) + instances_health['InstanceStates'][0]['State'].\ + should.equal('InService') + instances_health['InstanceStates'][1]['InstanceId'].\ + should.equal(instances[1]['InstanceId']) + instances_health['InstanceStates'][1]['State'].\ + should.equal('Unknown') + + +@mock_elb +def test_add_remove_tags(): + client = boto3.client('elb', region_name='us-east-1') + + client.add_tags.when.called_with(LoadBalancerNames=['my-lb'], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }]).should.throw(botocore.exceptions.ClientError) + + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + list(client.describe_load_balancers()[ + 'LoadBalancerDescriptions']).should.have.length_of(1) + + client.add_tags(LoadBalancerNames=['my-lb'], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }]) + + tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( + LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) + tags.should.have.key('a').which.should.equal('b') + + client.add_tags(LoadBalancerNames=['my-lb'], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }, { + 'Key': 'b', + 'Value': 'b' + }, { + 'Key': 'c', + 'Value': 'b' + }, { + 'Key': 'd', + 'Value': 'b' + }, { + 'Key': 'e', + 'Value': 'b' + }, { + 'Key': 'f', + 'Value': 'b' + }, { + 'Key': 'g', + 'Value': 'b' + }, { + 'Key': 'h', + 'Value': 'b' + }, { + 'Key': 'i', + 'Value': 'b' + }, { + 'Key': 'j', + 'Value': 'b' + }]) + + client.add_tags.when.called_with(LoadBalancerNames=['my-lb'], + Tags=[{ + 'Key': 'k', + 
'Value': 'b' + }]).should.throw(botocore.exceptions.ClientError) + + client.add_tags(LoadBalancerNames=['my-lb'], + Tags=[{ + 'Key': 'j', + 'Value': 'c' + }]) + + tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( + LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) + + tags.should.have.key('a').which.should.equal('b') + tags.should.have.key('b').which.should.equal('b') + tags.should.have.key('c').which.should.equal('b') + tags.should.have.key('d').which.should.equal('b') + tags.should.have.key('e').which.should.equal('b') + tags.should.have.key('f').which.should.equal('b') + tags.should.have.key('g').which.should.equal('b') + tags.should.have.key('h').which.should.equal('b') + tags.should.have.key('i').which.should.equal('b') + tags.should.have.key('j').which.should.equal('c') + tags.shouldnt.have.key('k') + + client.remove_tags(LoadBalancerNames=['my-lb'], + Tags=[{ + 'Key': 'a' + }]) + + tags = dict([(d['Key'], d['Value']) for d in client.describe_tags( + LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']]) + + tags.shouldnt.have.key('a') + tags.should.have.key('b').which.should.equal('b') + tags.should.have.key('c').which.should.equal('b') + tags.should.have.key('d').which.should.equal('b') + tags.should.have.key('e').which.should.equal('b') + tags.should.have.key('f').which.should.equal('b') + tags.should.have.key('g').which.should.equal('b') + tags.should.have.key('h').which.should.equal('b') + tags.should.have.key('i').which.should.equal('b') + tags.should.have.key('j').which.should.equal('c') + + client.create_load_balancer( + LoadBalancerName='other-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 433, 'InstancePort': 8433}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + client.add_tags(LoadBalancerNames=['other-lb'], + Tags=[{ + 'Key': 'other', + 'Value': 'something' + }]) + + lb_tags = dict([(l['LoadBalancerName'], dict([(d['Key'], d['Value']) for d in l['Tags']])) + for l in client.describe_tags(LoadBalancerNames=['my-lb', 'other-lb'])['TagDescriptions']]) + + lb_tags.should.have.key('my-lb') + lb_tags.should.have.key('other-lb') + + lb_tags['my-lb'].shouldnt.have.key('other') + lb_tags[ + 'other-lb'].should.have.key('other').which.should.equal('something') + + +@mock_elb +def test_create_with_tags(): + client = boto3.client('elb', region_name='us-east-1') + + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'], + Tags=[{ + 'Key': 'k', + 'Value': 'v' + }] + ) + + tags = dict((d['Key'], d['Value']) for d in client.describe_tags( + LoadBalancerNames=['my-lb'])['TagDescriptions'][0]['Tags']) + tags.should.have.key('k').which.should.equal('v') + + +@mock_elb +def test_modify_attributes(): + client = boto3.client('elb', region_name='us-east-1') + + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[{'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + # Default ConnectionDraining timeout of 300 seconds + client.modify_load_balancer_attributes( + LoadBalancerName='my-lb', + LoadBalancerAttributes={ + 'ConnectionDraining': {'Enabled': True}, + } + ) + lb_attrs = client.describe_load_balancer_attributes(LoadBalancerName='my-lb') + lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Enabled'].should.equal(True) + lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Timeout'].should.equal(300) + + 
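# When Timeout is omitted, the mock falls back to 300 seconds, which
+    # matches the documented AWS default for ConnectionDraining.
+    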
# specify a custom ConnectionDraining timeout + client.modify_load_balancer_attributes( + LoadBalancerName='my-lb', + LoadBalancerAttributes={ + 'ConnectionDraining': { + 'Enabled': True, + 'Timeout': 45, + }, + } + ) + lb_attrs = client.describe_load_balancer_attributes(LoadBalancerName='my-lb') + lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Enabled'].should.equal(True) + lb_attrs['LoadBalancerAttributes']['ConnectionDraining']['Timeout'].should.equal(45) + + +@mock_ec2 +@mock_elb +def test_subnets(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc( + CidrBlock='172.28.7.0/24', + InstanceTenancy='default' + ) + subnet = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26' + ) + client = boto3.client('elb', region_name='us-east-1') + client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + Subnets=[subnet.id] + ) + + lb = client.describe_load_balancers()['LoadBalancerDescriptions'][0] + lb.should.have.key('Subnets').which.should.have.length_of(1) + lb['Subnets'][0].should.equal(subnet.id) + + lb.should.have.key('VPCId').which.should.equal(vpc.id) + + +@mock_elb_deprecated +def test_create_load_balancer_duplicate(): + conn = boto.connect_elb() + ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] + conn.create_load_balancer('my-lb', [], ports) + conn.create_load_balancer.when.called_with( + 'my-lb', [], ports).should.throw(BotoServerError) diff --git a/tests/test_elb/test_server.py b/tests/test_elb/test_server.py index 0033284d7..159da970d 100644 --- a/tests/test_elb/test_server.py +++ b/tests/test_elb/test_server.py @@ -1,17 +1,17 @@ -from __future__ import unicode_literals -import sure # noqa - -import moto.server as server - -''' -Test the different server responses -''' - - -def test_elb_describe_instances(): - backend = server.create_backend_app("elb") - test_client = backend.test_client() - - res = test_client.get('/?Action=DescribeLoadBalancers&Version=2015-12-01') - - res.data.should.contain(b'DescribeLoadBalancersResponse') +from __future__ import unicode_literals +import sure # noqa + +import moto.server as server + +''' +Test the different server responses +''' + + +def test_elb_describe_instances(): + backend = server.create_backend_app("elb") + test_client = backend.test_client() + + res = test_client.get('/?Action=DescribeLoadBalancers&Version=2015-12-01') + + res.data.should.contain(b'DescribeLoadBalancersResponse') diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index b58345fdb..cf0722bb2 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -1,1588 +1,1588 @@ -from __future__ import unicode_literals - -import json -import os -import boto3 -import botocore -from botocore.exceptions import ClientError -from nose.tools import assert_raises -import sure # noqa - -from moto import mock_elbv2, mock_ec2, mock_acm, mock_cloudformation -from moto.elbv2 import elbv2_backends - - -@mock_elbv2 -@mock_ec2 -def test_create_load_balancer(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - 
CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - lb = response.get('LoadBalancers')[0] - - lb.get('DNSName').should.equal("my-lb-1.us-east-1.elb.amazonaws.com") - lb.get('LoadBalancerArn').should.equal( - 'arn:aws:elasticloadbalancing:us-east-1:1:loadbalancer/my-lb/50dc6c495c0c9188') - lb.get('SecurityGroups').should.equal([security_group.id]) - lb.get('AvailabilityZones').should.equal([ - {'SubnetId': subnet1.id, 'ZoneName': 'us-east-1a'}, - {'SubnetId': subnet2.id, 'ZoneName': 'us-east-1b'}]) - - # Ensure the tags persisted - response = conn.describe_tags(ResourceArns=[lb.get('LoadBalancerArn')]) - tags = {d['Key']: d['Value'] - for d in response['TagDescriptions'][0]['Tags']} - tags.should.equal({'key_name': 'a_value'}) - - -@mock_elbv2 -@mock_ec2 -def test_describe_load_balancers(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - response = conn.describe_load_balancers() - - response.get('LoadBalancers').should.have.length_of(1) - lb = response.get('LoadBalancers')[0] - lb.get('LoadBalancerName').should.equal('my-lb') - - response = conn.describe_load_balancers( - LoadBalancerArns=[lb.get('LoadBalancerArn')]) - response.get('LoadBalancers')[0].get( - 'LoadBalancerName').should.equal('my-lb') - - response = conn.describe_load_balancers(Names=['my-lb']) - response.get('LoadBalancers')[0].get( - 'LoadBalancerName').should.equal('my-lb') - - with assert_raises(ClientError): - conn.describe_load_balancers(LoadBalancerArns=['not-a/real/arn']) - with assert_raises(ClientError): - conn.describe_load_balancers(Names=['nope']) - - -@mock_elbv2 -@mock_ec2 -def test_add_remove_tags(): - conn = boto3.client('elbv2', region_name='us-east-1') - - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - lbs = conn.describe_load_balancers()['LoadBalancers'] - lbs.should.have.length_of(1) - lb = lbs[0] - - with assert_raises(ClientError): - conn.add_tags(ResourceArns=['missing-arn'], - Tags=[{ - 'Key': 'a', - 'Value': 'b' - }]) - - conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], - Tags=[{ - 'Key': 'a', - 'Value': 'b' - }]) - - tags = {d['Key']: 
d['Value'] for d in conn.describe_tags( - ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} - tags.should.have.key('a').which.should.equal('b') - - conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], - Tags=[{ - 'Key': 'a', - 'Value': 'b' - }, { - 'Key': 'b', - 'Value': 'b' - }, { - 'Key': 'c', - 'Value': 'b' - }, { - 'Key': 'd', - 'Value': 'b' - }, { - 'Key': 'e', - 'Value': 'b' - }, { - 'Key': 'f', - 'Value': 'b' - }, { - 'Key': 'g', - 'Value': 'b' - }, { - 'Key': 'h', - 'Value': 'b' - }, { - 'Key': 'j', - 'Value': 'b' - }]) - - conn.add_tags.when.called_with(ResourceArns=[lb.get('LoadBalancerArn')], - Tags=[{ - 'Key': 'k', - 'Value': 'b' - }]).should.throw(botocore.exceptions.ClientError) - - conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], - Tags=[{ - 'Key': 'j', - 'Value': 'c' - }]) - - tags = {d['Key']: d['Value'] for d in conn.describe_tags( - ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} - - tags.should.have.key('a').which.should.equal('b') - tags.should.have.key('b').which.should.equal('b') - tags.should.have.key('c').which.should.equal('b') - tags.should.have.key('d').which.should.equal('b') - tags.should.have.key('e').which.should.equal('b') - tags.should.have.key('f').which.should.equal('b') - tags.should.have.key('g').which.should.equal('b') - tags.should.have.key('h').which.should.equal('b') - tags.should.have.key('j').which.should.equal('c') - tags.shouldnt.have.key('k') - - conn.remove_tags(ResourceArns=[lb.get('LoadBalancerArn')], - TagKeys=['a']) - - tags = {d['Key']: d['Value'] for d in conn.describe_tags( - ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} - - tags.shouldnt.have.key('a') - tags.should.have.key('b').which.should.equal('b') - tags.should.have.key('c').which.should.equal('b') - tags.should.have.key('d').which.should.equal('b') - tags.should.have.key('e').which.should.equal('b') - tags.should.have.key('f').which.should.equal('b') - tags.should.have.key('g').which.should.equal('b') - tags.should.have.key('h').which.should.equal('b') - tags.should.have.key('j').which.should.equal('c') - - -@mock_elbv2 -@mock_ec2 -def test_create_elb_in_multiple_region(): - for region in ['us-west-1', 'us-west-2']: - conn = boto3.client('elbv2', region_name=region) - ec2 = boto3.resource('ec2', region_name=region) - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc( - CidrBlock='172.28.7.0/24', - InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone=region + 'a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone=region + 'b') - - conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - list( - boto3.client( - 'elbv2', - region_name='us-west-1').describe_load_balancers().get('LoadBalancers') - ).should.have.length_of(1) - list( - boto3.client( - 'elbv2', - region_name='us-west-2').describe_load_balancers().get('LoadBalancers') - ).should.have.length_of(1) - - -@mock_elbv2 -@mock_ec2 -def test_create_target_group_and_listeners(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = 
ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') - - # Can't create a target group with an invalid protocol - with assert_raises(ClientError): - conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='/HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - response = conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - target_group = response.get('TargetGroups')[0] - target_group_arn = target_group['TargetGroupArn'] - - # Add tags to the target group - conn.add_tags(ResourceArns=[target_group_arn], Tags=[ - {'Key': 'target', 'Value': 'group'}]) - conn.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags'].should.equal( - [{'Key': 'target', 'Value': 'group'}]) - - # Check it's in the describe_target_groups response - response = conn.describe_target_groups() - response.get('TargetGroups').should.have.length_of(1) - - # Plain HTTP listener - response = conn.create_listener( - LoadBalancerArn=load_balancer_arn, - Protocol='HTTP', - Port=80, - DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}]) - listener = response.get('Listeners')[0] - listener.get('Port').should.equal(80) - listener.get('Protocol').should.equal('HTTP') - listener.get('DefaultActions').should.equal([{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward'}]) - http_listener_arn = listener.get('ListenerArn') - - response = conn.describe_target_groups(LoadBalancerArn=load_balancer_arn, - Names=['a-target']) - response.get('TargetGroups').should.have.length_of(1) - - # And another with SSL - response = conn.create_listener( - LoadBalancerArn=load_balancer_arn, - Protocol='HTTPS', - Port=443, - Certificates=[ - {'CertificateArn': 'arn:aws:iam:123456789012:server-certificate/test-cert'}], - DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}]) - listener = response.get('Listeners')[0] - listener.get('Port').should.equal(443) - listener.get('Protocol').should.equal('HTTPS') - listener.get('Certificates').should.equal([{ - 'CertificateArn': 'arn:aws:iam:123456789012:server-certificate/test-cert', - }]) - listener.get('DefaultActions').should.equal([{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward'}]) - - https_listener_arn = listener.get('ListenerArn') - - response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn) - response.get('Listeners').should.have.length_of(2) - response = conn.describe_listeners(ListenerArns=[https_listener_arn]) - 
response.get('Listeners').should.have.length_of(1)
-    listener = response.get('Listeners')[0]
-    listener.get('Port').should.equal(443)
-    listener.get('Protocol').should.equal('HTTPS')
-
-    response = conn.describe_listeners(
-        ListenerArns=[
-            http_listener_arn,
-            https_listener_arn])
-    response.get('Listeners').should.have.length_of(2)
-
-    # Try to delete the target group and it fails because there's a
-    # listener referencing it
-    with assert_raises(ClientError) as e:
-        conn.delete_target_group(
-            TargetGroupArn=target_group.get('TargetGroupArn'))
-    e.exception.operation_name.should.equal('DeleteTargetGroup')
-    e.exception.args.should.equal(("An error occurred (ResourceInUse) when calling the DeleteTargetGroup operation: The target group 'arn:aws:elasticloadbalancing:us-east-1:1:targetgroup/a-target/50dc6c495c0c9188' is currently in use by a listener or a rule", )) # NOQA
-
-    # Delete one listener
-    response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn)
-    response.get('Listeners').should.have.length_of(2)
-    conn.delete_listener(ListenerArn=http_listener_arn)
-    response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn)
-    response.get('Listeners').should.have.length_of(1)
-
-    # Then delete the load balancer
-    conn.delete_load_balancer(LoadBalancerArn=load_balancer_arn)
-
-    # It's gone
-    response = conn.describe_load_balancers()
-    response.get('LoadBalancers').should.have.length_of(0)
-
-    # And it deleted the remaining listener
-    response = conn.describe_listeners(
-        ListenerArns=[
-            http_listener_arn,
-            https_listener_arn])
-    response.get('Listeners').should.have.length_of(0)
-
-    # But not the target groups
-    response = conn.describe_target_groups()
-    response.get('TargetGroups').should.have.length_of(1)
-
-    # Which we'll now delete
-    conn.delete_target_group(TargetGroupArn=target_group.get('TargetGroupArn'))
-    response = conn.describe_target_groups()
-    response.get('TargetGroups').should.have.length_of(0)
-
-
-@mock_elbv2
-@mock_ec2
-def test_create_target_group_without_non_required_parameters():
-    conn = boto3.client('elbv2', region_name='us-east-1')
-    ec2 = boto3.resource('ec2', region_name='us-east-1')
-
-    security_group = ec2.create_security_group(
-        GroupName='a-security-group', Description='First One')
-    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
-    subnet1 = ec2.create_subnet(
-        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
-        AvailabilityZone='us-east-1a')
-    subnet2 = ec2.create_subnet(
-        VpcId=vpc.id,
-        CidrBlock='172.28.7.192/26',
-        AvailabilityZone='us-east-1b')
-
-    response = conn.create_load_balancer(
-        Name='my-lb',
-        Subnets=[subnet1.id, subnet2.id],
-        SecurityGroups=[security_group.id],
-        Scheme='internal',
-        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
-
-    # request without the HealthCheckIntervalSeconds parameter,
-    # which defaults to 30 seconds
-    response = conn.create_target_group(
-        Name='a-target',
-        Protocol='HTTP',
-        Port=8080,
-        VpcId=vpc.id,
-        HealthCheckProtocol='HTTP',
-        HealthCheckPort='8080'
-    )
-    target_group = response.get('TargetGroups')[0]
-    target_group.should_not.be.none
-
-
-@mock_elbv2
-@mock_ec2
-def test_create_invalid_target_group():
-    conn = boto3.client('elbv2', region_name='us-east-1')
-    ec2 = boto3.resource('ec2', region_name='us-east-1')
-
-    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
-
-    # Fail to create a target group whose name is 33 characters long
-    long_name = 'A' * 33
-    with assert_raises(ClientError):
-        conn.create_target_group(
-            Name=long_name,
-            
Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - - invalid_names = [ - '-name', - 'name-', - '-name-', - 'example.com', - 'test@test', - 'Na--me'] - for name in invalid_names: - with assert_raises(ClientError): - conn.create_target_group( - Name=name, - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - - valid_names = ['name', 'Name', '000'] - for name in valid_names: - conn.create_target_group( - Name=name, - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - - -@mock_elbv2 -@mock_ec2 -def test_describe_paginated_balancers(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - for i in range(51): - conn.create_load_balancer( - Name='my-lb%d' % i, - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - resp = conn.describe_load_balancers() - resp['LoadBalancers'].should.have.length_of(50) - resp['NextMarker'].should.equal( - resp['LoadBalancers'][-1]['LoadBalancerName']) - resp2 = conn.describe_load_balancers(Marker=resp['NextMarker']) - resp2['LoadBalancers'].should.have.length_of(1) - assert 'NextToken' not in resp2.keys() - - -@mock_elbv2 -@mock_ec2 -def test_delete_load_balancer(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - response.get('LoadBalancers').should.have.length_of(1) - lb = response.get('LoadBalancers')[0] - - conn.delete_load_balancer(LoadBalancerArn=lb.get('LoadBalancerArn')) - balancers = conn.describe_load_balancers().get('LoadBalancers') - balancers.should.have.length_of(0) - - -@mock_ec2 -@mock_elbv2 -def test_register_targets(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( 
- GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - response = conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - target_group = response.get('TargetGroups')[0] - - # No targets registered yet - response = conn.describe_target_health( - TargetGroupArn=target_group.get('TargetGroupArn')) - response.get('TargetHealthDescriptions').should.have.length_of(0) - - response = ec2.create_instances( - ImageId='ami-1234abcd', MinCount=2, MaxCount=2) - instance_id1 = response[0].id - instance_id2 = response[1].id - - response = conn.register_targets( - TargetGroupArn=target_group.get('TargetGroupArn'), - Targets=[ - { - 'Id': instance_id1, - 'Port': 5060, - }, - { - 'Id': instance_id2, - 'Port': 4030, - }, - ]) - - response = conn.describe_target_health( - TargetGroupArn=target_group.get('TargetGroupArn')) - response.get('TargetHealthDescriptions').should.have.length_of(2) - - response = conn.deregister_targets( - TargetGroupArn=target_group.get('TargetGroupArn'), - Targets=[{'Id': instance_id2}]) - - response = conn.describe_target_health( - TargetGroupArn=target_group.get('TargetGroupArn')) - response.get('TargetHealthDescriptions').should.have.length_of(1) - - -@mock_ec2 -@mock_elbv2 -def test_target_group_attributes(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - response = conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - target_group = response.get('TargetGroups')[0] - - # Check it's in the describe_target_groups response - response = conn.describe_target_groups() - response.get('TargetGroups').should.have.length_of(1) - target_group_arn = target_group['TargetGroupArn'] - - # check if Names filter works - response = conn.describe_target_groups(Names=[]) - response = conn.describe_target_groups(Names=['a-target']) - response.get('TargetGroups').should.have.length_of(1) - target_group_arn = 
target_group['TargetGroupArn'] - - # The attributes should start with the two defaults - response = conn.describe_target_group_attributes( - TargetGroupArn=target_group_arn) - response['Attributes'].should.have.length_of(2) - attributes = {attr['Key']: attr['Value'] - for attr in response['Attributes']} - attributes['deregistration_delay.timeout_seconds'].should.equal('300') - attributes['stickiness.enabled'].should.equal('false') - - # Add cookie stickiness - response = conn.modify_target_group_attributes( - TargetGroupArn=target_group_arn, - Attributes=[ - { - 'Key': 'stickiness.enabled', - 'Value': 'true', - }, - { - 'Key': 'stickiness.type', - 'Value': 'lb_cookie', - }, - ]) - - # The response should have only the keys updated - response['Attributes'].should.have.length_of(2) - attributes = {attr['Key']: attr['Value'] - for attr in response['Attributes']} - attributes['stickiness.type'].should.equal('lb_cookie') - attributes['stickiness.enabled'].should.equal('true') - - # These new values should be in the full attribute list - response = conn.describe_target_group_attributes( - TargetGroupArn=target_group_arn) - response['Attributes'].should.have.length_of(3) - attributes = {attr['Key']: attr['Value'] - for attr in response['Attributes']} - attributes['stickiness.type'].should.equal('lb_cookie') - attributes['stickiness.enabled'].should.equal('true') - - -@mock_elbv2 -@mock_ec2 -def test_handle_listener_rules(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') - - # Can't create a target group with an invalid protocol - with assert_raises(ClientError): - conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='/HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - response = conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - target_group = response.get('TargetGroups')[0] - - # Plain HTTP listener - response = conn.create_listener( - LoadBalancerArn=load_balancer_arn, - Protocol='HTTP', - Port=80, - DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}]) - listener = response.get('Listeners')[0] - listener.get('Port').should.equal(80) - listener.get('Protocol').should.equal('HTTP') - listener.get('DefaultActions').should.equal([{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward'}]) - http_listener_arn = 
listener.get('ListenerArn') - - # create first rule - priority = 100 - host = 'xxx.example.com' - path_pattern = 'foobar' - created_rule = conn.create_rule( - ListenerArn=http_listener_arn, - Priority=priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [host] - }, - { - 'Field': 'path-pattern', - 'Values': [path_pattern] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward' - }] - )['Rules'][0] - created_rule['Priority'].should.equal('100') - - # check if rules is sorted by priority - priority = 50 - host = 'yyy.example.com' - path_pattern = 'foobar' - rules = conn.create_rule( - ListenerArn=http_listener_arn, - Priority=priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [host] - }, - { - 'Field': 'path-pattern', - 'Values': [path_pattern] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward' - }] - ) - - # test for PriorityInUse - with assert_raises(ClientError): - conn.create_rule( - ListenerArn=http_listener_arn, - Priority=priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [host] - }, - { - 'Field': 'path-pattern', - 'Values': [path_pattern] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward' - }] - ) - - # test for describe listeners - obtained_rules = conn.describe_rules(ListenerArn=http_listener_arn) - len(obtained_rules['Rules']).should.equal(3) - priorities = [rule['Priority'] for rule in obtained_rules['Rules']] - priorities.should.equal(['50', '100', 'default']) - - first_rule = obtained_rules['Rules'][0] - second_rule = obtained_rules['Rules'][1] - obtained_rules = conn.describe_rules(RuleArns=[first_rule['RuleArn']]) - obtained_rules['Rules'].should.equal([first_rule]) - - # test for pagination - obtained_rules = conn.describe_rules( - ListenerArn=http_listener_arn, PageSize=1) - len(obtained_rules['Rules']).should.equal(1) - obtained_rules.should.have.key('NextMarker') - next_marker = obtained_rules['NextMarker'] - - following_rules = conn.describe_rules( - ListenerArn=http_listener_arn, - PageSize=1, - Marker=next_marker) - len(following_rules['Rules']).should.equal(1) - following_rules.should.have.key('NextMarker') - following_rules['Rules'][0]['RuleArn'].should_not.equal( - obtained_rules['Rules'][0]['RuleArn']) - - # test for invalid describe rule request - with assert_raises(ClientError): - conn.describe_rules() - with assert_raises(ClientError): - conn.describe_rules(RuleArns=[]) - with assert_raises(ClientError): - conn.describe_rules( - ListenerArn=http_listener_arn, - RuleArns=[first_rule['RuleArn']] - ) - - # modify rule partially - new_host = 'new.example.com' - new_path_pattern = 'new_path' - modified_rule = conn.modify_rule( - RuleArn=first_rule['RuleArn'], - Conditions=[{ - 'Field': 'host-header', - 'Values': [new_host] - }, - { - 'Field': 'path-pattern', - 'Values': [new_path_pattern] - }] - )['Rules'][0] - - rules = conn.describe_rules(ListenerArn=http_listener_arn) - obtained_rule = rules['Rules'][0] - modified_rule.should.equal(obtained_rule) - obtained_rule['Conditions'][0]['Values'][0].should.equal(new_host) - obtained_rule['Conditions'][1]['Values'][0].should.equal(new_path_pattern) - obtained_rule['Actions'][0]['TargetGroupArn'].should.equal( - target_group.get('TargetGroupArn')) - - # modify priority - conn.set_rule_priorities( - RulePriorities=[ - {'RuleArn': first_rule['RuleArn'], - 'Priority': int(first_rule['Priority']) - 1} - ] - ) - with assert_raises(ClientError): - 
conn.set_rule_priorities( - RulePriorities=[ - {'RuleArn': first_rule['RuleArn'], 'Priority': 999}, - {'RuleArn': second_rule['RuleArn'], 'Priority': 999} - ] - ) - - # delete - arn = first_rule['RuleArn'] - conn.delete_rule(RuleArn=arn) - rules = conn.describe_rules(ListenerArn=http_listener_arn)['Rules'] - len(rules).should.equal(2) - - # test for invalid action type - safe_priority = 2 - with assert_raises(ClientError): - conn.create_rule( - ListenerArn=http_listener_arn, - Priority=safe_priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [host] - }, - { - 'Field': 'path-pattern', - 'Values': [path_pattern] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward2' - }] - ) - - # test for invalid action type - safe_priority = 2 - invalid_target_group_arn = target_group.get('TargetGroupArn') + 'x' - with assert_raises(ClientError): - conn.create_rule( - ListenerArn=http_listener_arn, - Priority=safe_priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [host] - }, - { - 'Field': 'path-pattern', - 'Values': [path_pattern] - }], - Actions=[{ - 'TargetGroupArn': invalid_target_group_arn, - 'Type': 'forward' - }] - ) - - # test for invalid condition field_name - safe_priority = 2 - with assert_raises(ClientError): - conn.create_rule( - ListenerArn=http_listener_arn, - Priority=safe_priority, - Conditions=[{ - 'Field': 'xxxxxxx', - 'Values': [host] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward' - }] - ) - - # test for emptry condition value - safe_priority = 2 - with assert_raises(ClientError): - conn.create_rule( - ListenerArn=http_listener_arn, - Priority=safe_priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward' - }] - ) - - # test for multiple condition value - safe_priority = 2 - with assert_raises(ClientError): - conn.create_rule( - ListenerArn=http_listener_arn, - Priority=safe_priority, - Conditions=[{ - 'Field': 'host-header', - 'Values': [host, host] - }], - Actions=[{ - 'TargetGroupArn': target_group.get('TargetGroupArn'), - 'Type': 'forward' - }] - ) - - -@mock_elbv2 -@mock_ec2 -def test_describe_invalid_target_group(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - response.get('LoadBalancers')[0].get('LoadBalancerArn') - - response = conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - - # Check error raises correctly - with assert_raises(ClientError): - conn.describe_target_groups(Names=['invalid']) - - -@mock_elbv2 -@mock_ec2 -def 
test_describe_target_groups_no_arguments(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - response.get('LoadBalancers')[0].get('LoadBalancerArn') - - conn.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - - assert len(conn.describe_target_groups()['TargetGroups']) == 1 - - -@mock_elbv2 -def test_describe_account_limits(): - client = boto3.client('elbv2', region_name='eu-central-1') - - resp = client.describe_account_limits() - resp['Limits'][0].should.contain('Name') - resp['Limits'][0].should.contain('Max') - - -@mock_elbv2 -def test_describe_ssl_policies(): - client = boto3.client('elbv2', region_name='eu-central-1') - - resp = client.describe_ssl_policies() - len(resp['SslPolicies']).should.equal(5) - - resp = client.describe_ssl_policies(Names=['ELBSecurityPolicy-TLS-1-2-2017-01', 'ELBSecurityPolicy-2016-08']) - len(resp['SslPolicies']).should.equal(2) - - -@mock_elbv2 -@mock_ec2 -def test_set_ip_address_type(): - client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = client.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - arn = response['LoadBalancers'][0]['LoadBalancerArn'] - - # Internal LBs cant be dualstack yet - with assert_raises(ClientError): - client.set_ip_address_type( - LoadBalancerArn=arn, - IpAddressType='dualstack' - ) - - # Create internet facing one - response = client.create_load_balancer( - Name='my-lb2', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internet-facing', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - arn = response['LoadBalancers'][0]['LoadBalancerArn'] - - client.set_ip_address_type( - LoadBalancerArn=arn, - IpAddressType='dualstack' - ) - - -@mock_elbv2 -@mock_ec2 -def test_set_security_groups(): - client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - security_group2 = ec2.create_security_group( - 
GroupName='b-security-group', Description='Second One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = client.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - arn = response['LoadBalancers'][0]['LoadBalancerArn'] - - client.set_security_groups( - LoadBalancerArn=arn, - SecurityGroups=[security_group.id, security_group2.id] - ) - - resp = client.describe_load_balancers(LoadBalancerArns=[arn]) - len(resp['LoadBalancers'][0]['SecurityGroups']).should.equal(2) - - with assert_raises(ClientError): - client.set_security_groups( - LoadBalancerArn=arn, - SecurityGroups=['non_existant'] - ) - - -@mock_elbv2 -@mock_ec2 -def test_set_subnets(): - client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - subnet3 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1c') - - response = client.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - arn = response['LoadBalancers'][0]['LoadBalancerArn'] - - client.set_subnets( - LoadBalancerArn=arn, - Subnets=[subnet1.id, subnet2.id, subnet3.id] - ) - - resp = client.describe_load_balancers(LoadBalancerArns=[arn]) - len(resp['LoadBalancers'][0]['AvailabilityZones']).should.equal(3) - - # Only 1 AZ - with assert_raises(ClientError): - client.set_subnets( - LoadBalancerArn=arn, - Subnets=[subnet1.id] - ) - - # Multiple subnets in same AZ - with assert_raises(ClientError): - client.set_subnets( - LoadBalancerArn=arn, - Subnets=[subnet1.id, subnet2.id, subnet2.id] - ) - - -@mock_elbv2 -@mock_ec2 -def test_set_subnets(): - client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - response = client.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - arn = response['LoadBalancers'][0]['LoadBalancerArn'] - - client.modify_load_balancer_attributes( - LoadBalancerArn=arn, - Attributes=[{'Key': 'idle_timeout.timeout_seconds', 'Value': '600'}] - ) - - # Check its 600 not 60 - response = client.describe_load_balancer_attributes( - LoadBalancerArn=arn - ) - idle_timeout = 
list(filter(lambda item: item['Key'] == 'idle_timeout.timeout_seconds', response['Attributes']))[0] - idle_timeout['Value'].should.equal('600') - - -@mock_elbv2 -@mock_ec2 -def test_modify_target_group(): - client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - - response = client.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - arn = response.get('TargetGroups')[0]['TargetGroupArn'] - - client.modify_target_group( - TargetGroupArn=arn, - HealthCheckProtocol='HTTPS', - HealthCheckPort='8081', - HealthCheckPath='/status', - HealthCheckIntervalSeconds=10, - HealthCheckTimeoutSeconds=10, - HealthyThresholdCount=10, - UnhealthyThresholdCount=4, - Matcher={'HttpCode': '200-399'} - ) - - response = client.describe_target_groups( - TargetGroupArns=[arn] - ) - response['TargetGroups'][0]['Matcher']['HttpCode'].should.equal('200-399') - response['TargetGroups'][0]['HealthCheckIntervalSeconds'].should.equal(10) - response['TargetGroups'][0]['HealthCheckPath'].should.equal('/status') - response['TargetGroups'][0]['HealthCheckPort'].should.equal('8081') - response['TargetGroups'][0]['HealthCheckProtocol'].should.equal('HTTPS') - response['TargetGroups'][0]['HealthCheckTimeoutSeconds'].should.equal(10) - response['TargetGroups'][0]['HealthyThresholdCount'].should.equal(10) - response['TargetGroups'][0]['UnhealthyThresholdCount'].should.equal(4) - - -@mock_elbv2 -@mock_ec2 -@mock_acm -def test_modify_listener_http_to_https(): - client = boto3.client('elbv2', region_name='eu-central-1') - acm = boto3.client('acm', region_name='eu-central-1') - ec2 = boto3.resource('ec2', region_name='eu-central-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='eu-central-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='eu-central-1b') - - response = client.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) - - load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') - - response = client.create_target_group( - Name='a-target', - Protocol='HTTP', - Port=8080, - VpcId=vpc.id, - HealthCheckProtocol='HTTP', - HealthCheckPort='8080', - HealthCheckPath='/', - HealthCheckIntervalSeconds=5, - HealthCheckTimeoutSeconds=5, - HealthyThresholdCount=5, - UnhealthyThresholdCount=2, - Matcher={'HttpCode': '200'}) - target_group = response.get('TargetGroups')[0] - target_group_arn = target_group['TargetGroupArn'] - - # Plain HTTP listener - response = client.create_listener( - LoadBalancerArn=load_balancer_arn, - Protocol='HTTP', - Port=80, - DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group_arn}] - ) - listener_arn = response['Listeners'][0]['ListenerArn'] - - response = acm.request_certificate( - DomainName='google.com', - SubjectAlternativeNames=['google.com', 'www.google.com', 
'mail.google.com'], - ) - google_arn = response['CertificateArn'] - response = acm.request_certificate( - DomainName='yahoo.com', - SubjectAlternativeNames=['yahoo.com', 'www.yahoo.com', 'mail.yahoo.com'], - ) - yahoo_arn = response['CertificateArn'] - - response = client.modify_listener( - ListenerArn=listener_arn, - Port=443, - Protocol='HTTPS', - SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01', - Certificates=[ - {'CertificateArn': google_arn, 'IsDefault': False}, - {'CertificateArn': yahoo_arn, 'IsDefault': True} - ], - DefaultActions=[ - {'Type': 'forward', 'TargetGroupArn': target_group_arn} - ] - ) - response['Listeners'][0]['Port'].should.equal(443) - response['Listeners'][0]['Protocol'].should.equal('HTTPS') - response['Listeners'][0]['SslPolicy'].should.equal('ELBSecurityPolicy-TLS-1-2-2017-01') - len(response['Listeners'][0]['Certificates']).should.equal(2) - - # Check default cert, can't do this in server mode - if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false': - listener = elbv2_backends['eu-central-1'].load_balancers[load_balancer_arn].listeners[listener_arn] - listener.certificate.should.equal(yahoo_arn) - - # No default cert - with assert_raises(ClientError): - client.modify_listener( - ListenerArn=listener_arn, - Port=443, - Protocol='HTTPS', - SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01', - Certificates=[ - {'CertificateArn': google_arn, 'IsDefault': False} - ], - DefaultActions=[ - {'Type': 'forward', 'TargetGroupArn': target_group_arn} - ] - ) - - # Bad cert - with assert_raises(ClientError): - client.modify_listener( - ListenerArn=listener_arn, - Port=443, - Protocol='HTTPS', - SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01', - Certificates=[ - {'CertificateArn': 'lalala', 'IsDefault': True} - ], - DefaultActions=[ - {'Type': 'forward', 'TargetGroupArn': target_group_arn} - ] - ) - - -@mock_ec2 -@mock_elbv2 -@mock_cloudformation -def test_create_target_groups_through_cloudformation(): - cfn_conn = boto3.client('cloudformation', region_name='us-east-1') - elbv2_client = boto3.client('elbv2', region_name='us-east-1') - - # test that setting a name manually as well as letting cloudformation create a name both work - # this is a special case because test groups have a name length limit of 22 characters, and must be unique - # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-targetgroup.html#cfn-elasticloadbalancingv2-targetgroup-name - template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "ECS Cluster Test CloudFormation", - "Resources": { - "testVPC": { - "Type": "AWS::EC2::VPC", - "Properties": { - "CidrBlock": "10.0.0.0/16", - }, - }, - "testGroup1": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Port": 80, - "Protocol": "HTTP", - "VpcId": {"Ref": "testVPC"}, - }, - }, - "testGroup2": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Port": 90, - "Protocol": "HTTP", - "VpcId": {"Ref": "testVPC"}, - }, - }, - "testGroup3": { - "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", - "Properties": { - "Name": "MyTargetGroup", - "Port": 70, - "Protocol": "HTTPS", - "VpcId": {"Ref": "testVPC"}, - }, - }, - } - } - template_json = json.dumps(template) - cfn_conn.create_stack( - StackName="test-stack", - TemplateBody=template_json, - ) - - describe_target_groups_response = elbv2_client.describe_target_groups() - target_group_dicts = describe_target_groups_response['TargetGroups'] - assert len(target_group_dicts) == 3 - - # there should be 2 
target groups with the same prefix of 10 characters (since the random suffix is 12) - # and one named MyTargetGroup - assert len([tg for tg in target_group_dicts if tg['TargetGroupName'] == 'MyTargetGroup']) == 1 - assert len( - [tg for tg in target_group_dicts if tg['TargetGroupName'].startswith('test-stack')] - ) == 2 +from __future__ import unicode_literals + +import json +import os +import boto3 +import botocore +from botocore.exceptions import ClientError +from nose.tools import assert_raises +import sure # noqa + +from moto import mock_elbv2, mock_ec2, mock_acm, mock_cloudformation +from moto.elbv2 import elbv2_backends + + +@mock_elbv2 +@mock_ec2 +def test_create_load_balancer(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + lb = response.get('LoadBalancers')[0] + + lb.get('DNSName').should.equal("my-lb-1.us-east-1.elb.amazonaws.com") + lb.get('LoadBalancerArn').should.equal( + 'arn:aws:elasticloadbalancing:us-east-1:1:loadbalancer/my-lb/50dc6c495c0c9188') + lb.get('SecurityGroups').should.equal([security_group.id]) + lb.get('AvailabilityZones').should.equal([ + {'SubnetId': subnet1.id, 'ZoneName': 'us-east-1a'}, + {'SubnetId': subnet2.id, 'ZoneName': 'us-east-1b'}]) + + # Ensure the tags persisted + response = conn.describe_tags(ResourceArns=[lb.get('LoadBalancerArn')]) + tags = {d['Key']: d['Value'] + for d in response['TagDescriptions'][0]['Tags']} + tags.should.equal({'key_name': 'a_value'}) + + +@mock_elbv2 +@mock_ec2 +def test_describe_load_balancers(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response = conn.describe_load_balancers() + + response.get('LoadBalancers').should.have.length_of(1) + lb = response.get('LoadBalancers')[0] + lb.get('LoadBalancerName').should.equal('my-lb') + + response = conn.describe_load_balancers( + LoadBalancerArns=[lb.get('LoadBalancerArn')]) + response.get('LoadBalancers')[0].get( + 'LoadBalancerName').should.equal('my-lb') + + response = conn.describe_load_balancers(Names=['my-lb']) + response.get('LoadBalancers')[0].get( + 'LoadBalancerName').should.equal('my-lb') + + with assert_raises(ClientError): + conn.describe_load_balancers(LoadBalancerArns=['not-a/real/arn']) + with assert_raises(ClientError): + 
conn.describe_load_balancers(Names=['nope']) + + +@mock_elbv2 +@mock_ec2 +def test_add_remove_tags(): + conn = boto3.client('elbv2', region_name='us-east-1') + + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + lbs = conn.describe_load_balancers()['LoadBalancers'] + lbs.should.have.length_of(1) + lb = lbs[0] + + with assert_raises(ClientError): + conn.add_tags(ResourceArns=['missing-arn'], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }]) + + conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }]) + + tags = {d['Key']: d['Value'] for d in conn.describe_tags( + ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} + tags.should.have.key('a').which.should.equal('b') + + conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], + Tags=[{ + 'Key': 'a', + 'Value': 'b' + }, { + 'Key': 'b', + 'Value': 'b' + }, { + 'Key': 'c', + 'Value': 'b' + }, { + 'Key': 'd', + 'Value': 'b' + }, { + 'Key': 'e', + 'Value': 'b' + }, { + 'Key': 'f', + 'Value': 'b' + }, { + 'Key': 'g', + 'Value': 'b' + }, { + 'Key': 'h', + 'Value': 'b' + }, { + 'Key': 'j', + 'Value': 'b' + }]) + + conn.add_tags.when.called_with(ResourceArns=[lb.get('LoadBalancerArn')], + Tags=[{ + 'Key': 'k', + 'Value': 'b' + }]).should.throw(botocore.exceptions.ClientError) + + conn.add_tags(ResourceArns=[lb.get('LoadBalancerArn')], + Tags=[{ + 'Key': 'j', + 'Value': 'c' + }]) + + tags = {d['Key']: d['Value'] for d in conn.describe_tags( + ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} + + tags.should.have.key('a').which.should.equal('b') + tags.should.have.key('b').which.should.equal('b') + tags.should.have.key('c').which.should.equal('b') + tags.should.have.key('d').which.should.equal('b') + tags.should.have.key('e').which.should.equal('b') + tags.should.have.key('f').which.should.equal('b') + tags.should.have.key('g').which.should.equal('b') + tags.should.have.key('h').which.should.equal('b') + tags.should.have.key('j').which.should.equal('c') + tags.shouldnt.have.key('k') + + conn.remove_tags(ResourceArns=[lb.get('LoadBalancerArn')], + TagKeys=['a']) + + tags = {d['Key']: d['Value'] for d in conn.describe_tags( + ResourceArns=[lb.get('LoadBalancerArn')])['TagDescriptions'][0]['Tags']} + + tags.shouldnt.have.key('a') + tags.should.have.key('b').which.should.equal('b') + tags.should.have.key('c').which.should.equal('b') + tags.should.have.key('d').which.should.equal('b') + tags.should.have.key('e').which.should.equal('b') + tags.should.have.key('f').which.should.equal('b') + tags.should.have.key('g').which.should.equal('b') + tags.should.have.key('h').which.should.equal('b') + tags.should.have.key('j').which.should.equal('c') + + +@mock_elbv2 +@mock_ec2 +def test_create_elb_in_multiple_region(): + for region in ['us-west-1', 'us-west-2']: + conn = boto3.client('elbv2', region_name=region) + ec2 = boto3.resource('ec2', region_name=region) + + security_group = 
ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc( + CidrBlock='172.28.7.0/24', + InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone=region + 'a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone=region + 'b') + + conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + list( + boto3.client( + 'elbv2', + region_name='us-west-1').describe_load_balancers().get('LoadBalancers') + ).should.have.length_of(1) + list( + boto3.client( + 'elbv2', + region_name='us-west-2').describe_load_balancers().get('LoadBalancers') + ).should.have.length_of(1) + + +@mock_elbv2 +@mock_ec2 +def test_create_target_group_and_listeners(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') + + # Can't create a target group with an invalid protocol + with assert_raises(ClientError): + conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='/HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + response = conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group = response.get('TargetGroups')[0] + target_group_arn = target_group['TargetGroupArn'] + + # Add tags to the target group + conn.add_tags(ResourceArns=[target_group_arn], Tags=[ + {'Key': 'target', 'Value': 'group'}]) + conn.describe_tags(ResourceArns=[target_group_arn])['TagDescriptions'][0]['Tags'].should.equal( + [{'Key': 'target', 'Value': 'group'}]) + + # Check it's in the describe_target_groups response + response = conn.describe_target_groups() + response.get('TargetGroups').should.have.length_of(1) + + # Plain HTTP listener + response = conn.create_listener( + LoadBalancerArn=load_balancer_arn, + Protocol='HTTP', + Port=80, + DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}]) + listener = response.get('Listeners')[0] + listener.get('Port').should.equal(80) + listener.get('Protocol').should.equal('HTTP') + listener.get('DefaultActions').should.equal([{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 'forward'}]) + http_listener_arn = listener.get('ListenerArn') + + response 
= conn.describe_target_groups(LoadBalancerArn=load_balancer_arn, + Names=['a-target']) + response.get('TargetGroups').should.have.length_of(1) + + # And another with SSL + response = conn.create_listener( + LoadBalancerArn=load_balancer_arn, + Protocol='HTTPS', + Port=443, + Certificates=[ + {'CertificateArn': 'arn:aws:iam:123456789012:server-certificate/test-cert'}], + DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}]) + listener = response.get('Listeners')[0] + listener.get('Port').should.equal(443) + listener.get('Protocol').should.equal('HTTPS') + listener.get('Certificates').should.equal([{ + 'CertificateArn': 'arn:aws:iam:123456789012:server-certificate/test-cert', + }]) + listener.get('DefaultActions').should.equal([{ + 'TargetGroupArn': target_group.get('TargetGroupArn'), + 'Type': 'forward'}]) + + https_listener_arn = listener.get('ListenerArn') + + response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn) + response.get('Listeners').should.have.length_of(2) + response = conn.describe_listeners(ListenerArns=[https_listener_arn]) + response.get('Listeners').should.have.length_of(1) + listener = response.get('Listeners')[0] + listener.get('Port').should.equal(443) + listener.get('Protocol').should.equal('HTTPS') + + response = conn.describe_listeners( + ListenerArns=[ + http_listener_arn, + https_listener_arn]) + response.get('Listeners').should.have.length_of(2) + + # Try to delete the target group and it fails because there's a + # listener referencing it + with assert_raises(ClientError) as e: + conn.delete_target_group( + TargetGroupArn=target_group.get('TargetGroupArn')) + e.exception.operation_name.should.equal('DeleteTargetGroup') + e.exception.args.should.equal(("An error occurred (ResourceInUse) when calling the DeleteTargetGroup operation: The target group 'arn:aws:elasticloadbalancing:us-east-1:1:targetgroup/a-target/50dc6c495c0c9188' is currently in use by a listener or a rule", )) # NOQA + + # Delete one listener + response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn) + response.get('Listeners').should.have.length_of(2) + conn.delete_listener(ListenerArn=http_listener_arn) + response = conn.describe_listeners(LoadBalancerArn=load_balancer_arn) + response.get('Listeners').should.have.length_of(1) + + # Then delete the load balancer + conn.delete_load_balancer(LoadBalancerArn=load_balancer_arn) + + # It's gone + response = conn.describe_load_balancers() + response.get('LoadBalancers').should.have.length_of(0) + + # And it deleted the remaining listener + response = conn.describe_listeners( + ListenerArns=[ + http_listener_arn, + https_listener_arn]) + response.get('Listeners').should.have.length_of(0) + + # But not the target groups + response = conn.describe_target_groups() + response.get('TargetGroups').should.have.length_of(1) + + # Which we'll now delete + conn.delete_target_group(TargetGroupArn=target_group.get('TargetGroupArn')) + response = conn.describe_target_groups() + response.get('TargetGroups').should.have.length_of(0) + + +@mock_elbv2 +@mock_ec2 +def test_create_target_group_without_non_required_parameters(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + 
AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1b')
+
+    response = conn.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+
+    # request without the HealthCheckIntervalSeconds parameter,
+    # which defaults to 30 seconds
+    response = conn.create_target_group(
+        Name='a-target',
+        Protocol='HTTP',
+        Port=8080,
+        VpcId=vpc.id,
+        HealthCheckProtocol='HTTP',
+        HealthCheckPort='8080'
+    )
+    target_group = response.get('TargetGroups')[0]
+    target_group.should_not.be.none
+
+
+@mock_elbv2
+@mock_ec2
+def test_create_invalid_target_group():
+    conn = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+
+    # Fail to create a target group whose name is 33 characters long
+    long_name = 'A' * 33
+    with assert_raises(ClientError):
+        conn.create_target_group(
+            Name=long_name,
+            Protocol='HTTP',
+            Port=8080,
+            VpcId=vpc.id,
+            HealthCheckProtocol='HTTP',
+            HealthCheckPort='8080',
+            HealthCheckPath='/',
+            HealthCheckIntervalSeconds=5,
+            HealthCheckTimeoutSeconds=5,
+            HealthyThresholdCount=5,
+            UnhealthyThresholdCount=2,
+            Matcher={'HttpCode': '200'})
+
+    invalid_names = [
+        '-name',
+        'name-',
+        '-name-',
+        'example.com',
+        'test@test',
+        'Na--me']
+    for name in invalid_names:
+        with assert_raises(ClientError):
+            conn.create_target_group(
+                Name=name,
+                Protocol='HTTP',
+                Port=8080,
+                VpcId=vpc.id,
+                HealthCheckProtocol='HTTP',
+                HealthCheckPort='8080',
+                HealthCheckPath='/',
+                HealthCheckIntervalSeconds=5,
+                HealthCheckTimeoutSeconds=5,
+                HealthyThresholdCount=5,
+                UnhealthyThresholdCount=2,
+                Matcher={'HttpCode': '200'})
+
+    valid_names = ['name', 'Name', '000']
+    for name in valid_names:
+        conn.create_target_group(
+            Name=name,
+            Protocol='HTTP',
+            Port=8080,
+            VpcId=vpc.id,
+            HealthCheckProtocol='HTTP',
+            HealthCheckPort='8080',
+            HealthCheckPath='/',
+            HealthCheckIntervalSeconds=5,
+            HealthCheckTimeoutSeconds=5,
+            HealthyThresholdCount=5,
+            UnhealthyThresholdCount=2,
+            Matcher={'HttpCode': '200'})
+
+
+@mock_elbv2
+@mock_ec2
+def test_describe_paginated_balancers():
+    conn = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1b')
+
+    for i in range(51):
+        conn.create_load_balancer(
+            Name='my-lb%d' % i,
+            Subnets=[subnet1.id, subnet2.id],
+            SecurityGroups=[security_group.id],
+            Scheme='internal',
+            Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+
+    resp = conn.describe_load_balancers()
+    resp['LoadBalancers'].should.have.length_of(50)
+    resp['NextMarker'].should.equal(
+        resp['LoadBalancers'][-1]['LoadBalancerName'])
+    resp2 = conn.describe_load_balancers(Marker=resp['NextMarker'])
+    resp2['LoadBalancers'].should.have.length_of(1)
+    assert 'NextToken' not in resp2.keys()
+
+
+@mock_elbv2
+@mock_ec2
+def test_delete_load_balancer():
+    conn = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2',
region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response.get('LoadBalancers').should.have.length_of(1) + lb = response.get('LoadBalancers')[0] + + conn.delete_load_balancer(LoadBalancerArn=lb.get('LoadBalancerArn')) + balancers = conn.describe_load_balancers().get('LoadBalancers') + balancers.should.have.length_of(0) + + +@mock_ec2 +@mock_elbv2 +def test_register_targets(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response = conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group = response.get('TargetGroups')[0] + + # No targets registered yet + response = conn.describe_target_health( + TargetGroupArn=target_group.get('TargetGroupArn')) + response.get('TargetHealthDescriptions').should.have.length_of(0) + + response = ec2.create_instances( + ImageId='ami-1234abcd', MinCount=2, MaxCount=2) + instance_id1 = response[0].id + instance_id2 = response[1].id + + response = conn.register_targets( + TargetGroupArn=target_group.get('TargetGroupArn'), + Targets=[ + { + 'Id': instance_id1, + 'Port': 5060, + }, + { + 'Id': instance_id2, + 'Port': 4030, + }, + ]) + + response = conn.describe_target_health( + TargetGroupArn=target_group.get('TargetGroupArn')) + response.get('TargetHealthDescriptions').should.have.length_of(2) + + response = conn.deregister_targets( + TargetGroupArn=target_group.get('TargetGroupArn'), + Targets=[{'Id': instance_id2}]) + + response = conn.describe_target_health( + TargetGroupArn=target_group.get('TargetGroupArn')) + response.get('TargetHealthDescriptions').should.have.length_of(1) + + +@mock_ec2 +@mock_elbv2 +def test_target_group_attributes(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = 
ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response = conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group = response.get('TargetGroups')[0] + + # Check it's in the describe_target_groups response + response = conn.describe_target_groups() + response.get('TargetGroups').should.have.length_of(1) + target_group_arn = target_group['TargetGroupArn'] + + # check if Names filter works + response = conn.describe_target_groups(Names=[]) + response = conn.describe_target_groups(Names=['a-target']) + response.get('TargetGroups').should.have.length_of(1) + target_group_arn = target_group['TargetGroupArn'] + + # The attributes should start with the two defaults + response = conn.describe_target_group_attributes( + TargetGroupArn=target_group_arn) + response['Attributes'].should.have.length_of(2) + attributes = {attr['Key']: attr['Value'] + for attr in response['Attributes']} + attributes['deregistration_delay.timeout_seconds'].should.equal('300') + attributes['stickiness.enabled'].should.equal('false') + + # Add cookie stickiness + response = conn.modify_target_group_attributes( + TargetGroupArn=target_group_arn, + Attributes=[ + { + 'Key': 'stickiness.enabled', + 'Value': 'true', + }, + { + 'Key': 'stickiness.type', + 'Value': 'lb_cookie', + }, + ]) + + # The response should have only the keys updated + response['Attributes'].should.have.length_of(2) + attributes = {attr['Key']: attr['Value'] + for attr in response['Attributes']} + attributes['stickiness.type'].should.equal('lb_cookie') + attributes['stickiness.enabled'].should.equal('true') + + # These new values should be in the full attribute list + response = conn.describe_target_group_attributes( + TargetGroupArn=target_group_arn) + response['Attributes'].should.have.length_of(3) + attributes = {attr['Key']: attr['Value'] + for attr in response['Attributes']} + attributes['stickiness.type'].should.equal('lb_cookie') + attributes['stickiness.enabled'].should.equal('true') + + +@mock_elbv2 +@mock_ec2 +def test_handle_listener_rules(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') + + # Can't create a target group with an invalid protocol + with assert_raises(ClientError): + conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + 
HealthCheckProtocol='/HTTP',
+            HealthCheckPort='8080',
+            HealthCheckPath='/',
+            HealthCheckIntervalSeconds=5,
+            HealthCheckTimeoutSeconds=5,
+            HealthyThresholdCount=5,
+            UnhealthyThresholdCount=2,
+            Matcher={'HttpCode': '200'})
+    response = conn.create_target_group(
+        Name='a-target',
+        Protocol='HTTP',
+        Port=8080,
+        VpcId=vpc.id,
+        HealthCheckProtocol='HTTP',
+        HealthCheckPort='8080',
+        HealthCheckPath='/',
+        HealthCheckIntervalSeconds=5,
+        HealthCheckTimeoutSeconds=5,
+        HealthyThresholdCount=5,
+        UnhealthyThresholdCount=2,
+        Matcher={'HttpCode': '200'})
+    target_group = response.get('TargetGroups')[0]
+
+    # Plain HTTP listener
+    response = conn.create_listener(
+        LoadBalancerArn=load_balancer_arn,
+        Protocol='HTTP',
+        Port=80,
+        DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group.get('TargetGroupArn')}])
+    listener = response.get('Listeners')[0]
+    listener.get('Port').should.equal(80)
+    listener.get('Protocol').should.equal('HTTP')
+    listener.get('DefaultActions').should.equal([{
+        'TargetGroupArn': target_group.get('TargetGroupArn'),
+        'Type': 'forward'}])
+    http_listener_arn = listener.get('ListenerArn')
+
+    # create first rule
+    priority = 100
+    host = 'xxx.example.com'
+    path_pattern = 'foobar'
+    created_rule = conn.create_rule(
+        ListenerArn=http_listener_arn,
+        Priority=priority,
+        Conditions=[{
+            'Field': 'host-header',
+            'Values': [host]
+        },
+        {
+            'Field': 'path-pattern',
+            'Values': [path_pattern]
+        }],
+        Actions=[{
+            'TargetGroupArn': target_group.get('TargetGroupArn'),
+            'Type': 'forward'
+        }]
+    )['Rules'][0]
+    created_rule['Priority'].should.equal('100')
+
+    # check that rules are sorted by priority
+    priority = 50
+    host = 'yyy.example.com'
+    path_pattern = 'foobar'
+    rules = conn.create_rule(
+        ListenerArn=http_listener_arn,
+        Priority=priority,
+        Conditions=[{
+            'Field': 'host-header',
+            'Values': [host]
+        },
+        {
+            'Field': 'path-pattern',
+            'Values': [path_pattern]
+        }],
+        Actions=[{
+            'TargetGroupArn': target_group.get('TargetGroupArn'),
+            'Type': 'forward'
+        }]
+    )
+
+    # test for PriorityInUse
+    with assert_raises(ClientError):
+        conn.create_rule(
+            ListenerArn=http_listener_arn,
+            Priority=priority,
+            Conditions=[{
+                'Field': 'host-header',
+                'Values': [host]
+            },
+            {
+                'Field': 'path-pattern',
+                'Values': [path_pattern]
+            }],
+            Actions=[{
+                'TargetGroupArn': target_group.get('TargetGroupArn'),
+                'Type': 'forward'
+            }]
+        )
+
+    # test for describe rules
+    obtained_rules = conn.describe_rules(ListenerArn=http_listener_arn)
+    len(obtained_rules['Rules']).should.equal(3)
+    priorities = [rule['Priority'] for rule in obtained_rules['Rules']]
+    priorities.should.equal(['50', '100', 'default'])
+
+    first_rule = obtained_rules['Rules'][0]
+    second_rule = obtained_rules['Rules'][1]
+    obtained_rules = conn.describe_rules(RuleArns=[first_rule['RuleArn']])
+    obtained_rules['Rules'].should.equal([first_rule])
+
+    # test for pagination
+    obtained_rules = conn.describe_rules(
+        ListenerArn=http_listener_arn, PageSize=1)
+    len(obtained_rules['Rules']).should.equal(1)
+    obtained_rules.should.have.key('NextMarker')
+    next_marker = obtained_rules['NextMarker']
+
+    following_rules = conn.describe_rules(
+        ListenerArn=http_listener_arn,
+        PageSize=1,
+        Marker=next_marker)
+    len(following_rules['Rules']).should.equal(1)
+    following_rules.should.have.key('NextMarker')
+    following_rules['Rules'][0]['RuleArn'].should_not.equal(
+        obtained_rules['Rules'][0]['RuleArn'])
+
+    # test for invalid describe rule request
+    with assert_raises(ClientError):
+        conn.describe_rules()
+    with assert_raises(ClientError):
+        conn.describe_rules(RuleArns=[])
+    with assert_raises(ClientError):
+        conn.describe_rules(
+            ListenerArn=http_listener_arn,
+            RuleArns=[first_rule['RuleArn']]
+        )
+
+    # modify rule partially
+    new_host = 'new.example.com'
+    new_path_pattern = 'new_path'
+    modified_rule = conn.modify_rule(
+        RuleArn=first_rule['RuleArn'],
+        Conditions=[{
+            'Field': 'host-header',
+            'Values': [new_host]
+        },
+        {
+            'Field': 'path-pattern',
+            'Values': [new_path_pattern]
+        }]
+    )['Rules'][0]
+
+    rules = conn.describe_rules(ListenerArn=http_listener_arn)
+    obtained_rule = rules['Rules'][0]
+    modified_rule.should.equal(obtained_rule)
+    obtained_rule['Conditions'][0]['Values'][0].should.equal(new_host)
+    obtained_rule['Conditions'][1]['Values'][0].should.equal(new_path_pattern)
+    obtained_rule['Actions'][0]['TargetGroupArn'].should.equal(
+        target_group.get('TargetGroupArn'))
+
+    # modify priority
+    conn.set_rule_priorities(
+        RulePriorities=[
+            {'RuleArn': first_rule['RuleArn'],
+             'Priority': int(first_rule['Priority']) - 1}
+        ]
+    )
+    with assert_raises(ClientError):
+        conn.set_rule_priorities(
+            RulePriorities=[
+                {'RuleArn': first_rule['RuleArn'], 'Priority': 999},
+                {'RuleArn': second_rule['RuleArn'], 'Priority': 999}
+            ]
+        )
+
+    # delete
+    arn = first_rule['RuleArn']
+    conn.delete_rule(RuleArn=arn)
+    rules = conn.describe_rules(ListenerArn=http_listener_arn)['Rules']
+    len(rules).should.equal(2)
+
+    # test for invalid action type
+    safe_priority = 2
+    with assert_raises(ClientError):
+        conn.create_rule(
+            ListenerArn=http_listener_arn,
+            Priority=safe_priority,
+            Conditions=[{
+                'Field': 'host-header',
+                'Values': [host]
+            },
+            {
+                'Field': 'path-pattern',
+                'Values': [path_pattern]
+            }],
+            Actions=[{
+                'TargetGroupArn': target_group.get('TargetGroupArn'),
+                'Type': 'forward2'
+            }]
+        )
+
+    # test for invalid target group arn
+    safe_priority = 2
+    invalid_target_group_arn = target_group.get('TargetGroupArn') + 'x'
+    with assert_raises(ClientError):
+        conn.create_rule(
+            ListenerArn=http_listener_arn,
+            Priority=safe_priority,
+            Conditions=[{
+                'Field': 'host-header',
+                'Values': [host]
+            },
+            {
+                'Field': 'path-pattern',
+                'Values': [path_pattern]
+            }],
+            Actions=[{
+                'TargetGroupArn': invalid_target_group_arn,
+                'Type': 'forward'
+            }]
+        )
+
+    # test for invalid condition field_name
+    safe_priority = 2
+    with assert_raises(ClientError):
+        conn.create_rule(
+            ListenerArn=http_listener_arn,
+            Priority=safe_priority,
+            Conditions=[{
+                'Field': 'xxxxxxx',
+                'Values': [host]
+            }],
+            Actions=[{
+                'TargetGroupArn': target_group.get('TargetGroupArn'),
+                'Type': 'forward'
+            }]
+        )
+
+    # test for empty condition value
+    safe_priority = 2
+    with assert_raises(ClientError):
+        conn.create_rule(
+            ListenerArn=http_listener_arn,
+            Priority=safe_priority,
+            Conditions=[{
+                'Field': 'host-header',
+                'Values': []
+            }],
+            Actions=[{
+                'TargetGroupArn': target_group.get('TargetGroupArn'),
+                'Type': 'forward'
+            }]
+        )
+
+    # test for multiple condition values
+    safe_priority = 2
+    with assert_raises(ClientError):
+        conn.create_rule(
+            ListenerArn=http_listener_arn,
+            Priority=safe_priority,
+            Conditions=[{
+                'Field': 'host-header',
+                'Values': [host, host]
+            }],
+            Actions=[{
+                'TargetGroupArn': target_group.get('TargetGroupArn'),
+                'Type': 'forward'
+            }]
+        )
+
+
+@mock_elbv2
+@mock_ec2
+def test_describe_invalid_target_group():
+    conn = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response.get('LoadBalancers')[0].get('LoadBalancerArn') + + response = conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + + # Check error raises correctly + with assert_raises(ClientError): + conn.describe_target_groups(Names=['invalid']) + + +@mock_elbv2 +@mock_ec2 +def test_describe_target_groups_no_arguments(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + response.get('LoadBalancers')[0].get('LoadBalancerArn') + + conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + + assert len(conn.describe_target_groups()['TargetGroups']) == 1 + + +@mock_elbv2 +def test_describe_account_limits(): + client = boto3.client('elbv2', region_name='eu-central-1') + + resp = client.describe_account_limits() + resp['Limits'][0].should.contain('Name') + resp['Limits'][0].should.contain('Max') + + +@mock_elbv2 +def test_describe_ssl_policies(): + client = boto3.client('elbv2', region_name='eu-central-1') + + resp = client.describe_ssl_policies() + len(resp['SslPolicies']).should.equal(5) + + resp = client.describe_ssl_policies(Names=['ELBSecurityPolicy-TLS-1-2-2017-01', 'ELBSecurityPolicy-2016-08']) + len(resp['SslPolicies']).should.equal(2) + + +@mock_elbv2 +@mock_ec2 +def test_set_ip_address_type(): + client = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = client.create_load_balancer( + 
Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+    arn = response['LoadBalancers'][0]['LoadBalancerArn']
+
+    # Internal LBs can't be dualstack yet
+    with assert_raises(ClientError):
+        client.set_ip_address_type(
+            LoadBalancerArn=arn,
+            IpAddressType='dualstack'
+        )
+
+    # Create an internet-facing one
+    response = client.create_load_balancer(
+        Name='my-lb2',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internet-facing',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+    arn = response['LoadBalancers'][0]['LoadBalancerArn']
+
+    client.set_ip_address_type(
+        LoadBalancerArn=arn,
+        IpAddressType='dualstack'
+    )
+
+
+@mock_elbv2
+@mock_ec2
+def test_set_security_groups():
+    client = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    security_group2 = ec2.create_security_group(
+        GroupName='b-security-group', Description='Second One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1b')
+
+    response = client.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+    arn = response['LoadBalancers'][0]['LoadBalancerArn']
+
+    client.set_security_groups(
+        LoadBalancerArn=arn,
+        SecurityGroups=[security_group.id, security_group2.id]
+    )
+
+    resp = client.describe_load_balancers(LoadBalancerArns=[arn])
+    len(resp['LoadBalancers'][0]['SecurityGroups']).should.equal(2)
+
+    with assert_raises(ClientError):
+        client.set_security_groups(
+            LoadBalancerArn=arn,
+            SecurityGroups=['non_existant']
+        )
+
+
+@mock_elbv2
+@mock_ec2
+def test_set_subnets():
+    client = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1b')
+    subnet3 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1c')
+
+    response = client.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+    arn = response['LoadBalancers'][0]['LoadBalancerArn']
+
+    client.set_subnets(
+        LoadBalancerArn=arn,
+        Subnets=[subnet1.id, subnet2.id, subnet3.id]
+    )
+
+    resp = client.describe_load_balancers(LoadBalancerArns=[arn])
+    len(resp['LoadBalancers'][0]['AvailabilityZones']).should.equal(3)
+
+    # Only 1 AZ
+    with assert_raises(ClientError):
+        client.set_subnets(
+            LoadBalancerArn=arn,
+            Subnets=[subnet1.id]
+        )
+
+    # Multiple subnets in same AZ
+    with assert_raises(ClientError):
+        client.set_subnets(
+            LoadBalancerArn=arn,
+            Subnets=[subnet1.id, subnet2.id, subnet2.id]
+        )
+
+
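For reference before the next test: `describe_load_balancer_attributes` returns a load balancer's settings as a flat list of Key/Value pairs, which is why the idle-timeout assertion below extracts the entry with `filter`/`lambda`. A minimal sketch of the same lookup as a dict comprehension; `attributes_as_dict` is an illustrative helper, not part of this patch:

    def attributes_as_dict(client, arn):
        # describe_load_balancer_attributes returns
        # {'Attributes': [{'Key': ..., 'Value': ...}, ...]}; flattening that
        # list into a dict makes single-key lookups read directly.
        resp = client.describe_load_balancer_attributes(LoadBalancerArn=arn)
        return {item['Key']: item['Value'] for item in resp['Attributes']}

    # usage: attributes_as_dict(client, arn)['idle_timeout.timeout_seconds'] == '600'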
+@mock_elbv2
+@mock_ec2
+def test_modify_load_balancer_attributes_idle_timeout():
+    client = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1b')
+
+    response = client.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+    arn = response['LoadBalancers'][0]['LoadBalancerArn']
+
+    client.modify_load_balancer_attributes(
+        LoadBalancerArn=arn,
+        Attributes=[{'Key': 'idle_timeout.timeout_seconds', 'Value': '600'}]
+    )
+
+    # Check it's 600, not 60
+    response = client.describe_load_balancer_attributes(
+        LoadBalancerArn=arn
+    )
+    idle_timeout = list(filter(lambda item: item['Key'] == 'idle_timeout.timeout_seconds', response['Attributes']))[0]
+    idle_timeout['Value'].should.equal('600')
+
+
+@mock_elbv2
+@mock_ec2
+def test_modify_target_group():
+    client = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+
+    response = client.create_target_group(
+        Name='a-target',
+        Protocol='HTTP',
+        Port=8080,
+        VpcId=vpc.id,
+        HealthCheckProtocol='HTTP',
+        HealthCheckPort='8080',
+        HealthCheckPath='/',
+        HealthCheckIntervalSeconds=5,
+        HealthCheckTimeoutSeconds=5,
+        HealthyThresholdCount=5,
+        UnhealthyThresholdCount=2,
+        Matcher={'HttpCode': '200'})
+    arn = response.get('TargetGroups')[0]['TargetGroupArn']
+
+    client.modify_target_group(
+        TargetGroupArn=arn,
+        HealthCheckProtocol='HTTPS',
+        HealthCheckPort='8081',
+        HealthCheckPath='/status',
+        HealthCheckIntervalSeconds=10,
+        HealthCheckTimeoutSeconds=10,
+        HealthyThresholdCount=10,
+        UnhealthyThresholdCount=4,
+        Matcher={'HttpCode': '200-399'}
+    )
+
+    response = client.describe_target_groups(
+        TargetGroupArns=[arn]
+    )
+    response['TargetGroups'][0]['Matcher']['HttpCode'].should.equal('200-399')
+    response['TargetGroups'][0]['HealthCheckIntervalSeconds'].should.equal(10)
+    response['TargetGroups'][0]['HealthCheckPath'].should.equal('/status')
+    response['TargetGroups'][0]['HealthCheckPort'].should.equal('8081')
+    response['TargetGroups'][0]['HealthCheckProtocol'].should.equal('HTTPS')
+    response['TargetGroups'][0]['HealthCheckTimeoutSeconds'].should.equal(10)
+    response['TargetGroups'][0]['HealthyThresholdCount'].should.equal(10)
+    response['TargetGroups'][0]['UnhealthyThresholdCount'].should.equal(4)
+
+
+@mock_elbv2
+@mock_ec2
+@mock_acm
+def test_modify_listener_http_to_https():
+    client = boto3.client('elbv2', region_name='eu-central-1')
+    acm = boto3.client('acm', region_name='eu-central-1')
+    ec2 = boto3.resource('ec2', region_name='eu-central-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='eu-central-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='eu-central-1b')
+
+    response = 
client.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[{'Key': 'key_name', 'Value': 'a_value'}])
+
+    load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn')
+
+    response = client.create_target_group(
+        Name='a-target',
+        Protocol='HTTP',
+        Port=8080,
+        VpcId=vpc.id,
+        HealthCheckProtocol='HTTP',
+        HealthCheckPort='8080',
+        HealthCheckPath='/',
+        HealthCheckIntervalSeconds=5,
+        HealthCheckTimeoutSeconds=5,
+        HealthyThresholdCount=5,
+        UnhealthyThresholdCount=2,
+        Matcher={'HttpCode': '200'})
+    target_group = response.get('TargetGroups')[0]
+    target_group_arn = target_group['TargetGroupArn']
+
+    # Plain HTTP listener
+    response = client.create_listener(
+        LoadBalancerArn=load_balancer_arn,
+        Protocol='HTTP',
+        Port=80,
+        DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group_arn}]
+    )
+    listener_arn = response['Listeners'][0]['ListenerArn']
+
+    response = acm.request_certificate(
+        DomainName='google.com',
+        SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'],
+    )
+    google_arn = response['CertificateArn']
+    response = acm.request_certificate(
+        DomainName='yahoo.com',
+        SubjectAlternativeNames=['yahoo.com', 'www.yahoo.com', 'mail.yahoo.com'],
+    )
+    yahoo_arn = response['CertificateArn']
+
+    response = client.modify_listener(
+        ListenerArn=listener_arn,
+        Port=443,
+        Protocol='HTTPS',
+        SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',
+        Certificates=[
+            {'CertificateArn': google_arn, 'IsDefault': False},
+            {'CertificateArn': yahoo_arn, 'IsDefault': True}
+        ],
+        DefaultActions=[
+            {'Type': 'forward', 'TargetGroupArn': target_group_arn}
+        ]
+    )
+    response['Listeners'][0]['Port'].should.equal(443)
+    response['Listeners'][0]['Protocol'].should.equal('HTTPS')
+    response['Listeners'][0]['SslPolicy'].should.equal('ELBSecurityPolicy-TLS-1-2-2017-01')
+    len(response['Listeners'][0]['Certificates']).should.equal(2)
+
+    # Check default cert, can't do this in server mode
+    if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false':
+        listener = elbv2_backends['eu-central-1'].load_balancers[load_balancer_arn].listeners[listener_arn]
+        listener.certificate.should.equal(yahoo_arn)
+
+    # No default cert
+    with assert_raises(ClientError):
+        client.modify_listener(
+            ListenerArn=listener_arn,
+            Port=443,
+            Protocol='HTTPS',
+            SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',
+            Certificates=[
+                {'CertificateArn': google_arn, 'IsDefault': False}
+            ],
+            DefaultActions=[
+                {'Type': 'forward', 'TargetGroupArn': target_group_arn}
+            ]
+        )
+
+    # Bad cert
+    with assert_raises(ClientError):
+        client.modify_listener(
+            ListenerArn=listener_arn,
+            Port=443,
+            Protocol='HTTPS',
+            SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',
+            Certificates=[
+                {'CertificateArn': 'lalala', 'IsDefault': True}
+            ],
+            DefaultActions=[
+                {'Type': 'forward', 'TargetGroupArn': target_group_arn}
+            ]
+        )
+
+
+@mock_ec2
+@mock_elbv2
+@mock_cloudformation
+def test_create_target_groups_through_cloudformation():
+    cfn_conn = boto3.client('cloudformation', region_name='us-east-1')
+    elbv2_client = boto3.client('elbv2', region_name='us-east-1')
+
+    # test that setting a name manually as well as letting CloudFormation create a name both work
+    # this is a special case because target groups have a name length limit of 22 characters, and must be unique
+    # 
https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticloadbalancingv2-targetgroup.html#cfn-elasticloadbalancingv2-targetgroup-name + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testVPC": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + }, + }, + "testGroup1": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "Port": 80, + "Protocol": "HTTP", + "VpcId": {"Ref": "testVPC"}, + }, + }, + "testGroup2": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "Port": 90, + "Protocol": "HTTP", + "VpcId": {"Ref": "testVPC"}, + }, + }, + "testGroup3": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "Name": "MyTargetGroup", + "Port": 70, + "Protocol": "HTTPS", + "VpcId": {"Ref": "testVPC"}, + }, + }, + } + } + template_json = json.dumps(template) + cfn_conn.create_stack( + StackName="test-stack", + TemplateBody=template_json, + ) + + describe_target_groups_response = elbv2_client.describe_target_groups() + target_group_dicts = describe_target_groups_response['TargetGroups'] + assert len(target_group_dicts) == 3 + + # there should be 2 target groups with the same prefix of 10 characters (since the random suffix is 12) + # and one named MyTargetGroup + assert len([tg for tg in target_group_dicts if tg['TargetGroupName'] == 'MyTargetGroup']) == 1 + assert len( + [tg for tg in target_group_dicts if tg['TargetGroupName'].startswith('test-stack')] + ) == 2 diff --git a/tests/test_elbv2/test_server.py b/tests/test_elbv2/test_server.py index ddd40a02d..7d47d23ad 100644 --- a/tests/test_elbv2/test_server.py +++ b/tests/test_elbv2/test_server.py @@ -1,17 +1,17 @@ -from __future__ import unicode_literals -import sure # noqa - -import moto.server as server - -''' -Test the different server responses -''' - - -def test_elbv2_describe_load_balancers(): - backend = server.create_backend_app("elbv2") - test_client = backend.test_client() - - res = test_client.get('/?Action=DescribeLoadBalancers&Version=2015-12-01') - - res.data.should.contain(b'DescribeLoadBalancersResponse') +from __future__ import unicode_literals +import sure # noqa + +import moto.server as server + +''' +Test the different server responses +''' + + +def test_elbv2_describe_load_balancers(): + backend = server.create_backend_app("elbv2") + test_client = backend.test_client() + + res = test_client.get('/?Action=DescribeLoadBalancers&Version=2015-12-01') + + res.data.should.contain(b'DescribeLoadBalancersResponse') diff --git a/tests/test_emr/test_emr.py b/tests/test_emr/test_emr.py index 505c69b11..a1918ac30 100644 --- a/tests/test_emr/test_emr.py +++ b/tests/test_emr/test_emr.py @@ -1,658 +1,658 @@ -from __future__ import unicode_literals -import time -from datetime import datetime - -import boto -import pytz -from boto.emr.bootstrap_action import BootstrapAction -from boto.emr.instance_group import InstanceGroup -from boto.emr.step import StreamingStep - -import six -import sure # noqa - -from moto import mock_emr_deprecated -from tests.helpers import requires_boto_gte - - -run_jobflow_args = dict( - job_flow_role='EMR_EC2_DefaultRole', - keep_alive=True, - log_uri='s3://some_bucket/jobflow_logs', - master_instance_type='c1.medium', - name='My jobflow', - num_instances=2, - service_role='EMR_DefaultRole', - slave_instance_type='c1.medium', -) - - -input_instance_groups = [ - InstanceGroup(1, 'MASTER', 'c1.medium', 'ON_DEMAND', 
'master'), - InstanceGroup(3, 'CORE', 'c1.medium', 'ON_DEMAND', 'core'), - InstanceGroup(6, 'TASK', 'c1.large', 'SPOT', 'task-1', '0.07'), - InstanceGroup(10, 'TASK', 'c1.xlarge', 'SPOT', 'task-2', '0.05'), -] - - -@mock_emr_deprecated -def test_describe_cluster(): - conn = boto.connect_emr() - args = run_jobflow_args.copy() - args.update(dict( - api_params={ - 'Applications.member.1.Name': 'Spark', - 'Applications.member.1.Version': '2.4.2', - 'Configurations.member.1.Classification': 'yarn-site', - 'Configurations.member.1.Properties.entry.1.key': 'someproperty', - 'Configurations.member.1.Properties.entry.1.value': 'somevalue', - 'Configurations.member.1.Properties.entry.2.key': 'someotherproperty', - 'Configurations.member.1.Properties.entry.2.value': 'someothervalue', - 'Instances.EmrManagedMasterSecurityGroup': 'master-security-group', - 'Instances.Ec2SubnetId': 'subnet-8be41cec', - }, - availability_zone='us-east-2b', - ec2_keyname='mykey', - job_flow_role='EMR_EC2_DefaultRole', - keep_alive=False, - log_uri='s3://some_bucket/jobflow_logs', - name='My jobflow', - service_role='EMR_DefaultRole', - visible_to_all_users=True, - )) - cluster_id = conn.run_jobflow(**args) - input_tags = {'tag1': 'val1', 'tag2': 'val2'} - conn.add_tags(cluster_id, input_tags) - - cluster = conn.describe_cluster(cluster_id) - cluster.applications[0].name.should.equal('Spark') - cluster.applications[0].version.should.equal('2.4.2') - cluster.autoterminate.should.equal('true') - - # configurations appear not be supplied as attributes? - - attrs = cluster.ec2instanceattributes - # AdditionalMasterSecurityGroups - # AdditionalSlaveSecurityGroups - attrs.ec2availabilityzone.should.equal(args['availability_zone']) - attrs.ec2keyname.should.equal(args['ec2_keyname']) - attrs.ec2subnetid.should.equal(args['api_params']['Instances.Ec2SubnetId']) - # EmrManagedMasterSecurityGroups - # EmrManagedSlaveSecurityGroups - attrs.iaminstanceprofile.should.equal(args['job_flow_role']) - # ServiceAccessSecurityGroup - - cluster.id.should.equal(cluster_id) - cluster.loguri.should.equal(args['log_uri']) - cluster.masterpublicdnsname.should.be.a(six.string_types) - cluster.name.should.equal(args['name']) - int(cluster.normalizedinstancehours).should.equal(0) - # cluster.release_label - cluster.shouldnt.have.property('requestedamiversion') - cluster.runningamiversion.should.equal('1.0.0') - # cluster.securityconfiguration - cluster.servicerole.should.equal(args['service_role']) - - cluster.status.state.should.equal('TERMINATED') - cluster.status.statechangereason.message.should.be.a(six.string_types) - cluster.status.statechangereason.code.should.be.a(six.string_types) - cluster.status.timeline.creationdatetime.should.be.a(six.string_types) - # cluster.status.timeline.enddatetime.should.be.a(six.string_types) - # cluster.status.timeline.readydatetime.should.be.a(six.string_types) - - dict((item.key, item.value) - for item in cluster.tags).should.equal(input_tags) - - cluster.terminationprotected.should.equal('false') - cluster.visibletoallusers.should.equal('true') - - -@mock_emr_deprecated -def test_describe_jobflows(): - conn = boto.connect_emr() - args = run_jobflow_args.copy() - expected = {} - - for idx in range(4): - cluster_name = 'cluster' + str(idx) - args['name'] = cluster_name - cluster_id = conn.run_jobflow(**args) - expected[cluster_id] = { - 'id': cluster_id, - 'name': cluster_name, - 'state': 'WAITING' - } - - # need sleep since it appears the timestamp is always rounded to - # the nearest second internally - 
time.sleep(1) - timestamp = datetime.now(pytz.utc) - time.sleep(1) - - for idx in range(4, 6): - cluster_name = 'cluster' + str(idx) - args['name'] = cluster_name - cluster_id = conn.run_jobflow(**args) - conn.terminate_jobflow(cluster_id) - expected[cluster_id] = { - 'id': cluster_id, - 'name': cluster_name, - 'state': 'TERMINATED' - } - jobs = conn.describe_jobflows() - jobs.should.have.length_of(6) - - for cluster_id, y in expected.items(): - resp = conn.describe_jobflows(jobflow_ids=[cluster_id]) - resp.should.have.length_of(1) - resp[0].jobflowid.should.equal(cluster_id) - - resp = conn.describe_jobflows(states=['WAITING']) - resp.should.have.length_of(4) - for x in resp: - x.state.should.equal('WAITING') - - resp = conn.describe_jobflows(created_before=timestamp) - resp.should.have.length_of(4) - - resp = conn.describe_jobflows(created_after=timestamp) - resp.should.have.length_of(2) - - -@mock_emr_deprecated -def test_describe_jobflow(): - conn = boto.connect_emr() - args = run_jobflow_args.copy() - args.update(dict( - ami_version='3.8.1', - api_params={ - #'Applications.member.1.Name': 'Spark', - #'Applications.member.1.Version': '2.4.2', - #'Configurations.member.1.Classification': 'yarn-site', - #'Configurations.member.1.Properties.entry.1.key': 'someproperty', - #'Configurations.member.1.Properties.entry.1.value': 'somevalue', - #'Instances.EmrManagedMasterSecurityGroup': 'master-security-group', - 'Instances.Ec2SubnetId': 'subnet-8be41cec', - }, - ec2_keyname='mykey', - hadoop_version='2.4.0', - - name='My jobflow', - log_uri='s3://some_bucket/jobflow_logs', - keep_alive=True, - master_instance_type='c1.medium', - slave_instance_type='c1.medium', - num_instances=2, - - availability_zone='us-west-2b', - - job_flow_role='EMR_EC2_DefaultRole', - service_role='EMR_DefaultRole', - visible_to_all_users=True, - )) - - cluster_id = conn.run_jobflow(**args) - jf = conn.describe_jobflow(cluster_id) - jf.amiversion.should.equal(args['ami_version']) - jf.bootstrapactions.should.equal(None) - jf.creationdatetime.should.be.a(six.string_types) - jf.should.have.property('laststatechangereason') - jf.readydatetime.should.be.a(six.string_types) - jf.startdatetime.should.be.a(six.string_types) - jf.state.should.equal('WAITING') - - jf.ec2keyname.should.equal(args['ec2_keyname']) - # Ec2SubnetId - jf.hadoopversion.should.equal(args['hadoop_version']) - int(jf.instancecount).should.equal(2) - - for ig in jf.instancegroups: - ig.creationdatetime.should.be.a(six.string_types) - # ig.enddatetime.should.be.a(six.string_types) - ig.should.have.property('instancegroupid').being.a(six.string_types) - int(ig.instancerequestcount).should.equal(1) - ig.instancerole.should.be.within(['MASTER', 'CORE']) - int(ig.instancerunningcount).should.equal(1) - ig.instancetype.should.equal('c1.medium') - ig.laststatechangereason.should.be.a(six.string_types) - ig.market.should.equal('ON_DEMAND') - ig.name.should.be.a(six.string_types) - ig.readydatetime.should.be.a(six.string_types) - ig.startdatetime.should.be.a(six.string_types) - ig.state.should.equal('RUNNING') - - jf.keepjobflowalivewhennosteps.should.equal('true') - jf.masterinstanceid.should.be.a(six.string_types) - jf.masterinstancetype.should.equal(args['master_instance_type']) - jf.masterpublicdnsname.should.be.a(six.string_types) - int(jf.normalizedinstancehours).should.equal(0) - jf.availabilityzone.should.equal(args['availability_zone']) - jf.slaveinstancetype.should.equal(args['slave_instance_type']) - jf.terminationprotected.should.equal('false') - - 
jf.jobflowid.should.equal(cluster_id) - # jf.jobflowrole.should.equal(args['job_flow_role']) - jf.loguri.should.equal(args['log_uri']) - jf.name.should.equal(args['name']) - # jf.servicerole.should.equal(args['service_role']) - - jf.steps.should.have.length_of(0) - - list(i.value for i in jf.supported_products).should.equal([]) - jf.visibletoallusers.should.equal('true') - - -@mock_emr_deprecated -def test_list_clusters(): - conn = boto.connect_emr() - args = run_jobflow_args.copy() - expected = {} - - for idx in range(40): - cluster_name = 'jobflow' + str(idx) - args['name'] = cluster_name - cluster_id = conn.run_jobflow(**args) - expected[cluster_id] = { - 'id': cluster_id, - 'name': cluster_name, - 'normalizedinstancehours': '0', - 'state': 'WAITING' - } - - # need sleep since it appears the timestamp is always rounded to - # the nearest second internally - time.sleep(1) - timestamp = datetime.now(pytz.utc) - time.sleep(1) - - for idx in range(40, 70): - cluster_name = 'jobflow' + str(idx) - args['name'] = cluster_name - cluster_id = conn.run_jobflow(**args) - conn.terminate_jobflow(cluster_id) - expected[cluster_id] = { - 'id': cluster_id, - 'name': cluster_name, - 'normalizedinstancehours': '0', - 'state': 'TERMINATED' - } - - args = {} - while 1: - resp = conn.list_clusters(**args) - clusters = resp.clusters - len(clusters).should.be.lower_than_or_equal_to(50) - for x in clusters: - y = expected[x.id] - x.id.should.equal(y['id']) - x.name.should.equal(y['name']) - x.normalizedinstancehours.should.equal( - y['normalizedinstancehours']) - x.status.state.should.equal(y['state']) - x.status.timeline.creationdatetime.should.be.a(six.string_types) - if y['state'] == 'TERMINATED': - x.status.timeline.enddatetime.should.be.a(six.string_types) - else: - x.status.timeline.shouldnt.have.property('enddatetime') - x.status.timeline.readydatetime.should.be.a(six.string_types) - if not hasattr(resp, 'marker'): - break - args = {'marker': resp.marker} - - resp = conn.list_clusters(cluster_states=['TERMINATED']) - resp.clusters.should.have.length_of(30) - for x in resp.clusters: - x.status.state.should.equal('TERMINATED') - - resp = conn.list_clusters(created_before=timestamp) - resp.clusters.should.have.length_of(40) - - resp = conn.list_clusters(created_after=timestamp) - resp.clusters.should.have.length_of(30) - - -@mock_emr_deprecated -def test_run_jobflow(): - conn = boto.connect_emr() - args = run_jobflow_args.copy() - job_id = conn.run_jobflow(**args) - job_flow = conn.describe_jobflow(job_id) - job_flow.state.should.equal('WAITING') - job_flow.jobflowid.should.equal(job_id) - job_flow.name.should.equal(args['name']) - job_flow.masterinstancetype.should.equal(args['master_instance_type']) - job_flow.slaveinstancetype.should.equal(args['slave_instance_type']) - job_flow.loguri.should.equal(args['log_uri']) - job_flow.visibletoallusers.should.equal('false') - int(job_flow.normalizedinstancehours).should.equal(0) - job_flow.steps.should.have.length_of(0) - - -@mock_emr_deprecated -def test_run_jobflow_in_multiple_regions(): - regions = {} - for region in ['us-east-1', 'eu-west-1']: - conn = boto.emr.connect_to_region(region) - args = run_jobflow_args.copy() - args['name'] = region - cluster_id = conn.run_jobflow(**args) - regions[region] = {'conn': conn, 'cluster_id': cluster_id} - - for region in regions.keys(): - conn = regions[region]['conn'] - jf = conn.describe_jobflow(regions[region]['cluster_id']) - jf.name.should.equal(region) - - -@requires_boto_gte("2.8") -@mock_emr_deprecated -def 
test_run_jobflow_with_new_params(): - # Test that run_jobflow works with newer params - conn = boto.connect_emr() - conn.run_jobflow(**run_jobflow_args) - - -@requires_boto_gte("2.8") -@mock_emr_deprecated -def test_run_jobflow_with_visible_to_all_users(): - conn = boto.connect_emr() - for expected in (True, False): - job_id = conn.run_jobflow( - visible_to_all_users=expected, - **run_jobflow_args - ) - job_flow = conn.describe_jobflow(job_id) - job_flow.visibletoallusers.should.equal(str(expected).lower()) - - -@requires_boto_gte("2.8") -@mock_emr_deprecated -def test_run_jobflow_with_instance_groups(): - input_groups = dict((g.name, g) for g in input_instance_groups) - conn = boto.connect_emr() - job_id = conn.run_jobflow(instance_groups=input_instance_groups, - **run_jobflow_args) - job_flow = conn.describe_jobflow(job_id) - int(job_flow.instancecount).should.equal( - sum(g.num_instances for g in input_instance_groups)) - for instance_group in job_flow.instancegroups: - expected = input_groups[instance_group.name] - instance_group.should.have.property('instancegroupid') - int(instance_group.instancerunningcount).should.equal( - expected.num_instances) - instance_group.instancerole.should.equal(expected.role) - instance_group.instancetype.should.equal(expected.type) - instance_group.market.should.equal(expected.market) - if hasattr(expected, 'bidprice'): - instance_group.bidprice.should.equal(expected.bidprice) - - -@requires_boto_gte("2.8") -@mock_emr_deprecated -def test_set_termination_protection(): - conn = boto.connect_emr() - job_id = conn.run_jobflow(**run_jobflow_args) - job_flow = conn.describe_jobflow(job_id) - job_flow.terminationprotected.should.equal('false') - - conn.set_termination_protection(job_id, True) - job_flow = conn.describe_jobflow(job_id) - job_flow.terminationprotected.should.equal('true') - - conn.set_termination_protection(job_id, False) - job_flow = conn.describe_jobflow(job_id) - job_flow.terminationprotected.should.equal('false') - - -@requires_boto_gte("2.8") -@mock_emr_deprecated -def test_set_visible_to_all_users(): - conn = boto.connect_emr() - args = run_jobflow_args.copy() - args['visible_to_all_users'] = False - job_id = conn.run_jobflow(**args) - job_flow = conn.describe_jobflow(job_id) - job_flow.visibletoallusers.should.equal('false') - - conn.set_visible_to_all_users(job_id, True) - job_flow = conn.describe_jobflow(job_id) - job_flow.visibletoallusers.should.equal('true') - - conn.set_visible_to_all_users(job_id, False) - job_flow = conn.describe_jobflow(job_id) - job_flow.visibletoallusers.should.equal('false') - - -@mock_emr_deprecated -def test_terminate_jobflow(): - conn = boto.connect_emr() - job_id = conn.run_jobflow(**run_jobflow_args) - flow = conn.describe_jobflows()[0] - flow.state.should.equal('WAITING') - - conn.terminate_jobflow(job_id) - flow = conn.describe_jobflows()[0] - flow.state.should.equal('TERMINATED') - - -# testing multiple end points for each feature - -@mock_emr_deprecated -def test_bootstrap_actions(): - bootstrap_actions = [ - BootstrapAction( - name='bs1', - path='path/to/script', - bootstrap_action_args=['arg1', 'arg2&arg3']), - BootstrapAction( - name='bs2', - path='path/to/anotherscript', - bootstrap_action_args=[]) - ] - - conn = boto.connect_emr() - cluster_id = conn.run_jobflow( - bootstrap_actions=bootstrap_actions, - **run_jobflow_args - ) - - jf = conn.describe_jobflow(cluster_id) - for x, y in zip(jf.bootstrapactions, bootstrap_actions): - x.name.should.equal(y.name) - x.path.should.equal(y.path) - 
list(o.value for o in x.args).should.equal(y.args()) - - resp = conn.list_bootstrap_actions(cluster_id) - for i, y in enumerate(bootstrap_actions): - x = resp.actions[i] - x.name.should.equal(y.name) - x.scriptpath.should.equal(y.path) - list(arg.value for arg in x.args).should.equal(y.args()) - - -@mock_emr_deprecated -def test_instance_groups(): - input_groups = dict((g.name, g) for g in input_instance_groups) - - conn = boto.connect_emr() - args = run_jobflow_args.copy() - for key in ['master_instance_type', 'slave_instance_type', 'num_instances']: - del args[key] - args['instance_groups'] = input_instance_groups[:2] - job_id = conn.run_jobflow(**args) - - jf = conn.describe_jobflow(job_id) - base_instance_count = int(jf.instancecount) - - conn.add_instance_groups(job_id, input_instance_groups[2:]) - - jf = conn.describe_jobflow(job_id) - int(jf.instancecount).should.equal( - sum(g.num_instances for g in input_instance_groups)) - for x in jf.instancegroups: - y = input_groups[x.name] - if hasattr(y, 'bidprice'): - x.bidprice.should.equal(y.bidprice) - x.creationdatetime.should.be.a(six.string_types) - # x.enddatetime.should.be.a(six.string_types) - x.should.have.property('instancegroupid') - int(x.instancerequestcount).should.equal(y.num_instances) - x.instancerole.should.equal(y.role) - int(x.instancerunningcount).should.equal(y.num_instances) - x.instancetype.should.equal(y.type) - x.laststatechangereason.should.be.a(six.string_types) - x.market.should.equal(y.market) - x.name.should.be.a(six.string_types) - x.readydatetime.should.be.a(six.string_types) - x.startdatetime.should.be.a(six.string_types) - x.state.should.equal('RUNNING') - - for x in conn.list_instance_groups(job_id).instancegroups: - y = input_groups[x.name] - if hasattr(y, 'bidprice'): - x.bidprice.should.equal(y.bidprice) - # Configurations - # EbsBlockDevices - # EbsOptimized - x.should.have.property('id') - x.instancegrouptype.should.equal(y.role) - x.instancetype.should.equal(y.type) - x.market.should.equal(y.market) - x.name.should.equal(y.name) - int(x.requestedinstancecount).should.equal(y.num_instances) - int(x.runninginstancecount).should.equal(y.num_instances) - # ShrinkPolicy - x.status.state.should.equal('RUNNING') - x.status.statechangereason.code.should.be.a(six.string_types) - x.status.statechangereason.message.should.be.a(six.string_types) - x.status.timeline.creationdatetime.should.be.a(six.string_types) - # x.status.timeline.enddatetime.should.be.a(six.string_types) - x.status.timeline.readydatetime.should.be.a(six.string_types) - - igs = dict((g.name, g) for g in jf.instancegroups) - - conn.modify_instance_groups( - [igs['task-1'].instancegroupid, igs['task-2'].instancegroupid], - [2, 3]) - jf = conn.describe_jobflow(job_id) - int(jf.instancecount).should.equal(base_instance_count + 5) - igs = dict((g.name, g) for g in jf.instancegroups) - int(igs['task-1'].instancerunningcount).should.equal(2) - int(igs['task-2'].instancerunningcount).should.equal(3) - - -@mock_emr_deprecated -def test_steps(): - input_steps = [ - StreamingStep( - name='My wordcount example', - mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py', - reducer='aggregate', - input='s3n://elasticmapreduce/samples/wordcount/input', - output='s3n://output_bucket/output/wordcount_output'), - StreamingStep( - name='My wordcount example & co.', - mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter2.py', - reducer='aggregate', - input='s3n://elasticmapreduce/samples/wordcount/input2', - 
output='s3n://output_bucket/output/wordcount_output2') - ] - - # TODO: implementation and test for cancel_steps - - conn = boto.connect_emr() - cluster_id = conn.run_jobflow( - steps=[input_steps[0]], - **run_jobflow_args) - - jf = conn.describe_jobflow(cluster_id) - jf.steps.should.have.length_of(1) - - conn.add_jobflow_steps(cluster_id, [input_steps[1]]) - - jf = conn.describe_jobflow(cluster_id) - jf.steps.should.have.length_of(2) - for step in jf.steps: - step.actiononfailure.should.equal('TERMINATE_JOB_FLOW') - list(arg.value for arg in step.args).should.have.length_of(8) - step.creationdatetime.should.be.a(six.string_types) - # step.enddatetime.should.be.a(six.string_types) - step.jar.should.equal( - '/home/hadoop/contrib/streaming/hadoop-streaming.jar') - step.laststatechangereason.should.be.a(six.string_types) - step.mainclass.should.equal('') - step.name.should.be.a(six.string_types) - # step.readydatetime.should.be.a(six.string_types) - # step.startdatetime.should.be.a(six.string_types) - step.state.should.be.within(['STARTING', 'PENDING']) - - expected = dict((s.name, s) for s in input_steps) - - steps = conn.list_steps(cluster_id).steps - for x in steps: - y = expected[x.name] - # actiononfailure - list(arg.value for arg in x.config.args).should.equal([ - '-mapper', y.mapper, - '-reducer', y.reducer, - '-input', y.input, - '-output', y.output, - ]) - x.config.jar.should.equal( - '/home/hadoop/contrib/streaming/hadoop-streaming.jar') - x.config.mainclass.should.equal('') - # properties - x.should.have.property('id').should.be.a(six.string_types) - x.name.should.equal(y.name) - x.status.state.should.be.within(['STARTING', 'PENDING']) - # x.status.statechangereason - x.status.timeline.creationdatetime.should.be.a(six.string_types) - # x.status.timeline.enddatetime.should.be.a(six.string_types) - # x.status.timeline.startdatetime.should.be.a(six.string_types) - - x = conn.describe_step(cluster_id, x.id) - list(arg.value for arg in x.config.args).should.equal([ - '-mapper', y.mapper, - '-reducer', y.reducer, - '-input', y.input, - '-output', y.output, - ]) - x.config.jar.should.equal( - '/home/hadoop/contrib/streaming/hadoop-streaming.jar') - x.config.mainclass.should.equal('') - # properties - x.should.have.property('id').should.be.a(six.string_types) - x.name.should.equal(y.name) - x.status.state.should.be.within(['STARTING', 'PENDING']) - # x.status.statechangereason - x.status.timeline.creationdatetime.should.be.a(six.string_types) - # x.status.timeline.enddatetime.should.be.a(six.string_types) - # x.status.timeline.startdatetime.should.be.a(six.string_types) - - @requires_boto_gte('2.39') - def test_list_steps_with_states(): - # boto's list_steps prior to 2.39 has a bug that ignores - # step_states argument. 
- steps = conn.list_steps(cluster_id).steps - step_id = steps[0].id - steps = conn.list_steps(cluster_id, step_states=['STARTING']).steps - steps.should.have.length_of(1) - steps[0].id.should.equal(step_id) - test_list_steps_with_states() - - -@mock_emr_deprecated -def test_tags(): - input_tags = {"tag1": "val1", "tag2": "val2"} - - conn = boto.connect_emr() - cluster_id = conn.run_jobflow(**run_jobflow_args) - - conn.add_tags(cluster_id, input_tags) - cluster = conn.describe_cluster(cluster_id) - cluster.tags.should.have.length_of(2) - dict((t.key, t.value) for t in cluster.tags).should.equal(input_tags) - - conn.remove_tags(cluster_id, list(input_tags.keys())) - cluster = conn.describe_cluster(cluster_id) - cluster.tags.should.have.length_of(0) +from __future__ import unicode_literals +import time +from datetime import datetime + +import boto +import pytz +from boto.emr.bootstrap_action import BootstrapAction +from boto.emr.instance_group import InstanceGroup +from boto.emr.step import StreamingStep + +import six +import sure # noqa + +from moto import mock_emr_deprecated +from tests.helpers import requires_boto_gte + + +run_jobflow_args = dict( + job_flow_role='EMR_EC2_DefaultRole', + keep_alive=True, + log_uri='s3://some_bucket/jobflow_logs', + master_instance_type='c1.medium', + name='My jobflow', + num_instances=2, + service_role='EMR_DefaultRole', + slave_instance_type='c1.medium', +) + + +input_instance_groups = [ + InstanceGroup(1, 'MASTER', 'c1.medium', 'ON_DEMAND', 'master'), + InstanceGroup(3, 'CORE', 'c1.medium', 'ON_DEMAND', 'core'), + InstanceGroup(6, 'TASK', 'c1.large', 'SPOT', 'task-1', '0.07'), + InstanceGroup(10, 'TASK', 'c1.xlarge', 'SPOT', 'task-2', '0.05'), +] + + +@mock_emr_deprecated +def test_describe_cluster(): + conn = boto.connect_emr() + args = run_jobflow_args.copy() + args.update(dict( + api_params={ + 'Applications.member.1.Name': 'Spark', + 'Applications.member.1.Version': '2.4.2', + 'Configurations.member.1.Classification': 'yarn-site', + 'Configurations.member.1.Properties.entry.1.key': 'someproperty', + 'Configurations.member.1.Properties.entry.1.value': 'somevalue', + 'Configurations.member.1.Properties.entry.2.key': 'someotherproperty', + 'Configurations.member.1.Properties.entry.2.value': 'someothervalue', + 'Instances.EmrManagedMasterSecurityGroup': 'master-security-group', + 'Instances.Ec2SubnetId': 'subnet-8be41cec', + }, + availability_zone='us-east-2b', + ec2_keyname='mykey', + job_flow_role='EMR_EC2_DefaultRole', + keep_alive=False, + log_uri='s3://some_bucket/jobflow_logs', + name='My jobflow', + service_role='EMR_DefaultRole', + visible_to_all_users=True, + )) + cluster_id = conn.run_jobflow(**args) + input_tags = {'tag1': 'val1', 'tag2': 'val2'} + conn.add_tags(cluster_id, input_tags) + + cluster = conn.describe_cluster(cluster_id) + cluster.applications[0].name.should.equal('Spark') + cluster.applications[0].version.should.equal('2.4.2') + cluster.autoterminate.should.equal('true') + + # configurations appear not be supplied as attributes? 
+ + attrs = cluster.ec2instanceattributes + # AdditionalMasterSecurityGroups + # AdditionalSlaveSecurityGroups + attrs.ec2availabilityzone.should.equal(args['availability_zone']) + attrs.ec2keyname.should.equal(args['ec2_keyname']) + attrs.ec2subnetid.should.equal(args['api_params']['Instances.Ec2SubnetId']) + # EmrManagedMasterSecurityGroups + # EmrManagedSlaveSecurityGroups + attrs.iaminstanceprofile.should.equal(args['job_flow_role']) + # ServiceAccessSecurityGroup + + cluster.id.should.equal(cluster_id) + cluster.loguri.should.equal(args['log_uri']) + cluster.masterpublicdnsname.should.be.a(six.string_types) + cluster.name.should.equal(args['name']) + int(cluster.normalizedinstancehours).should.equal(0) + # cluster.release_label + cluster.shouldnt.have.property('requestedamiversion') + cluster.runningamiversion.should.equal('1.0.0') + # cluster.securityconfiguration + cluster.servicerole.should.equal(args['service_role']) + + cluster.status.state.should.equal('TERMINATED') + cluster.status.statechangereason.message.should.be.a(six.string_types) + cluster.status.statechangereason.code.should.be.a(six.string_types) + cluster.status.timeline.creationdatetime.should.be.a(six.string_types) + # cluster.status.timeline.enddatetime.should.be.a(six.string_types) + # cluster.status.timeline.readydatetime.should.be.a(six.string_types) + + dict((item.key, item.value) + for item in cluster.tags).should.equal(input_tags) + + cluster.terminationprotected.should.equal('false') + cluster.visibletoallusers.should.equal('true') + + +@mock_emr_deprecated +def test_describe_jobflows(): + conn = boto.connect_emr() + args = run_jobflow_args.copy() + expected = {} + + for idx in range(4): + cluster_name = 'cluster' + str(idx) + args['name'] = cluster_name + cluster_id = conn.run_jobflow(**args) + expected[cluster_id] = { + 'id': cluster_id, + 'name': cluster_name, + 'state': 'WAITING' + } + + # need sleep since it appears the timestamp is always rounded to + # the nearest second internally + time.sleep(1) + timestamp = datetime.now(pytz.utc) + time.sleep(1) + + for idx in range(4, 6): + cluster_name = 'cluster' + str(idx) + args['name'] = cluster_name + cluster_id = conn.run_jobflow(**args) + conn.terminate_jobflow(cluster_id) + expected[cluster_id] = { + 'id': cluster_id, + 'name': cluster_name, + 'state': 'TERMINATED' + } + jobs = conn.describe_jobflows() + jobs.should.have.length_of(6) + + for cluster_id, y in expected.items(): + resp = conn.describe_jobflows(jobflow_ids=[cluster_id]) + resp.should.have.length_of(1) + resp[0].jobflowid.should.equal(cluster_id) + + resp = conn.describe_jobflows(states=['WAITING']) + resp.should.have.length_of(4) + for x in resp: + x.state.should.equal('WAITING') + + resp = conn.describe_jobflows(created_before=timestamp) + resp.should.have.length_of(4) + + resp = conn.describe_jobflows(created_after=timestamp) + resp.should.have.length_of(2) + + +@mock_emr_deprecated +def test_describe_jobflow(): + conn = boto.connect_emr() + args = run_jobflow_args.copy() + args.update(dict( + ami_version='3.8.1', + api_params={ + #'Applications.member.1.Name': 'Spark', + #'Applications.member.1.Version': '2.4.2', + #'Configurations.member.1.Classification': 'yarn-site', + #'Configurations.member.1.Properties.entry.1.key': 'someproperty', + #'Configurations.member.1.Properties.entry.1.value': 'somevalue', + #'Instances.EmrManagedMasterSecurityGroup': 'master-security-group', + 'Instances.Ec2SubnetId': 'subnet-8be41cec', + }, + ec2_keyname='mykey', + hadoop_version='2.4.0', + + name='My 
jobflow', + log_uri='s3://some_bucket/jobflow_logs', + keep_alive=True, + master_instance_type='c1.medium', + slave_instance_type='c1.medium', + num_instances=2, + + availability_zone='us-west-2b', + + job_flow_role='EMR_EC2_DefaultRole', + service_role='EMR_DefaultRole', + visible_to_all_users=True, + )) + + cluster_id = conn.run_jobflow(**args) + jf = conn.describe_jobflow(cluster_id) + jf.amiversion.should.equal(args['ami_version']) + jf.bootstrapactions.should.equal(None) + jf.creationdatetime.should.be.a(six.string_types) + jf.should.have.property('laststatechangereason') + jf.readydatetime.should.be.a(six.string_types) + jf.startdatetime.should.be.a(six.string_types) + jf.state.should.equal('WAITING') + + jf.ec2keyname.should.equal(args['ec2_keyname']) + # Ec2SubnetId + jf.hadoopversion.should.equal(args['hadoop_version']) + int(jf.instancecount).should.equal(2) + + for ig in jf.instancegroups: + ig.creationdatetime.should.be.a(six.string_types) + # ig.enddatetime.should.be.a(six.string_types) + ig.should.have.property('instancegroupid').being.a(six.string_types) + int(ig.instancerequestcount).should.equal(1) + ig.instancerole.should.be.within(['MASTER', 'CORE']) + int(ig.instancerunningcount).should.equal(1) + ig.instancetype.should.equal('c1.medium') + ig.laststatechangereason.should.be.a(six.string_types) + ig.market.should.equal('ON_DEMAND') + ig.name.should.be.a(six.string_types) + ig.readydatetime.should.be.a(six.string_types) + ig.startdatetime.should.be.a(six.string_types) + ig.state.should.equal('RUNNING') + + jf.keepjobflowalivewhennosteps.should.equal('true') + jf.masterinstanceid.should.be.a(six.string_types) + jf.masterinstancetype.should.equal(args['master_instance_type']) + jf.masterpublicdnsname.should.be.a(six.string_types) + int(jf.normalizedinstancehours).should.equal(0) + jf.availabilityzone.should.equal(args['availability_zone']) + jf.slaveinstancetype.should.equal(args['slave_instance_type']) + jf.terminationprotected.should.equal('false') + + jf.jobflowid.should.equal(cluster_id) + # jf.jobflowrole.should.equal(args['job_flow_role']) + jf.loguri.should.equal(args['log_uri']) + jf.name.should.equal(args['name']) + # jf.servicerole.should.equal(args['service_role']) + + jf.steps.should.have.length_of(0) + + list(i.value for i in jf.supported_products).should.equal([]) + jf.visibletoallusers.should.equal('true') + + +@mock_emr_deprecated +def test_list_clusters(): + conn = boto.connect_emr() + args = run_jobflow_args.copy() + expected = {} + + for idx in range(40): + cluster_name = 'jobflow' + str(idx) + args['name'] = cluster_name + cluster_id = conn.run_jobflow(**args) + expected[cluster_id] = { + 'id': cluster_id, + 'name': cluster_name, + 'normalizedinstancehours': '0', + 'state': 'WAITING' + } + + # need sleep since it appears the timestamp is always rounded to + # the nearest second internally + time.sleep(1) + timestamp = datetime.now(pytz.utc) + time.sleep(1) + + for idx in range(40, 70): + cluster_name = 'jobflow' + str(idx) + args['name'] = cluster_name + cluster_id = conn.run_jobflow(**args) + conn.terminate_jobflow(cluster_id) + expected[cluster_id] = { + 'id': cluster_id, + 'name': cluster_name, + 'normalizedinstancehours': '0', + 'state': 'TERMINATED' + } + + args = {} + while 1: + resp = conn.list_clusters(**args) + clusters = resp.clusters + len(clusters).should.be.lower_than_or_equal_to(50) + for x in clusters: + y = expected[x.id] + x.id.should.equal(y['id']) + x.name.should.equal(y['name']) + x.normalizedinstancehours.should.equal( + 
y['normalizedinstancehours']) + x.status.state.should.equal(y['state']) + x.status.timeline.creationdatetime.should.be.a(six.string_types) + if y['state'] == 'TERMINATED': + x.status.timeline.enddatetime.should.be.a(six.string_types) + else: + x.status.timeline.shouldnt.have.property('enddatetime') + x.status.timeline.readydatetime.should.be.a(six.string_types) + if not hasattr(resp, 'marker'): + break + args = {'marker': resp.marker} + + resp = conn.list_clusters(cluster_states=['TERMINATED']) + resp.clusters.should.have.length_of(30) + for x in resp.clusters: + x.status.state.should.equal('TERMINATED') + + resp = conn.list_clusters(created_before=timestamp) + resp.clusters.should.have.length_of(40) + + resp = conn.list_clusters(created_after=timestamp) + resp.clusters.should.have.length_of(30) + + +@mock_emr_deprecated +def test_run_jobflow(): + conn = boto.connect_emr() + args = run_jobflow_args.copy() + job_id = conn.run_jobflow(**args) + job_flow = conn.describe_jobflow(job_id) + job_flow.state.should.equal('WAITING') + job_flow.jobflowid.should.equal(job_id) + job_flow.name.should.equal(args['name']) + job_flow.masterinstancetype.should.equal(args['master_instance_type']) + job_flow.slaveinstancetype.should.equal(args['slave_instance_type']) + job_flow.loguri.should.equal(args['log_uri']) + job_flow.visibletoallusers.should.equal('false') + int(job_flow.normalizedinstancehours).should.equal(0) + job_flow.steps.should.have.length_of(0) + + +@mock_emr_deprecated +def test_run_jobflow_in_multiple_regions(): + regions = {} + for region in ['us-east-1', 'eu-west-1']: + conn = boto.emr.connect_to_region(region) + args = run_jobflow_args.copy() + args['name'] = region + cluster_id = conn.run_jobflow(**args) + regions[region] = {'conn': conn, 'cluster_id': cluster_id} + + for region in regions.keys(): + conn = regions[region]['conn'] + jf = conn.describe_jobflow(regions[region]['cluster_id']) + jf.name.should.equal(region) + + +@requires_boto_gte("2.8") +@mock_emr_deprecated +def test_run_jobflow_with_new_params(): + # Test that run_jobflow works with newer params + conn = boto.connect_emr() + conn.run_jobflow(**run_jobflow_args) + + +@requires_boto_gte("2.8") +@mock_emr_deprecated +def test_run_jobflow_with_visible_to_all_users(): + conn = boto.connect_emr() + for expected in (True, False): + job_id = conn.run_jobflow( + visible_to_all_users=expected, + **run_jobflow_args + ) + job_flow = conn.describe_jobflow(job_id) + job_flow.visibletoallusers.should.equal(str(expected).lower()) + + +@requires_boto_gte("2.8") +@mock_emr_deprecated +def test_run_jobflow_with_instance_groups(): + input_groups = dict((g.name, g) for g in input_instance_groups) + conn = boto.connect_emr() + job_id = conn.run_jobflow(instance_groups=input_instance_groups, + **run_jobflow_args) + job_flow = conn.describe_jobflow(job_id) + int(job_flow.instancecount).should.equal( + sum(g.num_instances for g in input_instance_groups)) + for instance_group in job_flow.instancegroups: + expected = input_groups[instance_group.name] + instance_group.should.have.property('instancegroupid') + int(instance_group.instancerunningcount).should.equal( + expected.num_instances) + instance_group.instancerole.should.equal(expected.role) + instance_group.instancetype.should.equal(expected.type) + instance_group.market.should.equal(expected.market) + if hasattr(expected, 'bidprice'): + instance_group.bidprice.should.equal(expected.bidprice) + + +@requires_boto_gte("2.8") +@mock_emr_deprecated +def test_set_termination_protection(): + conn = 
boto.connect_emr() + job_id = conn.run_jobflow(**run_jobflow_args) + job_flow = conn.describe_jobflow(job_id) + job_flow.terminationprotected.should.equal('false') + + conn.set_termination_protection(job_id, True) + job_flow = conn.describe_jobflow(job_id) + job_flow.terminationprotected.should.equal('true') + + conn.set_termination_protection(job_id, False) + job_flow = conn.describe_jobflow(job_id) + job_flow.terminationprotected.should.equal('false') + + +@requires_boto_gte("2.8") +@mock_emr_deprecated +def test_set_visible_to_all_users(): + conn = boto.connect_emr() + args = run_jobflow_args.copy() + args['visible_to_all_users'] = False + job_id = conn.run_jobflow(**args) + job_flow = conn.describe_jobflow(job_id) + job_flow.visibletoallusers.should.equal('false') + + conn.set_visible_to_all_users(job_id, True) + job_flow = conn.describe_jobflow(job_id) + job_flow.visibletoallusers.should.equal('true') + + conn.set_visible_to_all_users(job_id, False) + job_flow = conn.describe_jobflow(job_id) + job_flow.visibletoallusers.should.equal('false') + + +@mock_emr_deprecated +def test_terminate_jobflow(): + conn = boto.connect_emr() + job_id = conn.run_jobflow(**run_jobflow_args) + flow = conn.describe_jobflows()[0] + flow.state.should.equal('WAITING') + + conn.terminate_jobflow(job_id) + flow = conn.describe_jobflows()[0] + flow.state.should.equal('TERMINATED') + + +# testing multiple end points for each feature + +@mock_emr_deprecated +def test_bootstrap_actions(): + bootstrap_actions = [ + BootstrapAction( + name='bs1', + path='path/to/script', + bootstrap_action_args=['arg1', 'arg2&arg3']), + BootstrapAction( + name='bs2', + path='path/to/anotherscript', + bootstrap_action_args=[]) + ] + + conn = boto.connect_emr() + cluster_id = conn.run_jobflow( + bootstrap_actions=bootstrap_actions, + **run_jobflow_args + ) + + jf = conn.describe_jobflow(cluster_id) + for x, y in zip(jf.bootstrapactions, bootstrap_actions): + x.name.should.equal(y.name) + x.path.should.equal(y.path) + list(o.value for o in x.args).should.equal(y.args()) + + resp = conn.list_bootstrap_actions(cluster_id) + for i, y in enumerate(bootstrap_actions): + x = resp.actions[i] + x.name.should.equal(y.name) + x.scriptpath.should.equal(y.path) + list(arg.value for arg in x.args).should.equal(y.args()) + + +@mock_emr_deprecated +def test_instance_groups(): + input_groups = dict((g.name, g) for g in input_instance_groups) + + conn = boto.connect_emr() + args = run_jobflow_args.copy() + for key in ['master_instance_type', 'slave_instance_type', 'num_instances']: + del args[key] + args['instance_groups'] = input_instance_groups[:2] + job_id = conn.run_jobflow(**args) + + jf = conn.describe_jobflow(job_id) + base_instance_count = int(jf.instancecount) + + conn.add_instance_groups(job_id, input_instance_groups[2:]) + + jf = conn.describe_jobflow(job_id) + int(jf.instancecount).should.equal( + sum(g.num_instances for g in input_instance_groups)) + for x in jf.instancegroups: + y = input_groups[x.name] + if hasattr(y, 'bidprice'): + x.bidprice.should.equal(y.bidprice) + x.creationdatetime.should.be.a(six.string_types) + # x.enddatetime.should.be.a(six.string_types) + x.should.have.property('instancegroupid') + int(x.instancerequestcount).should.equal(y.num_instances) + x.instancerole.should.equal(y.role) + int(x.instancerunningcount).should.equal(y.num_instances) + x.instancetype.should.equal(y.type) + x.laststatechangereason.should.be.a(six.string_types) + x.market.should.equal(y.market) + x.name.should.be.a(six.string_types) + 
x.readydatetime.should.be.a(six.string_types) + x.startdatetime.should.be.a(six.string_types) + x.state.should.equal('RUNNING') + + for x in conn.list_instance_groups(job_id).instancegroups: + y = input_groups[x.name] + if hasattr(y, 'bidprice'): + x.bidprice.should.equal(y.bidprice) + # Configurations + # EbsBlockDevices + # EbsOptimized + x.should.have.property('id') + x.instancegrouptype.should.equal(y.role) + x.instancetype.should.equal(y.type) + x.market.should.equal(y.market) + x.name.should.equal(y.name) + int(x.requestedinstancecount).should.equal(y.num_instances) + int(x.runninginstancecount).should.equal(y.num_instances) + # ShrinkPolicy + x.status.state.should.equal('RUNNING') + x.status.statechangereason.code.should.be.a(six.string_types) + x.status.statechangereason.message.should.be.a(six.string_types) + x.status.timeline.creationdatetime.should.be.a(six.string_types) + # x.status.timeline.enddatetime.should.be.a(six.string_types) + x.status.timeline.readydatetime.should.be.a(six.string_types) + + igs = dict((g.name, g) for g in jf.instancegroups) + + conn.modify_instance_groups( + [igs['task-1'].instancegroupid, igs['task-2'].instancegroupid], + [2, 3]) + jf = conn.describe_jobflow(job_id) + int(jf.instancecount).should.equal(base_instance_count + 5) + igs = dict((g.name, g) for g in jf.instancegroups) + int(igs['task-1'].instancerunningcount).should.equal(2) + int(igs['task-2'].instancerunningcount).should.equal(3) + + +@mock_emr_deprecated +def test_steps(): + input_steps = [ + StreamingStep( + name='My wordcount example', + mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py', + reducer='aggregate', + input='s3n://elasticmapreduce/samples/wordcount/input', + output='s3n://output_bucket/output/wordcount_output'), + StreamingStep( + name='My wordcount example & co.', + mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter2.py', + reducer='aggregate', + input='s3n://elasticmapreduce/samples/wordcount/input2', + output='s3n://output_bucket/output/wordcount_output2') + ] + + # TODO: implementation and test for cancel_steps + + conn = boto.connect_emr() + cluster_id = conn.run_jobflow( + steps=[input_steps[0]], + **run_jobflow_args) + + jf = conn.describe_jobflow(cluster_id) + jf.steps.should.have.length_of(1) + + conn.add_jobflow_steps(cluster_id, [input_steps[1]]) + + jf = conn.describe_jobflow(cluster_id) + jf.steps.should.have.length_of(2) + for step in jf.steps: + step.actiononfailure.should.equal('TERMINATE_JOB_FLOW') + list(arg.value for arg in step.args).should.have.length_of(8) + step.creationdatetime.should.be.a(six.string_types) + # step.enddatetime.should.be.a(six.string_types) + step.jar.should.equal( + '/home/hadoop/contrib/streaming/hadoop-streaming.jar') + step.laststatechangereason.should.be.a(six.string_types) + step.mainclass.should.equal('') + step.name.should.be.a(six.string_types) + # step.readydatetime.should.be.a(six.string_types) + # step.startdatetime.should.be.a(six.string_types) + step.state.should.be.within(['STARTING', 'PENDING']) + + expected = dict((s.name, s) for s in input_steps) + + steps = conn.list_steps(cluster_id).steps + for x in steps: + y = expected[x.name] + # actiononfailure + list(arg.value for arg in x.config.args).should.equal([ + '-mapper', y.mapper, + '-reducer', y.reducer, + '-input', y.input, + '-output', y.output, + ]) + x.config.jar.should.equal( + '/home/hadoop/contrib/streaming/hadoop-streaming.jar') + x.config.mainclass.should.equal('') + # properties + 
x.should.have.property('id').should.be.a(six.string_types) + x.name.should.equal(y.name) + x.status.state.should.be.within(['STARTING', 'PENDING']) + # x.status.statechangereason + x.status.timeline.creationdatetime.should.be.a(six.string_types) + # x.status.timeline.enddatetime.should.be.a(six.string_types) + # x.status.timeline.startdatetime.should.be.a(six.string_types) + + x = conn.describe_step(cluster_id, x.id) + list(arg.value for arg in x.config.args).should.equal([ + '-mapper', y.mapper, + '-reducer', y.reducer, + '-input', y.input, + '-output', y.output, + ]) + x.config.jar.should.equal( + '/home/hadoop/contrib/streaming/hadoop-streaming.jar') + x.config.mainclass.should.equal('') + # properties + x.should.have.property('id').should.be.a(six.string_types) + x.name.should.equal(y.name) + x.status.state.should.be.within(['STARTING', 'PENDING']) + # x.status.statechangereason + x.status.timeline.creationdatetime.should.be.a(six.string_types) + # x.status.timeline.enddatetime.should.be.a(six.string_types) + # x.status.timeline.startdatetime.should.be.a(six.string_types) + + @requires_boto_gte('2.39') + def test_list_steps_with_states(): + # boto's list_steps prior to 2.39 has a bug that ignores + # step_states argument. + steps = conn.list_steps(cluster_id).steps + step_id = steps[0].id + steps = conn.list_steps(cluster_id, step_states=['STARTING']).steps + steps.should.have.length_of(1) + steps[0].id.should.equal(step_id) + test_list_steps_with_states() + + +@mock_emr_deprecated +def test_tags(): + input_tags = {"tag1": "val1", "tag2": "val2"} + + conn = boto.connect_emr() + cluster_id = conn.run_jobflow(**run_jobflow_args) + + conn.add_tags(cluster_id, input_tags) + cluster = conn.describe_cluster(cluster_id) + cluster.tags.should.have.length_of(2) + dict((t.key, t.value) for t in cluster.tags).should.equal(input_tags) + + conn.remove_tags(cluster_id, list(input_tags.keys())) + cluster = conn.describe_cluster(cluster_id) + cluster.tags.should.have.length_of(0) diff --git a/tests/test_emr/test_emr_boto3.py b/tests/test_emr/test_emr_boto3.py index 237ff8bba..28fff455b 100644 --- a/tests/test_emr/test_emr_boto3.py +++ b/tests/test_emr/test_emr_boto3.py @@ -1,720 +1,720 @@ -# -*- coding: utf-8 -*- -from __future__ import unicode_literals -import time -from copy import deepcopy -from datetime import datetime - -import boto3 -import pytz -import six -import sure # noqa -from botocore.exceptions import ClientError -from nose.tools import assert_raises - -from moto import mock_emr - - -run_job_flow_args = dict( - Instances={ - 'InstanceCount': 3, - 'KeepJobFlowAliveWhenNoSteps': True, - 'MasterInstanceType': 'c3.medium', - 'Placement': {'AvailabilityZone': 'us-east-1a'}, - 'SlaveInstanceType': 'c3.xlarge', - }, - JobFlowRole='EMR_EC2_DefaultRole', - LogUri='s3://mybucket/log', - Name='cluster', - ServiceRole='EMR_DefaultRole', - VisibleToAllUsers=True) - - -input_instance_groups = [ - {'InstanceCount': 1, - 'InstanceRole': 'MASTER', - 'InstanceType': 'c1.medium', - 'Market': 'ON_DEMAND', - 'Name': 'master'}, - {'InstanceCount': 3, - 'InstanceRole': 'CORE', - 'InstanceType': 'c1.medium', - 'Market': 'ON_DEMAND', - 'Name': 'core'}, - {'InstanceCount': 6, - 'InstanceRole': 'TASK', - 'InstanceType': 'c1.large', - 'Market': 'SPOT', - 'Name': 'task-1', - 'BidPrice': '0.07'}, - {'InstanceCount': 10, - 'InstanceRole': 'TASK', - 'InstanceType': 'c1.xlarge', - 'Market': 'SPOT', - 'Name': 'task-2', - 'BidPrice': '0.05'}, -] - - -@mock_emr -def test_describe_cluster(): - client = boto3.client('emr', 
region_name='us-east-1') - - args = deepcopy(run_job_flow_args) - args['Applications'] = [{'Name': 'Spark', 'Version': '2.4.2'}] - args['Configurations'] = [ - {'Classification': 'yarn-site', - 'Properties': {'someproperty': 'somevalue', - 'someotherproperty': 'someothervalue'}}, - {'Classification': 'nested-configs', - 'Properties': {}, - 'Configurations': [ - { - 'Classification': 'nested-config', - 'Properties': { - 'nested-property': 'nested-value' - } - } - ]} - ] - args['Instances']['AdditionalMasterSecurityGroups'] = ['additional-master'] - args['Instances']['AdditionalSlaveSecurityGroups'] = ['additional-slave'] - args['Instances']['Ec2KeyName'] = 'mykey' - args['Instances']['Ec2SubnetId'] = 'subnet-8be41cec' - args['Instances']['EmrManagedMasterSecurityGroup'] = 'master-security-group' - args['Instances']['EmrManagedSlaveSecurityGroup'] = 'slave-security-group' - args['Instances']['KeepJobFlowAliveWhenNoSteps'] = False - args['Instances']['ServiceAccessSecurityGroup'] = 'service-access-security-group' - args['Tags'] = [{'Key': 'tag1', 'Value': 'val1'}, - {'Key': 'tag2', 'Value': 'val2'}] - - cluster_id = client.run_job_flow(**args)['JobFlowId'] - - cl = client.describe_cluster(ClusterId=cluster_id)['Cluster'] - cl['Applications'][0]['Name'].should.equal('Spark') - cl['Applications'][0]['Version'].should.equal('2.4.2') - cl['AutoTerminate'].should.equal(True) - - config = cl['Configurations'][0] - config['Classification'].should.equal('yarn-site') - config['Properties'].should.equal(args['Configurations'][0]['Properties']) - - nested_config = cl['Configurations'][1] - nested_config['Classification'].should.equal('nested-configs') - nested_config['Properties'].should.equal(args['Configurations'][1]['Properties']) - - attrs = cl['Ec2InstanceAttributes'] - attrs['AdditionalMasterSecurityGroups'].should.equal( - args['Instances']['AdditionalMasterSecurityGroups']) - attrs['AdditionalSlaveSecurityGroups'].should.equal( - args['Instances']['AdditionalSlaveSecurityGroups']) - attrs['Ec2AvailabilityZone'].should.equal('us-east-1a') - attrs['Ec2KeyName'].should.equal(args['Instances']['Ec2KeyName']) - attrs['Ec2SubnetId'].should.equal(args['Instances']['Ec2SubnetId']) - attrs['EmrManagedMasterSecurityGroup'].should.equal( - args['Instances']['EmrManagedMasterSecurityGroup']) - attrs['EmrManagedSlaveSecurityGroup'].should.equal( - args['Instances']['EmrManagedSlaveSecurityGroup']) - attrs['IamInstanceProfile'].should.equal(args['JobFlowRole']) - attrs['ServiceAccessSecurityGroup'].should.equal( - args['Instances']['ServiceAccessSecurityGroup']) - cl['Id'].should.equal(cluster_id) - cl['LogUri'].should.equal(args['LogUri']) - cl['MasterPublicDnsName'].should.be.a(six.string_types) - cl['Name'].should.equal(args['Name']) - cl['NormalizedInstanceHours'].should.equal(0) - # cl['ReleaseLabel'].should.equal('emr-5.0.0') - cl.shouldnt.have.key('RequestedAmiVersion') - cl['RunningAmiVersion'].should.equal('1.0.0') - # cl['SecurityConfiguration'].should.be.a(six.string_types) - cl['ServiceRole'].should.equal(args['ServiceRole']) - - status = cl['Status'] - status['State'].should.equal('TERMINATED') - # cluster['Status']['StateChangeReason'] - status['Timeline']['CreationDateTime'].should.be.a('datetime.datetime') - # status['Timeline']['EndDateTime'].should.equal(datetime(2014, 1, 24, 2, 19, 46, tzinfo=pytz.utc)) - status['Timeline']['ReadyDateTime'].should.be.a('datetime.datetime') - - dict((t['Key'], t['Value']) for t in cl['Tags']).should.equal( - dict((t['Key'], t['Value']) for t in 
args['Tags'])) - - cl['TerminationProtected'].should.equal(False) - cl['VisibleToAllUsers'].should.equal(True) - - -@mock_emr -def test_describe_cluster_not_found(): - conn = boto3.client('emr', region_name='us-east-1') - raised = False - try: - cluster = conn.describe_cluster(ClusterId='DummyId') - except ClientError as e: - if e.response['Error']['Code'] == "ResourceNotFoundException": - raised = True - raised.should.equal(True) - - -@mock_emr -def test_describe_job_flows(): - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - expected = {} - - for idx in range(4): - cluster_name = 'cluster' + str(idx) - args['Name'] = cluster_name - cluster_id = client.run_job_flow(**args)['JobFlowId'] - expected[cluster_id] = { - 'Id': cluster_id, - 'Name': cluster_name, - 'State': 'WAITING' - } - - # need sleep since it appears the timestamp is always rounded to - # the nearest second internally - time.sleep(1) - timestamp = datetime.now(pytz.utc) - time.sleep(1) - - for idx in range(4, 6): - cluster_name = 'cluster' + str(idx) - args['Name'] = cluster_name - cluster_id = client.run_job_flow(**args)['JobFlowId'] - client.terminate_job_flows(JobFlowIds=[cluster_id]) - expected[cluster_id] = { - 'Id': cluster_id, - 'Name': cluster_name, - 'State': 'TERMINATED' - } - - resp = client.describe_job_flows() - resp['JobFlows'].should.have.length_of(6) - - for cluster_id, y in expected.items(): - resp = client.describe_job_flows(JobFlowIds=[cluster_id]) - resp['JobFlows'].should.have.length_of(1) - resp['JobFlows'][0]['JobFlowId'].should.equal(cluster_id) - - resp = client.describe_job_flows(JobFlowStates=['WAITING']) - resp['JobFlows'].should.have.length_of(4) - for x in resp['JobFlows']: - x['ExecutionStatusDetail']['State'].should.equal('WAITING') - - resp = client.describe_job_flows(CreatedBefore=timestamp) - resp['JobFlows'].should.have.length_of(4) - - resp = client.describe_job_flows(CreatedAfter=timestamp) - resp['JobFlows'].should.have.length_of(2) - - -@mock_emr -def test_describe_job_flow(): - client = boto3.client('emr', region_name='us-east-1') - - args = deepcopy(run_job_flow_args) - args['AmiVersion'] = '3.8.1' - args['Instances'].update( - {'Ec2KeyName': 'ec2keyname', - 'Ec2SubnetId': 'subnet-8be41cec', - 'HadoopVersion': '2.4.0'}) - args['VisibleToAllUsers'] = True - - cluster_id = client.run_job_flow(**args)['JobFlowId'] - - jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - - jf['AmiVersion'].should.equal(args['AmiVersion']) - jf.shouldnt.have.key('BootstrapActions') - esd = jf['ExecutionStatusDetail'] - esd['CreationDateTime'].should.be.a('datetime.datetime') - # esd['EndDateTime'].should.be.a('datetime.datetime') - # esd['LastStateChangeReason'].should.be.a(six.string_types) - esd['ReadyDateTime'].should.be.a('datetime.datetime') - esd['StartDateTime'].should.be.a('datetime.datetime') - esd['State'].should.equal('WAITING') - attrs = jf['Instances'] - attrs['Ec2KeyName'].should.equal(args['Instances']['Ec2KeyName']) - attrs['Ec2SubnetId'].should.equal(args['Instances']['Ec2SubnetId']) - attrs['HadoopVersion'].should.equal(args['Instances']['HadoopVersion']) - attrs['InstanceCount'].should.equal(args['Instances']['InstanceCount']) - for ig in attrs['InstanceGroups']: - # ig['BidPrice'] - ig['CreationDateTime'].should.be.a('datetime.datetime') - # ig['EndDateTime'].should.be.a('datetime.datetime') - ig['InstanceGroupId'].should.be.a(six.string_types) - ig['InstanceRequestCount'].should.be.a(int) - 
ig['InstanceRole'].should.be.within(['MASTER', 'CORE']) - ig['InstanceRunningCount'].should.be.a(int) - ig['InstanceType'].should.be.within(['c3.medium', 'c3.xlarge']) - # ig['LastStateChangeReason'].should.be.a(six.string_types) - ig['Market'].should.equal('ON_DEMAND') - ig['Name'].should.be.a(six.string_types) - ig['ReadyDateTime'].should.be.a('datetime.datetime') - ig['StartDateTime'].should.be.a('datetime.datetime') - ig['State'].should.equal('RUNNING') - attrs['KeepJobFlowAliveWhenNoSteps'].should.equal(True) - # attrs['MasterInstanceId'].should.be.a(six.string_types) - attrs['MasterInstanceType'].should.equal( - args['Instances']['MasterInstanceType']) - attrs['MasterPublicDnsName'].should.be.a(six.string_types) - attrs['NormalizedInstanceHours'].should.equal(0) - attrs['Placement']['AvailabilityZone'].should.equal( - args['Instances']['Placement']['AvailabilityZone']) - attrs['SlaveInstanceType'].should.equal( - args['Instances']['SlaveInstanceType']) - attrs['TerminationProtected'].should.equal(False) - jf['JobFlowId'].should.equal(cluster_id) - jf['JobFlowRole'].should.equal(args['JobFlowRole']) - jf['LogUri'].should.equal(args['LogUri']) - jf['Name'].should.equal(args['Name']) - jf['ServiceRole'].should.equal(args['ServiceRole']) - jf['Steps'].should.equal([]) - jf['SupportedProducts'].should.equal([]) - jf['VisibleToAllUsers'].should.equal(True) - - -@mock_emr -def test_list_clusters(): - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - expected = {} - - for idx in range(40): - cluster_name = 'jobflow' + str(idx) - args['Name'] = cluster_name - cluster_id = client.run_job_flow(**args)['JobFlowId'] - expected[cluster_id] = { - 'Id': cluster_id, - 'Name': cluster_name, - 'NormalizedInstanceHours': 0, - 'State': 'WAITING' - } - - # need sleep since it appears the timestamp is always rounded to - # the nearest second internally - time.sleep(1) - timestamp = datetime.now(pytz.utc) - time.sleep(1) - - for idx in range(40, 70): - cluster_name = 'jobflow' + str(idx) - args['Name'] = cluster_name - cluster_id = client.run_job_flow(**args)['JobFlowId'] - client.terminate_job_flows(JobFlowIds=[cluster_id]) - expected[cluster_id] = { - 'Id': cluster_id, - 'Name': cluster_name, - 'NormalizedInstanceHours': 0, - 'State': 'TERMINATED' - } - - args = {} - while 1: - resp = client.list_clusters(**args) - clusters = resp['Clusters'] - len(clusters).should.be.lower_than_or_equal_to(50) - for x in clusters: - y = expected[x['Id']] - x['Id'].should.equal(y['Id']) - x['Name'].should.equal(y['Name']) - x['NormalizedInstanceHours'].should.equal( - y['NormalizedInstanceHours']) - x['Status']['State'].should.equal(y['State']) - x['Status']['Timeline'][ - 'CreationDateTime'].should.be.a('datetime.datetime') - if y['State'] == 'TERMINATED': - x['Status']['Timeline'][ - 'EndDateTime'].should.be.a('datetime.datetime') - else: - x['Status']['Timeline'].shouldnt.have.key('EndDateTime') - x['Status']['Timeline'][ - 'ReadyDateTime'].should.be.a('datetime.datetime') - marker = resp.get('Marker') - if marker is None: - break - args = {'Marker': marker} - - resp = client.list_clusters(ClusterStates=['TERMINATED']) - resp['Clusters'].should.have.length_of(30) - for x in resp['Clusters']: - x['Status']['State'].should.equal('TERMINATED') - - resp = client.list_clusters(CreatedBefore=timestamp) - resp['Clusters'].should.have.length_of(40) - - resp = client.list_clusters(CreatedAfter=timestamp) - resp['Clusters'].should.have.length_of(30) - - -@mock_emr -def 
test_run_job_flow(): - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - cluster_id = client.run_job_flow(**args)['JobFlowId'] - resp = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - resp['ExecutionStatusDetail']['State'].should.equal('WAITING') - resp['JobFlowId'].should.equal(cluster_id) - resp['Name'].should.equal(args['Name']) - resp['Instances']['MasterInstanceType'].should.equal( - args['Instances']['MasterInstanceType']) - resp['Instances']['SlaveInstanceType'].should.equal( - args['Instances']['SlaveInstanceType']) - resp['LogUri'].should.equal(args['LogUri']) - resp['VisibleToAllUsers'].should.equal(args['VisibleToAllUsers']) - resp['Instances']['NormalizedInstanceHours'].should.equal(0) - resp['Steps'].should.equal([]) - - -@mock_emr -def test_run_job_flow_with_invalid_params(): - client = boto3.client('emr', region_name='us-east-1') - with assert_raises(ClientError) as ex: - # cannot set both AmiVersion and ReleaseLabel - args = deepcopy(run_job_flow_args) - args['AmiVersion'] = '2.4' - args['ReleaseLabel'] = 'emr-5.0.0' - client.run_job_flow(**args) - ex.exception.response['Error']['Code'].should.equal('ValidationException') - - -@mock_emr -def test_run_job_flow_in_multiple_regions(): - regions = {} - for region in ['us-east-1', 'eu-west-1']: - client = boto3.client('emr', region_name=region) - args = deepcopy(run_job_flow_args) - args['Name'] = region - cluster_id = client.run_job_flow(**args)['JobFlowId'] - regions[region] = {'client': client, 'cluster_id': cluster_id} - - for region in regions.keys(): - client = regions[region]['client'] - resp = client.describe_cluster(ClusterId=regions[region]['cluster_id']) - resp['Cluster']['Name'].should.equal(region) - - -@mock_emr -def test_run_job_flow_with_new_params(): - client = boto3.client('emr', region_name='us-east-1') - resp = client.run_job_flow(**run_job_flow_args) - resp.should.have.key('JobFlowId') - - -@mock_emr -def test_run_job_flow_with_visible_to_all_users(): - client = boto3.client('emr', region_name='us-east-1') - for expected in (True, False): - args = deepcopy(run_job_flow_args) - args['VisibleToAllUsers'] = expected - resp = client.run_job_flow(**args) - cluster_id = resp['JobFlowId'] - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['VisibleToAllUsers'].should.equal(expected) - - -@mock_emr -def test_run_job_flow_with_instance_groups(): - input_groups = dict((g['Name'], g) for g in input_instance_groups) - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - args['Instances'] = {'InstanceGroups': input_instance_groups} - cluster_id = client.run_job_flow(**args)['JobFlowId'] - groups = client.list_instance_groups(ClusterId=cluster_id)[ - 'InstanceGroups'] - for x in groups: - y = input_groups[x['Name']] - x.should.have.key('Id') - x['RequestedInstanceCount'].should.equal(y['InstanceCount']) - x['InstanceGroupType'].should.equal(y['InstanceRole']) - x['InstanceType'].should.equal(y['InstanceType']) - x['Market'].should.equal(y['Market']) - if 'BidPrice' in y: - x['BidPrice'].should.equal(y['BidPrice']) - - -@mock_emr -def test_set_termination_protection(): - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - args['Instances']['TerminationProtected'] = False - resp = client.run_job_flow(**args) - cluster_id = resp['JobFlowId'] - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['TerminationProtected'].should.equal(False) - - 
for expected in (True, False): - resp = client.set_termination_protection(JobFlowIds=[cluster_id], - TerminationProtected=expected) - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['TerminationProtected'].should.equal(expected) - - -@mock_emr -def test_set_visible_to_all_users(): - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - args['VisibleToAllUsers'] = False - resp = client.run_job_flow(**args) - cluster_id = resp['JobFlowId'] - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['VisibleToAllUsers'].should.equal(False) - - for expected in (True, False): - resp = client.set_visible_to_all_users(JobFlowIds=[cluster_id], - VisibleToAllUsers=expected) - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['VisibleToAllUsers'].should.equal(expected) - - -@mock_emr -def test_terminate_job_flows(): - client = boto3.client('emr', region_name='us-east-1') - - resp = client.run_job_flow(**run_job_flow_args) - cluster_id = resp['JobFlowId'] - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['Status']['State'].should.equal('WAITING') - - resp = client.terminate_job_flows(JobFlowIds=[cluster_id]) - resp = client.describe_cluster(ClusterId=cluster_id) - resp['Cluster']['Status']['State'].should.equal('TERMINATED') - - -# testing multiple end points for each feature - -@mock_emr -def test_bootstrap_actions(): - bootstrap_actions = [ - {'Name': 'bs1', - 'ScriptBootstrapAction': { - 'Args': ['arg1', 'arg2'], - 'Path': 's3://path/to/script'}}, - {'Name': 'bs2', - 'ScriptBootstrapAction': { - 'Args': [], - 'Path': 's3://path/to/anotherscript'}} - ] - - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - args['BootstrapActions'] = bootstrap_actions - cluster_id = client.run_job_flow(**args)['JobFlowId'] - - cl = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - for x, y in zip(cl['BootstrapActions'], bootstrap_actions): - x['BootstrapActionConfig'].should.equal(y) - - resp = client.list_bootstrap_actions(ClusterId=cluster_id) - for x, y in zip(resp['BootstrapActions'], bootstrap_actions): - x['Name'].should.equal(y['Name']) - if 'Args' in y['ScriptBootstrapAction']: - x['Args'].should.equal(y['ScriptBootstrapAction']['Args']) - x['ScriptPath'].should.equal(y['ScriptBootstrapAction']['Path']) - - -@mock_emr -def test_instance_groups(): - input_groups = dict((g['Name'], g) for g in input_instance_groups) - - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - for key in ['MasterInstanceType', 'SlaveInstanceType', 'InstanceCount']: - del args['Instances'][key] - args['Instances']['InstanceGroups'] = input_instance_groups[:2] - cluster_id = client.run_job_flow(**args)['JobFlowId'] - - jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - base_instance_count = jf['Instances']['InstanceCount'] - - client.add_instance_groups( - JobFlowId=cluster_id, InstanceGroups=input_instance_groups[2:]) - - jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - jf['Instances']['InstanceCount'].should.equal( - sum(g['InstanceCount'] for g in input_instance_groups)) - for x in jf['Instances']['InstanceGroups']: - y = input_groups[x['Name']] - if hasattr(y, 'BidPrice'): - x['BidPrice'].should.equal('BidPrice') - x['CreationDateTime'].should.be.a('datetime.datetime') - # x['EndDateTime'].should.be.a('datetime.datetime') - x.should.have.key('InstanceGroupId') - 
x['InstanceRequestCount'].should.equal(y['InstanceCount']) - x['InstanceRole'].should.equal(y['InstanceRole']) - x['InstanceRunningCount'].should.equal(y['InstanceCount']) - x['InstanceType'].should.equal(y['InstanceType']) - # x['LastStateChangeReason'].should.equal(y['LastStateChangeReason']) - x['Market'].should.equal(y['Market']) - x['Name'].should.equal(y['Name']) - x['ReadyDateTime'].should.be.a('datetime.datetime') - x['StartDateTime'].should.be.a('datetime.datetime') - x['State'].should.equal('RUNNING') - - groups = client.list_instance_groups(ClusterId=cluster_id)[ - 'InstanceGroups'] - for x in groups: - y = input_groups[x['Name']] - if hasattr(y, 'BidPrice'): - x['BidPrice'].should.equal('BidPrice') - # Configurations - # EbsBlockDevices - # EbsOptimized - x.should.have.key('Id') - x['InstanceGroupType'].should.equal(y['InstanceRole']) - x['InstanceType'].should.equal(y['InstanceType']) - x['Market'].should.equal(y['Market']) - x['Name'].should.equal(y['Name']) - x['RequestedInstanceCount'].should.equal(y['InstanceCount']) - x['RunningInstanceCount'].should.equal(y['InstanceCount']) - # ShrinkPolicy - x['Status']['State'].should.equal('RUNNING') - x['Status']['StateChangeReason']['Code'].should.be.a(six.string_types) - # x['Status']['StateChangeReason']['Message'].should.be.a(six.string_types) - x['Status']['Timeline'][ - 'CreationDateTime'].should.be.a('datetime.datetime') - # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') - x['Status']['Timeline'][ - 'ReadyDateTime'].should.be.a('datetime.datetime') - - igs = dict((g['Name'], g) for g in groups) - client.modify_instance_groups( - InstanceGroups=[ - {'InstanceGroupId': igs['task-1']['Id'], - 'InstanceCount': 2}, - {'InstanceGroupId': igs['task-2']['Id'], - 'InstanceCount': 3}]) - jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - jf['Instances']['InstanceCount'].should.equal(base_instance_count + 5) - igs = dict((g['Name'], g) for g in jf['Instances']['InstanceGroups']) - igs['task-1']['InstanceRunningCount'].should.equal(2) - igs['task-2']['InstanceRunningCount'].should.equal(3) - - -@mock_emr -def test_steps(): - input_steps = [{ - 'HadoopJarStep': { - 'Args': [ - 'hadoop-streaming', - '-files', 's3://elasticmapreduce/samples/wordcount/wordSplitter.py#wordSplitter.py', - '-mapper', 'python wordSplitter.py', - '-input', 's3://elasticmapreduce/samples/wordcount/input', - '-output', 's3://output_bucket/output/wordcount_output', - '-reducer', 'aggregate' - ], - 'Jar': 'command-runner.jar', - }, - 'Name': 'My wordcount example', - }, { - 'HadoopJarStep': { - 'Args': [ - 'hadoop-streaming', - '-files', 's3://elasticmapreduce/samples/wordcount/wordSplitter2.py#wordSplitter2.py', - '-mapper', 'python wordSplitter2.py', - '-input', 's3://elasticmapreduce/samples/wordcount/input2', - '-output', 's3://output_bucket/output/wordcount_output2', - '-reducer', 'aggregate' - ], - 'Jar': 'command-runner.jar', - }, - 'Name': 'My wordcount example2', - }] - - # TODO: implementation and test for cancel_steps - - client = boto3.client('emr', region_name='us-east-1') - args = deepcopy(run_job_flow_args) - args['Steps'] = [input_steps[0]] - cluster_id = client.run_job_flow(**args)['JobFlowId'] - - jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - jf['Steps'].should.have.length_of(1) - - client.add_job_flow_steps(JobFlowId=cluster_id, Steps=[input_steps[1]]) - - jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] - jf['Steps'].should.have.length_of(2) - for idx, 
(x, y) in enumerate(zip(jf['Steps'], input_steps)): - x['ExecutionStatusDetail'].should.have.key('CreationDateTime') - # x['ExecutionStatusDetail'].should.have.key('EndDateTime') - # x['ExecutionStatusDetail'].should.have.key('LastStateChangeReason') - # x['ExecutionStatusDetail'].should.have.key('StartDateTime') - x['ExecutionStatusDetail']['State'].should.equal( - 'STARTING' if idx == 0 else 'PENDING') - x['StepConfig']['ActionOnFailure'].should.equal('TERMINATE_CLUSTER') - x['StepConfig']['HadoopJarStep'][ - 'Args'].should.equal(y['HadoopJarStep']['Args']) - x['StepConfig']['HadoopJarStep'][ - 'Jar'].should.equal(y['HadoopJarStep']['Jar']) - if 'MainClass' in y['HadoopJarStep']: - x['StepConfig']['HadoopJarStep']['MainClass'].should.equal( - y['HadoopJarStep']['MainClass']) - if 'Properties' in y['HadoopJarStep']: - x['StepConfig']['HadoopJarStep']['Properties'].should.equal( - y['HadoopJarStep']['Properties']) - x['StepConfig']['Name'].should.equal(y['Name']) - - expected = dict((s['Name'], s) for s in input_steps) - - steps = client.list_steps(ClusterId=cluster_id)['Steps'] - steps.should.have.length_of(2) - for x in steps: - y = expected[x['Name']] - x['ActionOnFailure'].should.equal('TERMINATE_CLUSTER') - x['Config']['Args'].should.equal(y['HadoopJarStep']['Args']) - x['Config']['Jar'].should.equal(y['HadoopJarStep']['Jar']) - # x['Config']['MainClass'].should.equal(y['HadoopJarStep']['MainClass']) - # Properties - x['Id'].should.be.a(six.string_types) - x['Name'].should.equal(y['Name']) - x['Status']['State'].should.be.within(['STARTING', 'PENDING']) - # StateChangeReason - x['Status']['Timeline'][ - 'CreationDateTime'].should.be.a('datetime.datetime') - # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') - # x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime') - - x = client.describe_step(ClusterId=cluster_id, StepId=x['Id'])['Step'] - x['ActionOnFailure'].should.equal('TERMINATE_CLUSTER') - x['Config']['Args'].should.equal(y['HadoopJarStep']['Args']) - x['Config']['Jar'].should.equal(y['HadoopJarStep']['Jar']) - # x['Config']['MainClass'].should.equal(y['HadoopJarStep']['MainClass']) - # Properties - x['Id'].should.be.a(six.string_types) - x['Name'].should.equal(y['Name']) - x['Status']['State'].should.be.within(['STARTING', 'PENDING']) - # StateChangeReason - x['Status']['Timeline'][ - 'CreationDateTime'].should.be.a('datetime.datetime') - # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') - # x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime') - - step_id = steps[0]['Id'] - steps = client.list_steps(ClusterId=cluster_id, StepIds=[step_id])['Steps'] - steps.should.have.length_of(1) - steps[0]['Id'].should.equal(step_id) - - steps = client.list_steps(ClusterId=cluster_id, - StepStates=['STARTING'])['Steps'] - steps.should.have.length_of(1) - steps[0]['Id'].should.equal(step_id) - - -@mock_emr -def test_tags(): - input_tags = [{'Key': 'newkey1', 'Value': 'newval1'}, - {'Key': 'newkey2', 'Value': 'newval2'}] - - client = boto3.client('emr', region_name='us-east-1') - cluster_id = client.run_job_flow(**run_job_flow_args)['JobFlowId'] - - client.add_tags(ResourceId=cluster_id, Tags=input_tags) - resp = client.describe_cluster(ClusterId=cluster_id)['Cluster'] - resp['Tags'].should.have.length_of(2) - dict((t['Key'], t['Value']) for t in resp['Tags']).should.equal( - dict((t['Key'], t['Value']) for t in input_tags)) - - client.remove_tags(ResourceId=cluster_id, TagKeys=[ - t['Key'] for t in 
input_tags]) - resp = client.describe_cluster(ClusterId=cluster_id)['Cluster'] - resp['Tags'].should.equal([]) +# -*- coding: utf-8 -*- +from __future__ import unicode_literals +import time +from copy import deepcopy +from datetime import datetime + +import boto3 +import pytz +import six +import sure # noqa +from botocore.exceptions import ClientError +from nose.tools import assert_raises + +from moto import mock_emr + + +run_job_flow_args = dict( + Instances={ + 'InstanceCount': 3, + 'KeepJobFlowAliveWhenNoSteps': True, + 'MasterInstanceType': 'c3.medium', + 'Placement': {'AvailabilityZone': 'us-east-1a'}, + 'SlaveInstanceType': 'c3.xlarge', + }, + JobFlowRole='EMR_EC2_DefaultRole', + LogUri='s3://mybucket/log', + Name='cluster', + ServiceRole='EMR_DefaultRole', + VisibleToAllUsers=True) + + +input_instance_groups = [ + {'InstanceCount': 1, + 'InstanceRole': 'MASTER', + 'InstanceType': 'c1.medium', + 'Market': 'ON_DEMAND', + 'Name': 'master'}, + {'InstanceCount': 3, + 'InstanceRole': 'CORE', + 'InstanceType': 'c1.medium', + 'Market': 'ON_DEMAND', + 'Name': 'core'}, + {'InstanceCount': 6, + 'InstanceRole': 'TASK', + 'InstanceType': 'c1.large', + 'Market': 'SPOT', + 'Name': 'task-1', + 'BidPrice': '0.07'}, + {'InstanceCount': 10, + 'InstanceRole': 'TASK', + 'InstanceType': 'c1.xlarge', + 'Market': 'SPOT', + 'Name': 'task-2', + 'BidPrice': '0.05'}, +] + + +@mock_emr +def test_describe_cluster(): + client = boto3.client('emr', region_name='us-east-1') + + args = deepcopy(run_job_flow_args) + args['Applications'] = [{'Name': 'Spark', 'Version': '2.4.2'}] + args['Configurations'] = [ + {'Classification': 'yarn-site', + 'Properties': {'someproperty': 'somevalue', + 'someotherproperty': 'someothervalue'}}, + {'Classification': 'nested-configs', + 'Properties': {}, + 'Configurations': [ + { + 'Classification': 'nested-config', + 'Properties': { + 'nested-property': 'nested-value' + } + } + ]} + ] + args['Instances']['AdditionalMasterSecurityGroups'] = ['additional-master'] + args['Instances']['AdditionalSlaveSecurityGroups'] = ['additional-slave'] + args['Instances']['Ec2KeyName'] = 'mykey' + args['Instances']['Ec2SubnetId'] = 'subnet-8be41cec' + args['Instances']['EmrManagedMasterSecurityGroup'] = 'master-security-group' + args['Instances']['EmrManagedSlaveSecurityGroup'] = 'slave-security-group' + args['Instances']['KeepJobFlowAliveWhenNoSteps'] = False + args['Instances']['ServiceAccessSecurityGroup'] = 'service-access-security-group' + args['Tags'] = [{'Key': 'tag1', 'Value': 'val1'}, + {'Key': 'tag2', 'Value': 'val2'}] + + cluster_id = client.run_job_flow(**args)['JobFlowId'] + + cl = client.describe_cluster(ClusterId=cluster_id)['Cluster'] + cl['Applications'][0]['Name'].should.equal('Spark') + cl['Applications'][0]['Version'].should.equal('2.4.2') + cl['AutoTerminate'].should.equal(True) + + config = cl['Configurations'][0] + config['Classification'].should.equal('yarn-site') + config['Properties'].should.equal(args['Configurations'][0]['Properties']) + + nested_config = cl['Configurations'][1] + nested_config['Classification'].should.equal('nested-configs') + nested_config['Properties'].should.equal(args['Configurations'][1]['Properties']) + + attrs = cl['Ec2InstanceAttributes'] + attrs['AdditionalMasterSecurityGroups'].should.equal( + args['Instances']['AdditionalMasterSecurityGroups']) + attrs['AdditionalSlaveSecurityGroups'].should.equal( + args['Instances']['AdditionalSlaveSecurityGroups']) + attrs['Ec2AvailabilityZone'].should.equal('us-east-1a') + 
attrs['Ec2KeyName'].should.equal(args['Instances']['Ec2KeyName']) + attrs['Ec2SubnetId'].should.equal(args['Instances']['Ec2SubnetId']) + attrs['EmrManagedMasterSecurityGroup'].should.equal( + args['Instances']['EmrManagedMasterSecurityGroup']) + attrs['EmrManagedSlaveSecurityGroup'].should.equal( + args['Instances']['EmrManagedSlaveSecurityGroup']) + attrs['IamInstanceProfile'].should.equal(args['JobFlowRole']) + attrs['ServiceAccessSecurityGroup'].should.equal( + args['Instances']['ServiceAccessSecurityGroup']) + cl['Id'].should.equal(cluster_id) + cl['LogUri'].should.equal(args['LogUri']) + cl['MasterPublicDnsName'].should.be.a(six.string_types) + cl['Name'].should.equal(args['Name']) + cl['NormalizedInstanceHours'].should.equal(0) + # cl['ReleaseLabel'].should.equal('emr-5.0.0') + cl.shouldnt.have.key('RequestedAmiVersion') + cl['RunningAmiVersion'].should.equal('1.0.0') + # cl['SecurityConfiguration'].should.be.a(six.string_types) + cl['ServiceRole'].should.equal(args['ServiceRole']) + + status = cl['Status'] + status['State'].should.equal('TERMINATED') + # cluster['Status']['StateChangeReason'] + status['Timeline']['CreationDateTime'].should.be.a('datetime.datetime') + # status['Timeline']['EndDateTime'].should.equal(datetime(2014, 1, 24, 2, 19, 46, tzinfo=pytz.utc)) + status['Timeline']['ReadyDateTime'].should.be.a('datetime.datetime') + + dict((t['Key'], t['Value']) for t in cl['Tags']).should.equal( + dict((t['Key'], t['Value']) for t in args['Tags'])) + + cl['TerminationProtected'].should.equal(False) + cl['VisibleToAllUsers'].should.equal(True) + + +@mock_emr +def test_describe_cluster_not_found(): + conn = boto3.client('emr', region_name='us-east-1') + raised = False + try: + cluster = conn.describe_cluster(ClusterId='DummyId') + except ClientError as e: + if e.response['Error']['Code'] == "ResourceNotFoundException": + raised = True + raised.should.equal(True) + + +@mock_emr +def test_describe_job_flows(): + client = boto3.client('emr', region_name='us-east-1') + args = deepcopy(run_job_flow_args) + expected = {} + + for idx in range(4): + cluster_name = 'cluster' + str(idx) + args['Name'] = cluster_name + cluster_id = client.run_job_flow(**args)['JobFlowId'] + expected[cluster_id] = { + 'Id': cluster_id, + 'Name': cluster_name, + 'State': 'WAITING' + } + + # need sleep since it appears the timestamp is always rounded to + # the nearest second internally + time.sleep(1) + timestamp = datetime.now(pytz.utc) + time.sleep(1) + + for idx in range(4, 6): + cluster_name = 'cluster' + str(idx) + args['Name'] = cluster_name + cluster_id = client.run_job_flow(**args)['JobFlowId'] + client.terminate_job_flows(JobFlowIds=[cluster_id]) + expected[cluster_id] = { + 'Id': cluster_id, + 'Name': cluster_name, + 'State': 'TERMINATED' + } + + resp = client.describe_job_flows() + resp['JobFlows'].should.have.length_of(6) + + for cluster_id, y in expected.items(): + resp = client.describe_job_flows(JobFlowIds=[cluster_id]) + resp['JobFlows'].should.have.length_of(1) + resp['JobFlows'][0]['JobFlowId'].should.equal(cluster_id) + + resp = client.describe_job_flows(JobFlowStates=['WAITING']) + resp['JobFlows'].should.have.length_of(4) + for x in resp['JobFlows']: + x['ExecutionStatusDetail']['State'].should.equal('WAITING') + + resp = client.describe_job_flows(CreatedBefore=timestamp) + resp['JobFlows'].should.have.length_of(4) + + resp = client.describe_job_flows(CreatedAfter=timestamp) + resp['JobFlows'].should.have.length_of(2) + + +@mock_emr +def test_describe_job_flow(): + client = 
boto3.client('emr', region_name='us-east-1') + + args = deepcopy(run_job_flow_args) + args['AmiVersion'] = '3.8.1' + args['Instances'].update( + {'Ec2KeyName': 'ec2keyname', + 'Ec2SubnetId': 'subnet-8be41cec', + 'HadoopVersion': '2.4.0'}) + args['VisibleToAllUsers'] = True + + cluster_id = client.run_job_flow(**args)['JobFlowId'] + + jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] + + jf['AmiVersion'].should.equal(args['AmiVersion']) + jf.shouldnt.have.key('BootstrapActions') + esd = jf['ExecutionStatusDetail'] + esd['CreationDateTime'].should.be.a('datetime.datetime') + # esd['EndDateTime'].should.be.a('datetime.datetime') + # esd['LastStateChangeReason'].should.be.a(six.string_types) + esd['ReadyDateTime'].should.be.a('datetime.datetime') + esd['StartDateTime'].should.be.a('datetime.datetime') + esd['State'].should.equal('WAITING') + attrs = jf['Instances'] + attrs['Ec2KeyName'].should.equal(args['Instances']['Ec2KeyName']) + attrs['Ec2SubnetId'].should.equal(args['Instances']['Ec2SubnetId']) + attrs['HadoopVersion'].should.equal(args['Instances']['HadoopVersion']) + attrs['InstanceCount'].should.equal(args['Instances']['InstanceCount']) + for ig in attrs['InstanceGroups']: + # ig['BidPrice'] + ig['CreationDateTime'].should.be.a('datetime.datetime') + # ig['EndDateTime'].should.be.a('datetime.datetime') + ig['InstanceGroupId'].should.be.a(six.string_types) + ig['InstanceRequestCount'].should.be.a(int) + ig['InstanceRole'].should.be.within(['MASTER', 'CORE']) + ig['InstanceRunningCount'].should.be.a(int) + ig['InstanceType'].should.be.within(['c3.medium', 'c3.xlarge']) + # ig['LastStateChangeReason'].should.be.a(six.string_types) + ig['Market'].should.equal('ON_DEMAND') + ig['Name'].should.be.a(six.string_types) + ig['ReadyDateTime'].should.be.a('datetime.datetime') + ig['StartDateTime'].should.be.a('datetime.datetime') + ig['State'].should.equal('RUNNING') + attrs['KeepJobFlowAliveWhenNoSteps'].should.equal(True) + # attrs['MasterInstanceId'].should.be.a(six.string_types) + attrs['MasterInstanceType'].should.equal( + args['Instances']['MasterInstanceType']) + attrs['MasterPublicDnsName'].should.be.a(six.string_types) + attrs['NormalizedInstanceHours'].should.equal(0) + attrs['Placement']['AvailabilityZone'].should.equal( + args['Instances']['Placement']['AvailabilityZone']) + attrs['SlaveInstanceType'].should.equal( + args['Instances']['SlaveInstanceType']) + attrs['TerminationProtected'].should.equal(False) + jf['JobFlowId'].should.equal(cluster_id) + jf['JobFlowRole'].should.equal(args['JobFlowRole']) + jf['LogUri'].should.equal(args['LogUri']) + jf['Name'].should.equal(args['Name']) + jf['ServiceRole'].should.equal(args['ServiceRole']) + jf['Steps'].should.equal([]) + jf['SupportedProducts'].should.equal([]) + jf['VisibleToAllUsers'].should.equal(True) + + +@mock_emr +def test_list_clusters(): + client = boto3.client('emr', region_name='us-east-1') + args = deepcopy(run_job_flow_args) + expected = {} + + for idx in range(40): + cluster_name = 'jobflow' + str(idx) + args['Name'] = cluster_name + cluster_id = client.run_job_flow(**args)['JobFlowId'] + expected[cluster_id] = { + 'Id': cluster_id, + 'Name': cluster_name, + 'NormalizedInstanceHours': 0, + 'State': 'WAITING' + } + + # need sleep since it appears the timestamp is always rounded to + # the nearest second internally + time.sleep(1) + timestamp = datetime.now(pytz.utc) + time.sleep(1) + + for idx in range(40, 70): + cluster_name = 'jobflow' + str(idx) + args['Name'] = cluster_name + cluster_id = 
client.run_job_flow(**args)['JobFlowId'] + client.terminate_job_flows(JobFlowIds=[cluster_id]) + expected[cluster_id] = { + 'Id': cluster_id, + 'Name': cluster_name, + 'NormalizedInstanceHours': 0, + 'State': 'TERMINATED' + } + + args = {} + while 1: + resp = client.list_clusters(**args) + clusters = resp['Clusters'] + len(clusters).should.be.lower_than_or_equal_to(50) + for x in clusters: + y = expected[x['Id']] + x['Id'].should.equal(y['Id']) + x['Name'].should.equal(y['Name']) + x['NormalizedInstanceHours'].should.equal( + y['NormalizedInstanceHours']) + x['Status']['State'].should.equal(y['State']) + x['Status']['Timeline'][ + 'CreationDateTime'].should.be.a('datetime.datetime') + if y['State'] == 'TERMINATED': + x['Status']['Timeline'][ + 'EndDateTime'].should.be.a('datetime.datetime') + else: + x['Status']['Timeline'].shouldnt.have.key('EndDateTime') + x['Status']['Timeline'][ + 'ReadyDateTime'].should.be.a('datetime.datetime') + marker = resp.get('Marker') + if marker is None: + break + args = {'Marker': marker} + + resp = client.list_clusters(ClusterStates=['TERMINATED']) + resp['Clusters'].should.have.length_of(30) + for x in resp['Clusters']: + x['Status']['State'].should.equal('TERMINATED') + + resp = client.list_clusters(CreatedBefore=timestamp) + resp['Clusters'].should.have.length_of(40) + + resp = client.list_clusters(CreatedAfter=timestamp) + resp['Clusters'].should.have.length_of(30) + + +@mock_emr +def test_run_job_flow(): + client = boto3.client('emr', region_name='us-east-1') + args = deepcopy(run_job_flow_args) + cluster_id = client.run_job_flow(**args)['JobFlowId'] + resp = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] + resp['ExecutionStatusDetail']['State'].should.equal('WAITING') + resp['JobFlowId'].should.equal(cluster_id) + resp['Name'].should.equal(args['Name']) + resp['Instances']['MasterInstanceType'].should.equal( + args['Instances']['MasterInstanceType']) + resp['Instances']['SlaveInstanceType'].should.equal( + args['Instances']['SlaveInstanceType']) + resp['LogUri'].should.equal(args['LogUri']) + resp['VisibleToAllUsers'].should.equal(args['VisibleToAllUsers']) + resp['Instances']['NormalizedInstanceHours'].should.equal(0) + resp['Steps'].should.equal([]) + + +@mock_emr +def test_run_job_flow_with_invalid_params(): + client = boto3.client('emr', region_name='us-east-1') + with assert_raises(ClientError) as ex: + # cannot set both AmiVersion and ReleaseLabel + args = deepcopy(run_job_flow_args) + args['AmiVersion'] = '2.4' + args['ReleaseLabel'] = 'emr-5.0.0' + client.run_job_flow(**args) + ex.exception.response['Error']['Code'].should.equal('ValidationException') + + +@mock_emr +def test_run_job_flow_in_multiple_regions(): + regions = {} + for region in ['us-east-1', 'eu-west-1']: + client = boto3.client('emr', region_name=region) + args = deepcopy(run_job_flow_args) + args['Name'] = region + cluster_id = client.run_job_flow(**args)['JobFlowId'] + regions[region] = {'client': client, 'cluster_id': cluster_id} + + for region in regions.keys(): + client = regions[region]['client'] + resp = client.describe_cluster(ClusterId=regions[region]['cluster_id']) + resp['Cluster']['Name'].should.equal(region) + + +@mock_emr +def test_run_job_flow_with_new_params(): + client = boto3.client('emr', region_name='us-east-1') + resp = client.run_job_flow(**run_job_flow_args) + resp.should.have.key('JobFlowId') + + +@mock_emr +def test_run_job_flow_with_visible_to_all_users(): + client = boto3.client('emr', region_name='us-east-1') + for expected in (True, 
False): + args = deepcopy(run_job_flow_args) + args['VisibleToAllUsers'] = expected + resp = client.run_job_flow(**args) + cluster_id = resp['JobFlowId'] + resp = client.describe_cluster(ClusterId=cluster_id) + resp['Cluster']['VisibleToAllUsers'].should.equal(expected) + + +@mock_emr +def test_run_job_flow_with_instance_groups(): + input_groups = dict((g['Name'], g) for g in input_instance_groups) + client = boto3.client('emr', region_name='us-east-1') + args = deepcopy(run_job_flow_args) + args['Instances'] = {'InstanceGroups': input_instance_groups} + cluster_id = client.run_job_flow(**args)['JobFlowId'] + groups = client.list_instance_groups(ClusterId=cluster_id)[ + 'InstanceGroups'] + for x in groups: + y = input_groups[x['Name']] + x.should.have.key('Id') + x['RequestedInstanceCount'].should.equal(y['InstanceCount']) + x['InstanceGroupType'].should.equal(y['InstanceRole']) + x['InstanceType'].should.equal(y['InstanceType']) + x['Market'].should.equal(y['Market']) + if 'BidPrice' in y: + x['BidPrice'].should.equal(y['BidPrice']) + + +@mock_emr +def test_set_termination_protection(): + client = boto3.client('emr', region_name='us-east-1') + args = deepcopy(run_job_flow_args) + args['Instances']['TerminationProtected'] = False + resp = client.run_job_flow(**args) + cluster_id = resp['JobFlowId'] + resp = client.describe_cluster(ClusterId=cluster_id) + resp['Cluster']['TerminationProtected'].should.equal(False) + + for expected in (True, False): + resp = client.set_termination_protection(JobFlowIds=[cluster_id], + TerminationProtected=expected) + resp = client.describe_cluster(ClusterId=cluster_id) + resp['Cluster']['TerminationProtected'].should.equal(expected) + + +@mock_emr +def test_set_visible_to_all_users(): + client = boto3.client('emr', region_name='us-east-1') + args = deepcopy(run_job_flow_args) + args['VisibleToAllUsers'] = False + resp = client.run_job_flow(**args) + cluster_id = resp['JobFlowId'] + resp = client.describe_cluster(ClusterId=cluster_id) + resp['Cluster']['VisibleToAllUsers'].should.equal(False) + + for expected in (True, False): + resp = client.set_visible_to_all_users(JobFlowIds=[cluster_id], + VisibleToAllUsers=expected) + resp = client.describe_cluster(ClusterId=cluster_id) + resp['Cluster']['VisibleToAllUsers'].should.equal(expected) + + +@mock_emr +def test_terminate_job_flows(): + client = boto3.client('emr', region_name='us-east-1') + + resp = client.run_job_flow(**run_job_flow_args) + cluster_id = resp['JobFlowId'] + resp = client.describe_cluster(ClusterId=cluster_id) + resp['Cluster']['Status']['State'].should.equal('WAITING') + + resp = client.terminate_job_flows(JobFlowIds=[cluster_id]) + resp = client.describe_cluster(ClusterId=cluster_id) + resp['Cluster']['Status']['State'].should.equal('TERMINATED') + + +# testing multiple end points for each feature + +@mock_emr +def test_bootstrap_actions(): + bootstrap_actions = [ + {'Name': 'bs1', + 'ScriptBootstrapAction': { + 'Args': ['arg1', 'arg2'], + 'Path': 's3://path/to/script'}}, + {'Name': 'bs2', + 'ScriptBootstrapAction': { + 'Args': [], + 'Path': 's3://path/to/anotherscript'}} + ] + + client = boto3.client('emr', region_name='us-east-1') + args = deepcopy(run_job_flow_args) + args['BootstrapActions'] = bootstrap_actions + cluster_id = client.run_job_flow(**args)['JobFlowId'] + + cl = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] + for x, y in zip(cl['BootstrapActions'], bootstrap_actions): + x['BootstrapActionConfig'].should.equal(y) + + resp = 
client.list_bootstrap_actions(ClusterId=cluster_id)
+ for x, y in zip(resp['BootstrapActions'], bootstrap_actions):
+ x['Name'].should.equal(y['Name'])
+ if 'Args' in y['ScriptBootstrapAction']:
+ x['Args'].should.equal(y['ScriptBootstrapAction']['Args'])
+ x['ScriptPath'].should.equal(y['ScriptBootstrapAction']['Path'])
+
+
+@mock_emr
+def test_instance_groups():
+ input_groups = dict((g['Name'], g) for g in input_instance_groups)
+
+ client = boto3.client('emr', region_name='us-east-1')
+ args = deepcopy(run_job_flow_args)
+ for key in ['MasterInstanceType', 'SlaveInstanceType', 'InstanceCount']:
+ del args['Instances'][key]
+ args['Instances']['InstanceGroups'] = input_instance_groups[:2]
+ cluster_id = client.run_job_flow(**args)['JobFlowId']
+
+ jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
+ base_instance_count = jf['Instances']['InstanceCount']
+
+ client.add_instance_groups(
+ JobFlowId=cluster_id, InstanceGroups=input_instance_groups[2:])
+
+ jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
+ jf['Instances']['InstanceCount'].should.equal(
+ sum(g['InstanceCount'] for g in input_instance_groups))
+ for x in jf['Instances']['InstanceGroups']:
+ y = input_groups[x['Name']]
+ if 'BidPrice' in y:
+ x['BidPrice'].should.equal(y['BidPrice'])
+ x['CreationDateTime'].should.be.a('datetime.datetime')
+ # x['EndDateTime'].should.be.a('datetime.datetime')
+ x.should.have.key('InstanceGroupId')
+ x['InstanceRequestCount'].should.equal(y['InstanceCount'])
+ x['InstanceRole'].should.equal(y['InstanceRole'])
+ x['InstanceRunningCount'].should.equal(y['InstanceCount'])
+ x['InstanceType'].should.equal(y['InstanceType'])
+ # x['LastStateChangeReason'].should.equal(y['LastStateChangeReason'])
+ x['Market'].should.equal(y['Market'])
+ x['Name'].should.equal(y['Name'])
+ x['ReadyDateTime'].should.be.a('datetime.datetime')
+ x['StartDateTime'].should.be.a('datetime.datetime')
+ x['State'].should.equal('RUNNING')
+
+ groups = client.list_instance_groups(ClusterId=cluster_id)[
+ 'InstanceGroups']
+ for x in groups:
+ y = input_groups[x['Name']]
+ if 'BidPrice' in y:
+ x['BidPrice'].should.equal(y['BidPrice'])
+ # Configurations
+ # EbsBlockDevices
+ # EbsOptimized
+ x.should.have.key('Id')
+ x['InstanceGroupType'].should.equal(y['InstanceRole'])
+ x['InstanceType'].should.equal(y['InstanceType'])
+ x['Market'].should.equal(y['Market'])
+ x['Name'].should.equal(y['Name'])
+ x['RequestedInstanceCount'].should.equal(y['InstanceCount'])
+ x['RunningInstanceCount'].should.equal(y['InstanceCount'])
+ # ShrinkPolicy
+ x['Status']['State'].should.equal('RUNNING')
+ x['Status']['StateChangeReason']['Code'].should.be.a(six.string_types)
+ # x['Status']['StateChangeReason']['Message'].should.be.a(six.string_types)
+ x['Status']['Timeline'][
+ 'CreationDateTime'].should.be.a('datetime.datetime')
+ # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime')
+ x['Status']['Timeline'][
+ 'ReadyDateTime'].should.be.a('datetime.datetime')
+
+ igs = dict((g['Name'], g) for g in groups)
+ client.modify_instance_groups(
+ InstanceGroups=[
+ {'InstanceGroupId': igs['task-1']['Id'],
+ 'InstanceCount': 2},
+ {'InstanceGroupId': igs['task-2']['Id'],
+ 'InstanceCount': 3}])
+ jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0]
+ jf['Instances']['InstanceCount'].should.equal(base_instance_count + 5)
+ igs = dict((g['Name'], g) for g in jf['Instances']['InstanceGroups'])
+ igs['task-1']['InstanceRunningCount'].should.equal(2)
+ 
igs['task-2']['InstanceRunningCount'].should.equal(3) + + +@mock_emr +def test_steps(): + input_steps = [{ + 'HadoopJarStep': { + 'Args': [ + 'hadoop-streaming', + '-files', 's3://elasticmapreduce/samples/wordcount/wordSplitter.py#wordSplitter.py', + '-mapper', 'python wordSplitter.py', + '-input', 's3://elasticmapreduce/samples/wordcount/input', + '-output', 's3://output_bucket/output/wordcount_output', + '-reducer', 'aggregate' + ], + 'Jar': 'command-runner.jar', + }, + 'Name': 'My wordcount example', + }, { + 'HadoopJarStep': { + 'Args': [ + 'hadoop-streaming', + '-files', 's3://elasticmapreduce/samples/wordcount/wordSplitter2.py#wordSplitter2.py', + '-mapper', 'python wordSplitter2.py', + '-input', 's3://elasticmapreduce/samples/wordcount/input2', + '-output', 's3://output_bucket/output/wordcount_output2', + '-reducer', 'aggregate' + ], + 'Jar': 'command-runner.jar', + }, + 'Name': 'My wordcount example2', + }] + + # TODO: implementation and test for cancel_steps + + client = boto3.client('emr', region_name='us-east-1') + args = deepcopy(run_job_flow_args) + args['Steps'] = [input_steps[0]] + cluster_id = client.run_job_flow(**args)['JobFlowId'] + + jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] + jf['Steps'].should.have.length_of(1) + + client.add_job_flow_steps(JobFlowId=cluster_id, Steps=[input_steps[1]]) + + jf = client.describe_job_flows(JobFlowIds=[cluster_id])['JobFlows'][0] + jf['Steps'].should.have.length_of(2) + for idx, (x, y) in enumerate(zip(jf['Steps'], input_steps)): + x['ExecutionStatusDetail'].should.have.key('CreationDateTime') + # x['ExecutionStatusDetail'].should.have.key('EndDateTime') + # x['ExecutionStatusDetail'].should.have.key('LastStateChangeReason') + # x['ExecutionStatusDetail'].should.have.key('StartDateTime') + x['ExecutionStatusDetail']['State'].should.equal( + 'STARTING' if idx == 0 else 'PENDING') + x['StepConfig']['ActionOnFailure'].should.equal('TERMINATE_CLUSTER') + x['StepConfig']['HadoopJarStep'][ + 'Args'].should.equal(y['HadoopJarStep']['Args']) + x['StepConfig']['HadoopJarStep'][ + 'Jar'].should.equal(y['HadoopJarStep']['Jar']) + if 'MainClass' in y['HadoopJarStep']: + x['StepConfig']['HadoopJarStep']['MainClass'].should.equal( + y['HadoopJarStep']['MainClass']) + if 'Properties' in y['HadoopJarStep']: + x['StepConfig']['HadoopJarStep']['Properties'].should.equal( + y['HadoopJarStep']['Properties']) + x['StepConfig']['Name'].should.equal(y['Name']) + + expected = dict((s['Name'], s) for s in input_steps) + + steps = client.list_steps(ClusterId=cluster_id)['Steps'] + steps.should.have.length_of(2) + for x in steps: + y = expected[x['Name']] + x['ActionOnFailure'].should.equal('TERMINATE_CLUSTER') + x['Config']['Args'].should.equal(y['HadoopJarStep']['Args']) + x['Config']['Jar'].should.equal(y['HadoopJarStep']['Jar']) + # x['Config']['MainClass'].should.equal(y['HadoopJarStep']['MainClass']) + # Properties + x['Id'].should.be.a(six.string_types) + x['Name'].should.equal(y['Name']) + x['Status']['State'].should.be.within(['STARTING', 'PENDING']) + # StateChangeReason + x['Status']['Timeline'][ + 'CreationDateTime'].should.be.a('datetime.datetime') + # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime') + # x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime') + + x = client.describe_step(ClusterId=cluster_id, StepId=x['Id'])['Step'] + x['ActionOnFailure'].should.equal('TERMINATE_CLUSTER') + x['Config']['Args'].should.equal(y['HadoopJarStep']['Args']) + 
x['Config']['Jar'].should.equal(y['HadoopJarStep']['Jar'])
+ # x['Config']['MainClass'].should.equal(y['HadoopJarStep']['MainClass'])
+ # Properties
+ x['Id'].should.be.a(six.string_types)
+ x['Name'].should.equal(y['Name'])
+ x['Status']['State'].should.be.within(['STARTING', 'PENDING'])
+ # StateChangeReason
+ x['Status']['Timeline'][
+ 'CreationDateTime'].should.be.a('datetime.datetime')
+ # x['Status']['Timeline']['EndDateTime'].should.be.a('datetime.datetime')
+ # x['Status']['Timeline']['StartDateTime'].should.be.a('datetime.datetime')
+
+ step_id = steps[0]['Id']
+ steps = client.list_steps(ClusterId=cluster_id, StepIds=[step_id])['Steps']
+ steps.should.have.length_of(1)
+ steps[0]['Id'].should.equal(step_id)
+
+ steps = client.list_steps(ClusterId=cluster_id,
+ StepStates=['STARTING'])['Steps']
+ steps.should.have.length_of(1)
+ steps[0]['Id'].should.equal(step_id)
+
+
+@mock_emr
+def test_tags():
+ input_tags = [{'Key': 'newkey1', 'Value': 'newval1'},
+ {'Key': 'newkey2', 'Value': 'newval2'}]
+
+ client = boto3.client('emr', region_name='us-east-1')
+ cluster_id = client.run_job_flow(**run_job_flow_args)['JobFlowId']
+
+ client.add_tags(ResourceId=cluster_id, Tags=input_tags)
+ resp = client.describe_cluster(ClusterId=cluster_id)['Cluster']
+ resp['Tags'].should.have.length_of(2)
+ dict((t['Key'], t['Value']) for t in resp['Tags']).should.equal(
+ dict((t['Key'], t['Value']) for t in input_tags))
+
+ client.remove_tags(ResourceId=cluster_id, TagKeys=[
+ t['Key'] for t in input_tags])
+ resp = client.describe_cluster(ClusterId=cluster_id)['Cluster']
+ resp['Tags'].should.equal([])
diff --git a/tests/test_emr/test_server.py b/tests/test_emr/test_server.py
index 56eba3ff8..f2b215ec7 100644
--- a/tests/test_emr/test_server.py
+++ b/tests/test_emr/test_server.py
@@ -1,18 +1,18 @@
-from __future__ import unicode_literals
-import sure # noqa
-
-import moto.server as server
-
-'''
-Test the different server responses
-'''
-
-
-def test_describe_jobflows():
- backend = server.create_backend_app("emr")
- test_client = backend.test_client()
-
- res = test_client.get('/?Action=DescribeJobFlows')
-
- res.data.should.contain(b'<DescribeJobFlowsResult>')
- res.data.should.contain(b'<JobFlows>')
+from __future__ import unicode_literals
+import sure # noqa
+
+import moto.server as server
+
+'''
+Test the different server responses
+'''
+
+
+def test_describe_jobflows():
+ backend = server.create_backend_app("emr")
+ test_client = backend.test_client()
+
+ res = test_client.get('/?Action=DescribeJobFlows')
+
+ res.data.should.contain(b'<DescribeJobFlowsResult>')
+ res.data.should.contain(b'<JobFlows>')
diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py
index 80630c5b8..d459af533 100644
--- a/tests/test_events/test_events.py
+++ b/tests/test_events/test_events.py
@@ -1,211 +1,211 @@
-import random
-
-import boto3
-import json
-
-from moto.events import mock_events
-from botocore.exceptions import ClientError
-from nose.tools import assert_raises
-
-
-RULES = [
- {'Name': 'test1', 'ScheduleExpression': 'rate(5 minutes)'},
- {'Name': 'test2', 'ScheduleExpression': 'rate(1 minute)'},
- {'Name': 'test3', 'EventPattern': '{"source": ["test-source"]}'}
-]
-
-TARGETS = {
- 'test-target-1': {
- 'Id': 'test-target-1',
- 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-1',
- 'Rules': ['test1', 'test2']
- },
- 'test-target-2': {
- 'Id': 'test-target-2',
- 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-2',
- 'Rules': ['test1', 'test3']
- },
- 'test-target-3': {
- 'Id': 'test-target-3',
- 'Arn':
'arn:aws:lambda:us-west-2:111111111111:function:test-function-3', - 'Rules': ['test1', 'test2'] - }, - 'test-target-4': { - 'Id': 'test-target-4', - 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-4', - 'Rules': ['test1', 'test3'] - }, - 'test-target-5': { - 'Id': 'test-target-5', - 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-5', - 'Rules': ['test1', 'test2'] - }, - 'test-target-6': { - 'Id': 'test-target-6', - 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-6', - 'Rules': ['test1', 'test3'] - } -} - - -def get_random_rule(): - return RULES[random.randint(0, len(RULES) - 1)] - - -def generate_environment(): - client = boto3.client('events', 'us-west-2') - - for rule in RULES: - client.put_rule( - Name=rule['Name'], - ScheduleExpression=rule.get('ScheduleExpression', ''), - EventPattern=rule.get('EventPattern', '') - ) - - targets = [] - for target in TARGETS: - if rule['Name'] in TARGETS[target].get('Rules'): - targets.append({'Id': target, 'Arn': TARGETS[target]['Arn']}) - - client.put_targets(Rule=rule['Name'], Targets=targets) - - return client - - -@mock_events -def test_list_rules(): - client = generate_environment() - response = client.list_rules() - - assert(response is not None) - assert(len(response['Rules']) > 0) - - -@mock_events -def test_describe_rule(): - rule_name = get_random_rule()['Name'] - client = generate_environment() - response = client.describe_rule(Name=rule_name) - - assert(response is not None) - assert(response.get('Name') == rule_name) - assert(response.get('Arn') is not None) - - -@mock_events -def test_enable_disable_rule(): - rule_name = get_random_rule()['Name'] - client = generate_environment() - - # Rules should start out enabled in these tests. - rule = client.describe_rule(Name=rule_name) - assert(rule['State'] == 'ENABLED') - - client.disable_rule(Name=rule_name) - rule = client.describe_rule(Name=rule_name) - assert(rule['State'] == 'DISABLED') - - client.enable_rule(Name=rule_name) - rule = client.describe_rule(Name=rule_name) - assert(rule['State'] == 'ENABLED') - - -@mock_events -def test_list_rule_names_by_target(): - test_1_target = TARGETS['test-target-1'] - test_2_target = TARGETS['test-target-2'] - client = generate_environment() - - rules = client.list_rule_names_by_target(TargetArn=test_1_target['Arn']) - assert(len(rules['RuleNames']) == len(test_1_target['Rules'])) - for rule in rules['RuleNames']: - assert(rule in test_1_target['Rules']) - - rules = client.list_rule_names_by_target(TargetArn=test_2_target['Arn']) - assert(len(rules['RuleNames']) == len(test_2_target['Rules'])) - for rule in rules['RuleNames']: - assert(rule in test_2_target['Rules']) - - -@mock_events -def test_list_rules(): - client = generate_environment() - - rules = client.list_rules() - assert(len(rules['Rules']) == len(RULES)) - - -@mock_events -def test_delete_rule(): - client = generate_environment() - - client.delete_rule(Name=RULES[0]['Name']) - rules = client.list_rules() - assert(len(rules['Rules']) == len(RULES) - 1) - - -@mock_events -def test_list_targets_by_rule(): - rule_name = get_random_rule()['Name'] - client = generate_environment() - targets = client.list_targets_by_rule(Rule=rule_name) - - expected_targets = [] - for target in TARGETS: - if rule_name in TARGETS[target].get('Rules'): - expected_targets.append(target) - - assert(len(targets['Targets']) == len(expected_targets)) - - -@mock_events -def test_remove_targets(): - rule_name = get_random_rule()['Name'] - client = 
generate_environment() - - targets = client.list_targets_by_rule(Rule=rule_name)['Targets'] - targets_before = len(targets) - assert(targets_before > 0) - - client.remove_targets(Rule=rule_name, Ids=[targets[0]['Id']]) - - targets = client.list_targets_by_rule(Rule=rule_name)['Targets'] - targets_after = len(targets) - assert(targets_before - 1 == targets_after) - - -@mock_events -def test_permissions(): - client = boto3.client('events', 'eu-central-1') - - client.put_permission(Action='events:PutEvents', Principal='111111111111', StatementId='Account1') - client.put_permission(Action='events:PutEvents', Principal='222222222222', StatementId='Account2') - - resp = client.describe_event_bus() - resp_policy = json.loads(resp['Policy']) - assert len(resp_policy['Statement']) == 2 - - client.remove_permission(StatementId='Account2') - - resp = client.describe_event_bus() - resp_policy = json.loads(resp['Policy']) - assert len(resp_policy['Statement']) == 1 - assert resp_policy['Statement'][0]['Sid'] == 'Account1' - - -@mock_events -def test_put_events(): - client = boto3.client('events', 'eu-central-1') - - event = { - "Source": "com.mycompany.myapp", - "Detail": '{"key1": "value3", "key2": "value4"}', - "Resources": ["resource1", "resource2"], - "DetailType": "myDetailType" - } - - client.put_events(Entries=[event]) - # Boto3 would error if it didn't return 200 OK - - with assert_raises(ClientError): - client.put_events(Entries=[event]*20) +import random + +import boto3 +import json + +from moto.events import mock_events +from botocore.exceptions import ClientError +from nose.tools import assert_raises + + +RULES = [ + {'Name': 'test1', 'ScheduleExpression': 'rate(5 minutes)'}, + {'Name': 'test2', 'ScheduleExpression': 'rate(1 minute)'}, + {'Name': 'test3', 'EventPattern': '{"source": ["test-source"]}'} +] + +TARGETS = { + 'test-target-1': { + 'Id': 'test-target-1', + 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-1', + 'Rules': ['test1', 'test2'] + }, + 'test-target-2': { + 'Id': 'test-target-2', + 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-2', + 'Rules': ['test1', 'test3'] + }, + 'test-target-3': { + 'Id': 'test-target-3', + 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-3', + 'Rules': ['test1', 'test2'] + }, + 'test-target-4': { + 'Id': 'test-target-4', + 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-4', + 'Rules': ['test1', 'test3'] + }, + 'test-target-5': { + 'Id': 'test-target-5', + 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-5', + 'Rules': ['test1', 'test2'] + }, + 'test-target-6': { + 'Id': 'test-target-6', + 'Arn': 'arn:aws:lambda:us-west-2:111111111111:function:test-function-6', + 'Rules': ['test1', 'test3'] + } +} + + +def get_random_rule(): + return RULES[random.randint(0, len(RULES) - 1)] + + +def generate_environment(): + client = boto3.client('events', 'us-west-2') + + for rule in RULES: + client.put_rule( + Name=rule['Name'], + ScheduleExpression=rule.get('ScheduleExpression', ''), + EventPattern=rule.get('EventPattern', '') + ) + + targets = [] + for target in TARGETS: + if rule['Name'] in TARGETS[target].get('Rules'): + targets.append({'Id': target, 'Arn': TARGETS[target]['Arn']}) + + client.put_targets(Rule=rule['Name'], Targets=targets) + + return client + + +@mock_events +def test_list_rules(): + client = generate_environment() + response = client.list_rules() + + assert(response is not None) + assert(len(response['Rules']) > 0) + + +@mock_events +def 
test_describe_rule(): + rule_name = get_random_rule()['Name'] + client = generate_environment() + response = client.describe_rule(Name=rule_name) + + assert(response is not None) + assert(response.get('Name') == rule_name) + assert(response.get('Arn') is not None) + + +@mock_events +def test_enable_disable_rule(): + rule_name = get_random_rule()['Name'] + client = generate_environment() + + # Rules should start out enabled in these tests. + rule = client.describe_rule(Name=rule_name) + assert(rule['State'] == 'ENABLED') + + client.disable_rule(Name=rule_name) + rule = client.describe_rule(Name=rule_name) + assert(rule['State'] == 'DISABLED') + + client.enable_rule(Name=rule_name) + rule = client.describe_rule(Name=rule_name) + assert(rule['State'] == 'ENABLED') + + +@mock_events +def test_list_rule_names_by_target(): + test_1_target = TARGETS['test-target-1'] + test_2_target = TARGETS['test-target-2'] + client = generate_environment() + + rules = client.list_rule_names_by_target(TargetArn=test_1_target['Arn']) + assert(len(rules['RuleNames']) == len(test_1_target['Rules'])) + for rule in rules['RuleNames']: + assert(rule in test_1_target['Rules']) + + rules = client.list_rule_names_by_target(TargetArn=test_2_target['Arn']) + assert(len(rules['RuleNames']) == len(test_2_target['Rules'])) + for rule in rules['RuleNames']: + assert(rule in test_2_target['Rules']) + + +@mock_events +def test_list_rules(): + client = generate_environment() + + rules = client.list_rules() + assert(len(rules['Rules']) == len(RULES)) + + +@mock_events +def test_delete_rule(): + client = generate_environment() + + client.delete_rule(Name=RULES[0]['Name']) + rules = client.list_rules() + assert(len(rules['Rules']) == len(RULES) - 1) + + +@mock_events +def test_list_targets_by_rule(): + rule_name = get_random_rule()['Name'] + client = generate_environment() + targets = client.list_targets_by_rule(Rule=rule_name) + + expected_targets = [] + for target in TARGETS: + if rule_name in TARGETS[target].get('Rules'): + expected_targets.append(target) + + assert(len(targets['Targets']) == len(expected_targets)) + + +@mock_events +def test_remove_targets(): + rule_name = get_random_rule()['Name'] + client = generate_environment() + + targets = client.list_targets_by_rule(Rule=rule_name)['Targets'] + targets_before = len(targets) + assert(targets_before > 0) + + client.remove_targets(Rule=rule_name, Ids=[targets[0]['Id']]) + + targets = client.list_targets_by_rule(Rule=rule_name)['Targets'] + targets_after = len(targets) + assert(targets_before - 1 == targets_after) + + +@mock_events +def test_permissions(): + client = boto3.client('events', 'eu-central-1') + + client.put_permission(Action='events:PutEvents', Principal='111111111111', StatementId='Account1') + client.put_permission(Action='events:PutEvents', Principal='222222222222', StatementId='Account2') + + resp = client.describe_event_bus() + resp_policy = json.loads(resp['Policy']) + assert len(resp_policy['Statement']) == 2 + + client.remove_permission(StatementId='Account2') + + resp = client.describe_event_bus() + resp_policy = json.loads(resp['Policy']) + assert len(resp_policy['Statement']) == 1 + assert resp_policy['Statement'][0]['Sid'] == 'Account1' + + +@mock_events +def test_put_events(): + client = boto3.client('events', 'eu-central-1') + + event = { + "Source": "com.mycompany.myapp", + "Detail": '{"key1": "value3", "key2": "value4"}', + "Resources": ["resource1", "resource2"], + "DetailType": "myDetailType" + } + + client.put_events(Entries=[event]) + # Boto3 
would error if it didn't return 200 OK + + with assert_raises(ClientError): + client.put_events(Entries=[event]*20) diff --git a/tests/test_glacier/test_glacier_archives.py b/tests/test_glacier/test_glacier_archives.py index e8fa6045e..ec43e613c 100644 --- a/tests/test_glacier/test_glacier_archives.py +++ b/tests/test_glacier/test_glacier_archives.py @@ -1,21 +1,21 @@ -from __future__ import unicode_literals - -from tempfile import NamedTemporaryFile -import boto.glacier -import sure # noqa - -from moto import mock_glacier_deprecated - - -@mock_glacier_deprecated -def test_create_and_delete_archive(): - the_file = NamedTemporaryFile(delete=False) - the_file.write(b"some stuff") - the_file.close() - - conn = boto.glacier.connect_to_region("us-west-2") - vault = conn.create_vault("my_vault") - - archive_id = vault.upload_archive(the_file.name) - - vault.delete_archive(archive_id) +from __future__ import unicode_literals + +from tempfile import NamedTemporaryFile +import boto.glacier +import sure # noqa + +from moto import mock_glacier_deprecated + + +@mock_glacier_deprecated +def test_create_and_delete_archive(): + the_file = NamedTemporaryFile(delete=False) + the_file.write(b"some stuff") + the_file.close() + + conn = boto.glacier.connect_to_region("us-west-2") + vault = conn.create_vault("my_vault") + + archive_id = vault.upload_archive(the_file.name) + + vault.delete_archive(archive_id) diff --git a/tests/test_glacier/test_glacier_jobs.py b/tests/test_glacier/test_glacier_jobs.py index 152aa14c8..761b47a66 100644 --- a/tests/test_glacier/test_glacier_jobs.py +++ b/tests/test_glacier/test_glacier_jobs.py @@ -1,90 +1,90 @@ -from __future__ import unicode_literals - -import json -import time - -from boto.glacier.layer1 import Layer1 -import sure # noqa - -from moto import mock_glacier_deprecated - - -@mock_glacier_deprecated -def test_init_glacier_job(): - conn = Layer1(region_name="us-west-2") - vault_name = "my_vault" - conn.create_vault(vault_name) - archive_id = conn.upload_archive( - vault_name, "some stuff", "", "", "some description") - - job_response = conn.initiate_job(vault_name, { - "ArchiveId": archive_id, - "Type": "archive-retrieval", - }) - job_id = job_response['JobId'] - job_response['Location'].should.equal( - "//vaults/my_vault/jobs/{0}".format(job_id)) - - -@mock_glacier_deprecated -def test_describe_job(): - conn = Layer1(region_name="us-west-2") - vault_name = "my_vault" - conn.create_vault(vault_name) - archive_id = conn.upload_archive( - vault_name, "some stuff", "", "", "some description") - job_response = conn.initiate_job(vault_name, { - "ArchiveId": archive_id, - "Type": "archive-retrieval", - }) - job_id = job_response['JobId'] - - job = conn.describe_job(vault_name, job_id) - joboutput = json.loads(job.read().decode("utf-8")) - - joboutput.should.have.key('Tier').which.should.equal('Standard') - joboutput.should.have.key('StatusCode').which.should.equal('InProgress') - joboutput.should.have.key('VaultARN').which.should.equal('arn:aws:glacier:RegionInfo:us-west-2:012345678901:vaults/my_vault') - - -@mock_glacier_deprecated -def test_list_glacier_jobs(): - conn = Layer1(region_name="us-west-2") - vault_name = "my_vault" - conn.create_vault(vault_name) - archive_id1 = conn.upload_archive( - vault_name, "some stuff", "", "", "some description")['ArchiveId'] - archive_id2 = conn.upload_archive( - vault_name, "some other stuff", "", "", "some description")['ArchiveId'] - - conn.initiate_job(vault_name, { - "ArchiveId": archive_id1, - "Type": "archive-retrieval", - }) 
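# --- Illustrative aside (not part of the original patch): the surrounding
# Glacier tests drive the archive-retrieval flow through boto2's deprecated
# Layer1 client. A minimal sketch of the same flow against the boto3 Glacier
# client follows, assuming moto's mock_glacier backend accepts these boto3
# calls; the vault name, region, and payload are illustrative, not taken
# from this patch.
import time
import boto3
from moto import mock_glacier

@mock_glacier
def sketch_archive_retrieval_with_boto3():
    client = boto3.client('glacier', region_name='us-west-2')
    # boto3's Glacier customization defaults accountId to '-'
    client.create_vault(vaultName='my_vault')
    archive = client.upload_archive(vaultName='my_vault', body=b'some stuff')
    job = client.initiate_job(
        vaultName='my_vault',
        jobParameters={'ArchiveId': archive['archiveId'],
                       'Type': 'archive-retrieval'})
    # the existing tests sleep so the mocked job can reach a finished state
    time.sleep(6)
    output = client.get_job_output(vaultName='my_vault', jobId=job['jobId'])
    assert output['body'].read() == b'some stuff'
# --- end aside ---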
- conn.initiate_job(vault_name, { - "ArchiveId": archive_id2, - "Type": "archive-retrieval", - }) - - jobs = conn.list_jobs(vault_name) - len(jobs['JobList']).should.equal(2) - - -@mock_glacier_deprecated -def test_get_job_output(): - conn = Layer1(region_name="us-west-2") - vault_name = "my_vault" - conn.create_vault(vault_name) - archive_response = conn.upload_archive( - vault_name, "some stuff", "", "", "some description") - archive_id = archive_response['ArchiveId'] - job_response = conn.initiate_job(vault_name, { - "ArchiveId": archive_id, - "Type": "archive-retrieval", - }) - job_id = job_response['JobId'] - - time.sleep(6) - - output = conn.get_job_output(vault_name, job_id) - output.read().decode("utf-8").should.equal("some stuff") +from __future__ import unicode_literals + +import json +import time + +from boto.glacier.layer1 import Layer1 +import sure # noqa + +from moto import mock_glacier_deprecated + + +@mock_glacier_deprecated +def test_init_glacier_job(): + conn = Layer1(region_name="us-west-2") + vault_name = "my_vault" + conn.create_vault(vault_name) + archive_id = conn.upload_archive( + vault_name, "some stuff", "", "", "some description") + + job_response = conn.initiate_job(vault_name, { + "ArchiveId": archive_id, + "Type": "archive-retrieval", + }) + job_id = job_response['JobId'] + job_response['Location'].should.equal( + "//vaults/my_vault/jobs/{0}".format(job_id)) + + +@mock_glacier_deprecated +def test_describe_job(): + conn = Layer1(region_name="us-west-2") + vault_name = "my_vault" + conn.create_vault(vault_name) + archive_id = conn.upload_archive( + vault_name, "some stuff", "", "", "some description") + job_response = conn.initiate_job(vault_name, { + "ArchiveId": archive_id, + "Type": "archive-retrieval", + }) + job_id = job_response['JobId'] + + job = conn.describe_job(vault_name, job_id) + joboutput = json.loads(job.read().decode("utf-8")) + + joboutput.should.have.key('Tier').which.should.equal('Standard') + joboutput.should.have.key('StatusCode').which.should.equal('InProgress') + joboutput.should.have.key('VaultARN').which.should.equal('arn:aws:glacier:RegionInfo:us-west-2:012345678901:vaults/my_vault') + + +@mock_glacier_deprecated +def test_list_glacier_jobs(): + conn = Layer1(region_name="us-west-2") + vault_name = "my_vault" + conn.create_vault(vault_name) + archive_id1 = conn.upload_archive( + vault_name, "some stuff", "", "", "some description")['ArchiveId'] + archive_id2 = conn.upload_archive( + vault_name, "some other stuff", "", "", "some description")['ArchiveId'] + + conn.initiate_job(vault_name, { + "ArchiveId": archive_id1, + "Type": "archive-retrieval", + }) + conn.initiate_job(vault_name, { + "ArchiveId": archive_id2, + "Type": "archive-retrieval", + }) + + jobs = conn.list_jobs(vault_name) + len(jobs['JobList']).should.equal(2) + + +@mock_glacier_deprecated +def test_get_job_output(): + conn = Layer1(region_name="us-west-2") + vault_name = "my_vault" + conn.create_vault(vault_name) + archive_response = conn.upload_archive( + vault_name, "some stuff", "", "", "some description") + archive_id = archive_response['ArchiveId'] + job_response = conn.initiate_job(vault_name, { + "ArchiveId": archive_id, + "Type": "archive-retrieval", + }) + job_id = job_response['JobId'] + + time.sleep(6) + + output = conn.get_job_output(vault_name, job_id) + output.read().decode("utf-8").should.equal("some stuff") diff --git a/tests/test_glacier/test_glacier_server.py b/tests/test_glacier/test_glacier_server.py index fd8034421..b6c03428e 100644 --- 
a/tests/test_glacier/test_glacier_server.py +++ b/tests/test_glacier/test_glacier_server.py @@ -1,22 +1,22 @@ -from __future__ import unicode_literals - -import json -import sure # noqa - -import moto.server as server -from moto import mock_glacier - -''' -Test the different server responses -''' - - -@mock_glacier -def test_list_vaults(): - backend = server.create_backend_app("glacier") - test_client = backend.test_client() - - res = test_client.get('/1234bcd/vaults') - - json.loads(res.data.decode("utf-8") - ).should.equal({u'Marker': None, u'VaultList': []}) +from __future__ import unicode_literals + +import json +import sure # noqa + +import moto.server as server +from moto import mock_glacier + +''' +Test the different server responses +''' + + +@mock_glacier +def test_list_vaults(): + backend = server.create_backend_app("glacier") + test_client = backend.test_client() + + res = test_client.get('/1234bcd/vaults') + + json.loads(res.data.decode("utf-8") + ).should.equal({u'Marker': None, u'VaultList': []}) diff --git a/tests/test_glacier/test_glacier_vaults.py b/tests/test_glacier/test_glacier_vaults.py index e64f40a90..93c79423e 100644 --- a/tests/test_glacier/test_glacier_vaults.py +++ b/tests/test_glacier/test_glacier_vaults.py @@ -1,31 +1,31 @@ -from __future__ import unicode_literals - -import boto.glacier -import sure # noqa - -from moto import mock_glacier_deprecated - - -@mock_glacier_deprecated -def test_create_vault(): - conn = boto.glacier.connect_to_region("us-west-2") - - conn.create_vault("my_vault") - - vaults = conn.list_vaults() - vaults.should.have.length_of(1) - vaults[0].name.should.equal("my_vault") - - -@mock_glacier_deprecated -def test_delete_vault(): - conn = boto.glacier.connect_to_region("us-west-2") - - conn.create_vault("my_vault") - - vaults = conn.list_vaults() - vaults.should.have.length_of(1) - - conn.delete_vault("my_vault") - vaults = conn.list_vaults() - vaults.should.have.length_of(0) +from __future__ import unicode_literals + +import boto.glacier +import sure # noqa + +from moto import mock_glacier_deprecated + + +@mock_glacier_deprecated +def test_create_vault(): + conn = boto.glacier.connect_to_region("us-west-2") + + conn.create_vault("my_vault") + + vaults = conn.list_vaults() + vaults.should.have.length_of(1) + vaults[0].name.should.equal("my_vault") + + +@mock_glacier_deprecated +def test_delete_vault(): + conn = boto.glacier.connect_to_region("us-west-2") + + conn.create_vault("my_vault") + + vaults = conn.list_vaults() + vaults.should.have.length_of(1) + + conn.delete_vault("my_vault") + vaults = conn.list_vaults() + vaults.should.have.length_of(0) diff --git a/tests/test_glue/__init__.py b/tests/test_glue/__init__.py index baffc4882..78b780d97 100644 --- a/tests/test_glue/__init__.py +++ b/tests/test_glue/__init__.py @@ -1 +1 @@ -from __future__ import unicode_literals +from __future__ import unicode_literals diff --git a/tests/test_glue/fixtures/__init__.py b/tests/test_glue/fixtures/__init__.py index baffc4882..78b780d97 100644 --- a/tests/test_glue/fixtures/__init__.py +++ b/tests/test_glue/fixtures/__init__.py @@ -1 +1 @@ -from __future__ import unicode_literals +from __future__ import unicode_literals diff --git a/tests/test_glue/fixtures/datacatalog.py b/tests/test_glue/fixtures/datacatalog.py index edad2f0f4..13136158b 100644 --- a/tests/test_glue/fixtures/datacatalog.py +++ b/tests/test_glue/fixtures/datacatalog.py @@ -1,56 +1,56 @@ -from __future__ import unicode_literals - -TABLE_INPUT = { - 'Owner': 'a_fake_owner', - 
'Parameters': { - 'EXTERNAL': 'TRUE', - }, - 'Retention': 0, - 'StorageDescriptor': { - 'BucketColumns': [], - 'Compressed': False, - 'InputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat', - 'NumberOfBuckets': -1, - 'OutputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat', - 'Parameters': {}, - 'SerdeInfo': { - 'Parameters': { - 'serialization.format': '1' - }, - 'SerializationLibrary': 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' - }, - 'SkewedInfo': { - 'SkewedColumnNames': [], - 'SkewedColumnValueLocationMaps': {}, - 'SkewedColumnValues': [] - }, - 'SortColumns': [], - 'StoredAsSubDirectories': False - }, - 'TableType': 'EXTERNAL_TABLE', -} - - -PARTITION_INPUT = { - # 'DatabaseName': 'dbname', - 'StorageDescriptor': { - 'BucketColumns': [], - 'Columns': [], - 'Compressed': False, - 'InputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat', - 'Location': 's3://.../partition=value', - 'NumberOfBuckets': -1, - 'OutputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat', - 'Parameters': {}, - 'SerdeInfo': { - 'Parameters': {'path': 's3://...', 'serialization.format': '1'}, - 'SerializationLibrary': 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'}, - 'SkewedInfo': {'SkewedColumnNames': [], - 'SkewedColumnValueLocationMaps': {}, - 'SkewedColumnValues': []}, - 'SortColumns': [], - 'StoredAsSubDirectories': False, - }, - # 'TableName': 'source_table', - # 'Values': ['2018-06-26'], -} +from __future__ import unicode_literals + +TABLE_INPUT = { + 'Owner': 'a_fake_owner', + 'Parameters': { + 'EXTERNAL': 'TRUE', + }, + 'Retention': 0, + 'StorageDescriptor': { + 'BucketColumns': [], + 'Compressed': False, + 'InputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat', + 'NumberOfBuckets': -1, + 'OutputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat', + 'Parameters': {}, + 'SerdeInfo': { + 'Parameters': { + 'serialization.format': '1' + }, + 'SerializationLibrary': 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe' + }, + 'SkewedInfo': { + 'SkewedColumnNames': [], + 'SkewedColumnValueLocationMaps': {}, + 'SkewedColumnValues': [] + }, + 'SortColumns': [], + 'StoredAsSubDirectories': False + }, + 'TableType': 'EXTERNAL_TABLE', +} + + +PARTITION_INPUT = { + # 'DatabaseName': 'dbname', + 'StorageDescriptor': { + 'BucketColumns': [], + 'Columns': [], + 'Compressed': False, + 'InputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat', + 'Location': 's3://.../partition=value', + 'NumberOfBuckets': -1, + 'OutputFormat': 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat', + 'Parameters': {}, + 'SerdeInfo': { + 'Parameters': {'path': 's3://...', 'serialization.format': '1'}, + 'SerializationLibrary': 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'}, + 'SkewedInfo': {'SkewedColumnNames': [], + 'SkewedColumnValueLocationMaps': {}, + 'SkewedColumnValues': []}, + 'SortColumns': [], + 'StoredAsSubDirectories': False, + }, + # 'TableName': 'source_table', + # 'Values': ['2018-06-26'], +} diff --git a/tests/test_glue/helpers.py b/tests/test_glue/helpers.py index 331b99867..48908532c 100644 --- a/tests/test_glue/helpers.py +++ b/tests/test_glue/helpers.py @@ -1,119 +1,119 @@ -from __future__ import unicode_literals - -import copy - -from .fixtures.datacatalog import TABLE_INPUT, PARTITION_INPUT - - -def create_database(client, database_name): - return client.create_database( - DatabaseInput={ - 
'Name': database_name - } - ) - - -def get_database(client, database_name): - return client.get_database(Name=database_name) - - -def create_table_input(database_name, table_name, columns=[], partition_keys=[]): - table_input = copy.deepcopy(TABLE_INPUT) - table_input['Name'] = table_name - table_input['PartitionKeys'] = partition_keys - table_input['StorageDescriptor']['Columns'] = columns - table_input['StorageDescriptor']['Location'] = 's3://my-bucket/{database_name}/{table_name}'.format( - database_name=database_name, - table_name=table_name - ) - return table_input - - -def create_table(client, database_name, table_name, table_input=None, **kwargs): - if table_input is None: - table_input = create_table_input(database_name, table_name, **kwargs) - - return client.create_table( - DatabaseName=database_name, - TableInput=table_input - ) - - -def update_table(client, database_name, table_name, table_input=None, **kwargs): - if table_input is None: - table_input = create_table_input(database_name, table_name, **kwargs) - - return client.update_table( - DatabaseName=database_name, - TableInput=table_input, - ) - - -def get_table(client, database_name, table_name): - return client.get_table( - DatabaseName=database_name, - Name=table_name - ) - - -def get_tables(client, database_name): - return client.get_tables( - DatabaseName=database_name - ) - - -def get_table_versions(client, database_name, table_name): - return client.get_table_versions( - DatabaseName=database_name, - TableName=table_name - ) - - -def get_table_version(client, database_name, table_name, version_id): - return client.get_table_version( - DatabaseName=database_name, - TableName=table_name, - VersionId=version_id, - ) - - -def create_partition_input(database_name, table_name, values=[], columns=[]): - root_path = 's3://my-bucket/{database_name}/{table_name}'.format( - database_name=database_name, - table_name=table_name - ) - - part_input = copy.deepcopy(PARTITION_INPUT) - part_input['Values'] = values - part_input['StorageDescriptor']['Columns'] = columns - part_input['StorageDescriptor']['SerdeInfo']['Parameters']['path'] = root_path - return part_input - - -def create_partition(client, database_name, table_name, partiton_input=None, **kwargs): - if partiton_input is None: - partiton_input = create_partition_input(database_name, table_name, **kwargs) - return client.create_partition( - DatabaseName=database_name, - TableName=table_name, - PartitionInput=partiton_input - ) - - -def update_partition(client, database_name, table_name, old_values=[], partiton_input=None, **kwargs): - if partiton_input is None: - partiton_input = create_partition_input(database_name, table_name, **kwargs) - return client.update_partition( - DatabaseName=database_name, - TableName=table_name, - PartitionInput=partiton_input, - PartitionValueList=old_values, - ) - - -def get_partition(client, database_name, table_name, values): - return client.get_partition( - DatabaseName=database_name, - TableName=table_name, - PartitionValues=values, - ) +from __future__ import unicode_literals + +import copy + +from .fixtures.datacatalog import TABLE_INPUT, PARTITION_INPUT + + +def create_database(client, database_name): + return client.create_database( + DatabaseInput={ + 'Name': database_name + } + ) + + +def get_database(client, database_name): + return client.get_database(Name=database_name) + + +def create_table_input(database_name, table_name, columns=[], partition_keys=[]): + table_input = copy.deepcopy(TABLE_INPUT) + table_input['Name'] = 
table_name + table_input['PartitionKeys'] = partition_keys + table_input['StorageDescriptor']['Columns'] = columns + table_input['StorageDescriptor']['Location'] = 's3://my-bucket/{database_name}/{table_name}'.format( + database_name=database_name, + table_name=table_name + ) + return table_input + + +def create_table(client, database_name, table_name, table_input=None, **kwargs): + if table_input is None: + table_input = create_table_input(database_name, table_name, **kwargs) + + return client.create_table( + DatabaseName=database_name, + TableInput=table_input + ) + + +def update_table(client, database_name, table_name, table_input=None, **kwargs): + if table_input is None: + table_input = create_table_input(database_name, table_name, **kwargs) + + return client.update_table( + DatabaseName=database_name, + TableInput=table_input, + ) + + +def get_table(client, database_name, table_name): + return client.get_table( + DatabaseName=database_name, + Name=table_name + ) + + +def get_tables(client, database_name): + return client.get_tables( + DatabaseName=database_name + ) + + +def get_table_versions(client, database_name, table_name): + return client.get_table_versions( + DatabaseName=database_name, + TableName=table_name + ) + + +def get_table_version(client, database_name, table_name, version_id): + return client.get_table_version( + DatabaseName=database_name, + TableName=table_name, + VersionId=version_id, + ) + + +def create_partition_input(database_name, table_name, values=[], columns=[]): + root_path = 's3://my-bucket/{database_name}/{table_name}'.format( + database_name=database_name, + table_name=table_name + ) + + part_input = copy.deepcopy(PARTITION_INPUT) + part_input['Values'] = values + part_input['StorageDescriptor']['Columns'] = columns + part_input['StorageDescriptor']['SerdeInfo']['Parameters']['path'] = root_path + return part_input + + +def create_partition(client, database_name, table_name, partiton_input=None, **kwargs): + if partiton_input is None: + partiton_input = create_partition_input(database_name, table_name, **kwargs) + return client.create_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionInput=partiton_input + ) + + +def update_partition(client, database_name, table_name, old_values=[], partiton_input=None, **kwargs): + if partiton_input is None: + partiton_input = create_partition_input(database_name, table_name, **kwargs) + return client.update_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionInput=partiton_input, + PartitionValueList=old_values, + ) + + +def get_partition(client, database_name, table_name, values): + return client.get_partition( + DatabaseName=database_name, + TableName=table_name, + PartitionValues=values, + ) diff --git a/tests/test_glue/test_datacatalog.py b/tests/test_glue/test_datacatalog.py index a457d5127..72daed28d 100644 --- a/tests/test_glue/test_datacatalog.py +++ b/tests/test_glue/test_datacatalog.py @@ -1,426 +1,426 @@ -from __future__ import unicode_literals - -import sure # noqa -import re -from nose.tools import assert_raises -import boto3 -from botocore.client import ClientError - - -from datetime import datetime -import pytz - -from moto import mock_glue -from . 
import helpers - - -@mock_glue -def test_create_database(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - helpers.create_database(client, database_name) - - response = helpers.get_database(client, database_name) - database = response['Database'] - - database.should.equal({'Name': database_name}) - - -@mock_glue -def test_create_database_already_exists(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'cantcreatethisdatabasetwice' - helpers.create_database(client, database_name) - - with assert_raises(ClientError) as exc: - helpers.create_database(client, database_name) - - exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') - - -@mock_glue -def test_get_database_not_exits(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'nosuchdatabase' - - with assert_raises(ClientError) as exc: - helpers.get_database(client, database_name) - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found') - - -@mock_glue -def test_create_table(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - helpers.create_database(client, database_name) - - table_name = 'myspecialtable' - table_input = helpers.create_table_input(database_name, table_name) - helpers.create_table(client, database_name, table_name, table_input) - - response = helpers.get_table(client, database_name, table_name) - table = response['Table'] - - table['Name'].should.equal(table_input['Name']) - table['StorageDescriptor'].should.equal(table_input['StorageDescriptor']) - table['PartitionKeys'].should.equal(table_input['PartitionKeys']) - - -@mock_glue -def test_create_table_already_exists(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - helpers.create_database(client, database_name) - - table_name = 'cantcreatethistabletwice' - helpers.create_table(client, database_name, table_name) - - with assert_raises(ClientError) as exc: - helpers.create_table(client, database_name, table_name) - - exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') - - -@mock_glue -def test_get_tables(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - helpers.create_database(client, database_name) - - table_names = ['myfirsttable', 'mysecondtable', 'mythirdtable'] - table_inputs = {} - - for table_name in table_names: - table_input = helpers.create_table_input(database_name, table_name) - table_inputs[table_name] = table_input - helpers.create_table(client, database_name, table_name, table_input) - - response = helpers.get_tables(client, database_name) - - tables = response['TableList'] - - tables.should.have.length_of(3) - - for table in tables: - table_name = table['Name'] - table_name.should.equal(table_inputs[table_name]['Name']) - table['StorageDescriptor'].should.equal(table_inputs[table_name]['StorageDescriptor']) - table['PartitionKeys'].should.equal(table_inputs[table_name]['PartitionKeys']) - - -@mock_glue -def test_get_table_versions(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - helpers.create_database(client, database_name) - - table_name = 'myfirsttable' - version_inputs = {} - - table_input = helpers.create_table_input(database_name, table_name) - helpers.create_table(client, 
database_name, table_name, table_input) - version_inputs["1"] = table_input - - columns = [{'Name': 'country', 'Type': 'string'}] - table_input = helpers.create_table_input(database_name, table_name, columns=columns) - helpers.update_table(client, database_name, table_name, table_input) - version_inputs["2"] = table_input - - # Updateing with an indentical input should still create a new version - helpers.update_table(client, database_name, table_name, table_input) - version_inputs["3"] = table_input - - response = helpers.get_table_versions(client, database_name, table_name) - - vers = response['TableVersions'] - - vers.should.have.length_of(3) - vers[0]['Table']['StorageDescriptor']['Columns'].should.equal([]) - vers[-1]['Table']['StorageDescriptor']['Columns'].should.equal(columns) - - for n, ver in enumerate(vers): - n = str(n + 1) - ver['VersionId'].should.equal(n) - ver['Table']['Name'].should.equal(table_name) - ver['Table']['StorageDescriptor'].should.equal(version_inputs[n]['StorageDescriptor']) - ver['Table']['PartitionKeys'].should.equal(version_inputs[n]['PartitionKeys']) - - response = helpers.get_table_version(client, database_name, table_name, "3") - ver = response['TableVersion'] - - ver['VersionId'].should.equal("3") - ver['Table']['Name'].should.equal(table_name) - ver['Table']['StorageDescriptor']['Columns'].should.equal(columns) - - -@mock_glue -def test_get_table_version_not_found(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - helpers.create_database(client, database_name) - helpers.create_table(client, database_name, table_name) - - with assert_raises(ClientError) as exc: - helpers.get_table_version(client, database_name, 'myfirsttable', "20") - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('version', re.I) - - -@mock_glue -def test_get_table_version_invalid_input(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - helpers.create_database(client, database_name) - helpers.create_table(client, database_name, table_name) - - with assert_raises(ClientError) as exc: - helpers.get_table_version(client, database_name, 'myfirsttable', "10not-an-int") - - exc.exception.response['Error']['Code'].should.equal('InvalidInputException') - - -@mock_glue -def test_get_table_not_exits(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - helpers.create_database(client, database_name) - - with assert_raises(ClientError) as exc: - helpers.get_table(client, database_name, 'myfirsttable') - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('Table myfirsttable not found') - - -@mock_glue -def test_get_table_when_database_not_exits(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'nosuchdatabase' - - with assert_raises(ClientError) as exc: - helpers.get_table(client, database_name, 'myfirsttable') - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found') - - -@mock_glue -def test_get_partitions_empty(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - helpers.create_database(client, 
database_name) - - helpers.create_table(client, database_name, table_name) - - response = client.get_partitions(DatabaseName=database_name, TableName=table_name) - - response['Partitions'].should.have.length_of(0) - - -@mock_glue -def test_create_partition(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - values = ['2018-10-01'] - helpers.create_database(client, database_name) - - helpers.create_table(client, database_name, table_name) - - before = datetime.now(pytz.utc) - - part_input = helpers.create_partition_input(database_name, table_name, values=values) - helpers.create_partition(client, database_name, table_name, part_input) - - after = datetime.now(pytz.utc) - - response = client.get_partitions(DatabaseName=database_name, TableName=table_name) - - partitions = response['Partitions'] - - partitions.should.have.length_of(1) - - partition = partitions[0] - - partition['TableName'].should.equal(table_name) - partition['StorageDescriptor'].should.equal(part_input['StorageDescriptor']) - partition['Values'].should.equal(values) - partition['CreationTime'].should.be.greater_than(before) - partition['CreationTime'].should.be.lower_than(after) - - -@mock_glue -def test_create_partition_already_exist(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - values = ['2018-10-01'] - helpers.create_database(client, database_name) - - helpers.create_table(client, database_name, table_name) - - helpers.create_partition(client, database_name, table_name, values=values) - - with assert_raises(ClientError) as exc: - helpers.create_partition(client, database_name, table_name, values=values) - - exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') - - -@mock_glue -def test_get_partition_not_found(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - values = ['2018-10-01'] - helpers.create_database(client, database_name) - - helpers.create_table(client, database_name, table_name) - - with assert_raises(ClientError) as exc: - helpers.get_partition(client, database_name, table_name, values) - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('partition') - - -@mock_glue -def test_get_partition(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - helpers.create_database(client, database_name) - - helpers.create_table(client, database_name, table_name) - - values = [['2018-10-01'], ['2018-09-01']] - - helpers.create_partition(client, database_name, table_name, values=values[0]) - helpers.create_partition(client, database_name, table_name, values=values[1]) - - response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=values[1]) - - partition = response['Partition'] - - partition['TableName'].should.equal(table_name) - partition['Values'].should.equal(values[1]) - - -@mock_glue -def test_update_partition_not_found_moving(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - - helpers.create_database(client, database_name) - helpers.create_table(client, database_name, table_name) - - with assert_raises(ClientError) as exc: - helpers.update_partition(client, database_name, 
table_name, old_values=['0000-00-00'], values=['2018-10-02']) - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('partition') - - -@mock_glue -def test_update_partition_not_found_change_in_place(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - values = ['2018-10-01'] - - helpers.create_database(client, database_name) - helpers.create_table(client, database_name, table_name) - - with assert_raises(ClientError) as exc: - helpers.update_partition(client, database_name, table_name, old_values=values, values=values) - - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - exc.exception.response['Error']['Message'].should.match('partition') - - -@mock_glue -def test_update_partition_cannot_overwrite(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - helpers.create_database(client, database_name) - - helpers.create_table(client, database_name, table_name) - - values = [['2018-10-01'], ['2018-09-01']] - - helpers.create_partition(client, database_name, table_name, values=values[0]) - helpers.create_partition(client, database_name, table_name, values=values[1]) - - with assert_raises(ClientError) as exc: - helpers.update_partition(client, database_name, table_name, old_values=values[0], values=values[1]) - - exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') - - -@mock_glue -def test_update_partition(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - values = ['2018-10-01'] - - helpers.create_database(client, database_name) - helpers.create_table(client, database_name, table_name) - helpers.create_partition(client, database_name, table_name, values=values) - - response = helpers.update_partition( - client, - database_name, - table_name, - old_values=values, - values=values, - columns=[{'Name': 'country', 'Type': 'string'}], - ) - - response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=values) - partition = response['Partition'] - - partition['TableName'].should.equal(table_name) - partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}]) - - -@mock_glue -def test_update_partition_move(): - client = boto3.client('glue', region_name='us-east-1') - database_name = 'myspecialdatabase' - table_name = 'myfirsttable' - values = ['2018-10-01'] - new_values = ['2018-09-01'] - - helpers.create_database(client, database_name) - helpers.create_table(client, database_name, table_name) - helpers.create_partition(client, database_name, table_name, values=values) - - response = helpers.update_partition( - client, - database_name, - table_name, - old_values=values, - values=new_values, - columns=[{'Name': 'country', 'Type': 'string'}], - ) - - with assert_raises(ClientError) as exc: - helpers.get_partition(client, database_name, table_name, values) - - # Old partition shouldn't exist anymore - exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') - - response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=new_values) - partition = response['Partition'] - - partition['TableName'].should.equal(table_name) - partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 
'string'}]) +from __future__ import unicode_literals + +import sure # noqa +import re +from nose.tools import assert_raises +import boto3 +from botocore.client import ClientError + + +from datetime import datetime +import pytz + +from moto import mock_glue +from . import helpers + + +@mock_glue +def test_create_database(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + response = helpers.get_database(client, database_name) + database = response['Database'] + + database.should.equal({'Name': database_name}) + + +@mock_glue +def test_create_database_already_exists(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'cantcreatethisdatabasetwice' + helpers.create_database(client, database_name) + + with assert_raises(ClientError) as exc: + helpers.create_database(client, database_name) + + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_get_database_not_exits(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'nosuchdatabase' + + with assert_raises(ClientError) as exc: + helpers.get_database(client, database_name) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found') + + +@mock_glue +def test_create_table(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_name = 'myspecialtable' + table_input = helpers.create_table_input(database_name, table_name) + helpers.create_table(client, database_name, table_name, table_input) + + response = helpers.get_table(client, database_name, table_name) + table = response['Table'] + + table['Name'].should.equal(table_input['Name']) + table['StorageDescriptor'].should.equal(table_input['StorageDescriptor']) + table['PartitionKeys'].should.equal(table_input['PartitionKeys']) + + +@mock_glue +def test_create_table_already_exists(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_name = 'cantcreatethistabletwice' + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.create_table(client, database_name, table_name) + + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_get_tables(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_names = ['myfirsttable', 'mysecondtable', 'mythirdtable'] + table_inputs = {} + + for table_name in table_names: + table_input = helpers.create_table_input(database_name, table_name) + table_inputs[table_name] = table_input + helpers.create_table(client, database_name, table_name, table_input) + + response = helpers.get_tables(client, database_name) + + tables = response['TableList'] + + tables.should.have.length_of(3) + + for table in tables: + table_name = table['Name'] + table_name.should.equal(table_inputs[table_name]['Name']) + table['StorageDescriptor'].should.equal(table_inputs[table_name]['StorageDescriptor']) + table['PartitionKeys'].should.equal(table_inputs[table_name]['PartitionKeys']) + + +@mock_glue +def test_get_table_versions(): + client = boto3.client('glue', 
region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + table_name = 'myfirsttable' + version_inputs = {} + + table_input = helpers.create_table_input(database_name, table_name) + helpers.create_table(client, database_name, table_name, table_input) + version_inputs["1"] = table_input + + columns = [{'Name': 'country', 'Type': 'string'}] + table_input = helpers.create_table_input(database_name, table_name, columns=columns) + helpers.update_table(client, database_name, table_name, table_input) + version_inputs["2"] = table_input + + # Updating with an identical input should still create a new version + helpers.update_table(client, database_name, table_name, table_input) + version_inputs["3"] = table_input + + response = helpers.get_table_versions(client, database_name, table_name) + + vers = response['TableVersions'] + + vers.should.have.length_of(3) + vers[0]['Table']['StorageDescriptor']['Columns'].should.equal([]) + vers[-1]['Table']['StorageDescriptor']['Columns'].should.equal(columns) + + for n, ver in enumerate(vers): + n = str(n + 1) + ver['VersionId'].should.equal(n) + ver['Table']['Name'].should.equal(table_name) + ver['Table']['StorageDescriptor'].should.equal(version_inputs[n]['StorageDescriptor']) + ver['Table']['PartitionKeys'].should.equal(version_inputs[n]['PartitionKeys']) + + response = helpers.get_table_version(client, database_name, table_name, "3") + ver = response['TableVersion'] + + ver['VersionId'].should.equal("3") + ver['Table']['Name'].should.equal(table_name) + ver['Table']['StorageDescriptor']['Columns'].should.equal(columns) + + +@mock_glue +def test_get_table_version_not_found(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.get_table_version(client, database_name, 'myfirsttable', "20") + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('version', re.I) + + +@mock_glue +def test_get_table_version_invalid_input(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.get_table_version(client, database_name, 'myfirsttable', "10not-an-int") + + exc.exception.response['Error']['Code'].should.equal('InvalidInputException') + + +@mock_glue +def test_get_table_not_exits(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + helpers.create_database(client, database_name) + + with assert_raises(ClientError) as exc: + helpers.get_table(client, database_name, 'myfirsttable') + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('Table myfirsttable not found') + + +@mock_glue +def test_get_table_when_database_not_exits(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'nosuchdatabase' + + with assert_raises(ClientError) as exc: + helpers.get_table(client, database_name, 'myfirsttable') + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') +
exc.exception.response['Error']['Message'].should.match('Database nosuchdatabase not found') + + +@mock_glue +def test_get_partitions_empty(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + response = client.get_partitions(DatabaseName=database_name, TableName=table_name) + + response['Partitions'].should.have.length_of(0) + + +@mock_glue +def test_create_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + before = datetime.now(pytz.utc) + + part_input = helpers.create_partition_input(database_name, table_name, values=values) + helpers.create_partition(client, database_name, table_name, part_input) + + after = datetime.now(pytz.utc) + + response = client.get_partitions(DatabaseName=database_name, TableName=table_name) + + partitions = response['Partitions'] + + partitions.should.have.length_of(1) + + partition = partitions[0] + + partition['TableName'].should.equal(table_name) + partition['StorageDescriptor'].should.equal(part_input['StorageDescriptor']) + partition['Values'].should.equal(values) + partition['CreationTime'].should.be.greater_than(before) + partition['CreationTime'].should.be.lower_than(after) + + +@mock_glue +def test_create_partition_already_exist(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + helpers.create_partition(client, database_name, table_name, values=values) + + with assert_raises(ClientError) as exc: + helpers.create_partition(client, database_name, table_name, values=values) + + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_get_partition_not_found(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.get_partition(client, database_name, table_name, values) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('partition') + + +@mock_glue +def test_get_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + values = [['2018-10-01'], ['2018-09-01']] + + helpers.create_partition(client, database_name, table_name, values=values[0]) + helpers.create_partition(client, database_name, table_name, values=values[1]) + + response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=values[1]) + + partition = response['Partition'] + + partition['TableName'].should.equal(table_name) + partition['Values'].should.equal(values[1]) + + +@mock_glue +def test_update_partition_not_found_moving(): + client = boto3.client('glue', 
region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.update_partition(client, database_name, table_name, old_values=['0000-00-00'], values=['2018-10-02']) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('partition') + + +@mock_glue +def test_update_partition_not_found_change_in_place(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + + with assert_raises(ClientError) as exc: + helpers.update_partition(client, database_name, table_name, old_values=values, values=values) + + exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + exc.exception.response['Error']['Message'].should.match('partition') + + +@mock_glue +def test_update_partition_cannot_overwrite(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + helpers.create_database(client, database_name) + + helpers.create_table(client, database_name, table_name) + + values = [['2018-10-01'], ['2018-09-01']] + + helpers.create_partition(client, database_name, table_name, values=values[0]) + helpers.create_partition(client, database_name, table_name, values=values[1]) + + with assert_raises(ClientError) as exc: + helpers.update_partition(client, database_name, table_name, old_values=values[0], values=values[1]) + + exc.exception.response['Error']['Code'].should.equal('AlreadyExistsException') + + +@mock_glue +def test_update_partition(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + helpers.create_partition(client, database_name, table_name, values=values) + + response = helpers.update_partition( + client, + database_name, + table_name, + old_values=values, + values=values, + columns=[{'Name': 'country', 'Type': 'string'}], + ) + + response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=values) + partition = response['Partition'] + + partition['TableName'].should.equal(table_name) + partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}]) + + +@mock_glue +def test_update_partition_move(): + client = boto3.client('glue', region_name='us-east-1') + database_name = 'myspecialdatabase' + table_name = 'myfirsttable' + values = ['2018-10-01'] + new_values = ['2018-09-01'] + + helpers.create_database(client, database_name) + helpers.create_table(client, database_name, table_name) + helpers.create_partition(client, database_name, table_name, values=values) + + response = helpers.update_partition( + client, + database_name, + table_name, + old_values=values, + values=new_values, + columns=[{'Name': 'country', 'Type': 'string'}], + ) + + with assert_raises(ClientError) as exc: + helpers.get_partition(client, database_name, table_name, values) + + # Old partition shouldn't exist anymore + 
exc.exception.response['Error']['Code'].should.equal('EntityNotFoundException') + + response = client.get_partition(DatabaseName=database_name, TableName=table_name, PartitionValues=new_values) + partition = response['Partition'] + + partition['TableName'].should.equal(table_name) + partition['StorageDescriptor']['Columns'].should.equal([{'Name': 'country', 'Type': 'string'}]) diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index bc23ff712..1db4dae1e 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -1,760 +1,760 @@ -from __future__ import unicode_literals -import base64 - -import boto -import boto3 -import sure # noqa -from boto.exception import BotoServerError -from botocore.exceptions import ClientError -from moto import mock_iam, mock_iam_deprecated -from moto.iam.models import aws_managed_policies -from nose.tools import assert_raises, assert_equals -from nose.tools import raises - -from tests.helpers import requires_boto_gte - - -@mock_iam_deprecated() -def test_get_all_server_certs(): - conn = boto.connect_iam() - - conn.upload_server_cert("certname", "certbody", "privatekey") - certs = conn.get_all_server_certs()['list_server_certificates_response'][ - 'list_server_certificates_result']['server_certificate_metadata_list'] - certs.should.have.length_of(1) - cert1 = certs[0] - cert1.server_certificate_name.should.equal("certname") - cert1.arn.should.equal( - "arn:aws:iam::123456789012:server-certificate/certname") - - -@mock_iam_deprecated() -def test_get_server_cert_doesnt_exist(): - conn = boto.connect_iam() - - with assert_raises(BotoServerError): - conn.get_server_certificate("NonExistant") - - -@mock_iam_deprecated() -def test_get_server_cert(): - conn = boto.connect_iam() - - conn.upload_server_cert("certname", "certbody", "privatekey") - cert = conn.get_server_certificate("certname") - cert.server_certificate_name.should.equal("certname") - cert.arn.should.equal( - "arn:aws:iam::123456789012:server-certificate/certname") - - -@mock_iam_deprecated() -def test_upload_server_cert(): - conn = boto.connect_iam() - - conn.upload_server_cert("certname", "certbody", "privatekey") - cert = conn.get_server_certificate("certname") - cert.server_certificate_name.should.equal("certname") - cert.arn.should.equal( - "arn:aws:iam::123456789012:server-certificate/certname") - - -@mock_iam_deprecated() -def test_delete_server_cert(): - conn = boto.connect_iam() - - conn.upload_server_cert("certname", "certbody", "privatekey") - conn.get_server_certificate("certname") - conn.delete_server_cert("certname") - with assert_raises(BotoServerError): - conn.get_server_certificate("certname") - with assert_raises(BotoServerError): - conn.delete_server_cert("certname") - - -@mock_iam_deprecated() -@raises(BotoServerError) -def test_get_role__should_throw__when_role_does_not_exist(): - conn = boto.connect_iam() - - conn.get_role('unexisting_role') - - -@mock_iam_deprecated() -@raises(BotoServerError) -def test_get_instance_profile__should_throw__when_instance_profile_does_not_exist(): - conn = boto.connect_iam() - - conn.get_instance_profile('unexisting_instance_profile') - - -@mock_iam_deprecated() -def test_create_role_and_instance_profile(): - conn = boto.connect_iam() - conn.create_instance_profile("my-profile", path="my-path") - conn.create_role( - "my-role", assume_role_policy_document="some policy", path="my-path") - - conn.add_role_to_instance_profile("my-profile", "my-role") - - role = conn.get_role("my-role") - role.path.should.equal("my-path") - 
role.assume_role_policy_document.should.equal("some policy") - - profile = conn.get_instance_profile("my-profile") - profile.path.should.equal("my-path") - role_from_profile = list(profile.roles.values())[0] - role_from_profile['role_id'].should.equal(role.role_id) - role_from_profile['role_name'].should.equal("my-role") - - conn.list_roles().roles[0].role_name.should.equal('my-role') - - -@mock_iam_deprecated() -def test_remove_role_from_instance_profile(): - conn = boto.connect_iam() - conn.create_instance_profile("my-profile", path="my-path") - conn.create_role( - "my-role", assume_role_policy_document="some policy", path="my-path") - conn.add_role_to_instance_profile("my-profile", "my-role") - - profile = conn.get_instance_profile("my-profile") - role_from_profile = list(profile.roles.values())[0] - role_from_profile['role_name'].should.equal("my-role") - - conn.remove_role_from_instance_profile("my-profile", "my-role") - - profile = conn.get_instance_profile("my-profile") - dict(profile.roles).should.be.empty - - -@mock_iam() -def test_get_login_profile(): - conn = boto3.client('iam', region_name='us-east-1') - conn.create_user(UserName='my-user') - conn.create_login_profile(UserName='my-user', Password='my-pass') - - response = conn.get_login_profile(UserName='my-user') - response['LoginProfile']['UserName'].should.equal('my-user') - - -@mock_iam() -def test_update_login_profile(): - conn = boto3.client('iam', region_name='us-east-1') - conn.create_user(UserName='my-user') - conn.create_login_profile(UserName='my-user', Password='my-pass') - response = conn.get_login_profile(UserName='my-user') - response['LoginProfile'].get('PasswordResetRequired').should.equal(None) - - conn.update_login_profile(UserName='my-user', Password='new-pass', PasswordResetRequired=True) - response = conn.get_login_profile(UserName='my-user') - response['LoginProfile'].get('PasswordResetRequired').should.equal(True) - - -@mock_iam() -def test_delete_role(): - conn = boto3.client('iam', region_name='us-east-1') - - with assert_raises(ClientError): - conn.delete_role(RoleName="my-role") - - conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/") - role = conn.get_role(RoleName="my-role") - role.get('Role').get('Arn').should.equal('arn:aws:iam::123456789012:role/my-path/my-role') - - conn.delete_role(RoleName="my-role") - - with assert_raises(ClientError): - conn.get_role(RoleName="my-role") - - -@mock_iam_deprecated() -def test_list_instance_profiles(): - conn = boto.connect_iam() - conn.create_instance_profile("my-profile", path="my-path") - conn.create_role("my-role", path="my-path") - - conn.add_role_to_instance_profile("my-profile", "my-role") - - profiles = conn.list_instance_profiles().instance_profiles - - len(profiles).should.equal(1) - profiles[0].instance_profile_name.should.equal("my-profile") - profiles[0].roles.role_name.should.equal("my-role") - - -@mock_iam_deprecated() -def test_list_instance_profiles_for_role(): - conn = boto.connect_iam() - - conn.create_role(role_name="my-role", - assume_role_policy_document="some policy", path="my-path") - conn.create_role(role_name="my-role2", - assume_role_policy_document="some policy2", path="my-path2") - - profile_name_list = ['my-profile', 'my-profile2'] - profile_path_list = ['my-path', 'my-path2'] - for profile_count in range(0, 2): - conn.create_instance_profile( - profile_name_list[profile_count], path=profile_path_list[profile_count]) - - for profile_count in range(0, 2): - 
conn.add_role_to_instance_profile( - profile_name_list[profile_count], "my-role") - - profile_dump = conn.list_instance_profiles_for_role(role_name="my-role") - profile_list = profile_dump['list_instance_profiles_for_role_response'][ - 'list_instance_profiles_for_role_result']['instance_profiles'] - for profile_count in range(0, len(profile_list)): - profile_name_list.remove(profile_list[profile_count][ - "instance_profile_name"]) - profile_path_list.remove(profile_list[profile_count]["path"]) - profile_list[profile_count]["roles"]["member"][ - "role_name"].should.equal("my-role") - - len(profile_name_list).should.equal(0) - len(profile_path_list).should.equal(0) - - profile_dump2 = conn.list_instance_profiles_for_role(role_name="my-role2") - profile_list = profile_dump2['list_instance_profiles_for_role_response'][ - 'list_instance_profiles_for_role_result']['instance_profiles'] - len(profile_list).should.equal(0) - - -@mock_iam_deprecated() -def test_list_role_policies(): - conn = boto.connect_iam() - conn.create_role("my-role") - conn.put_role_policy("my-role", "test policy", "my policy") - role = conn.list_role_policies("my-role") - role.policy_names.should.have.length_of(1) - role.policy_names[0].should.equal("test policy") - - conn.put_role_policy("my-role", "test policy 2", "another policy") - role = conn.list_role_policies("my-role") - role.policy_names.should.have.length_of(2) - - conn.delete_role_policy("my-role", "test policy") - role = conn.list_role_policies("my-role") - role.policy_names.should.have.length_of(1) - role.policy_names[0].should.equal("test policy 2") - - with assert_raises(BotoServerError): - conn.delete_role_policy("my-role", "test policy") - - -@mock_iam_deprecated() -def test_put_role_policy(): - conn = boto.connect_iam() - conn.create_role( - "my-role", assume_role_policy_document="some policy", path="my-path") - conn.put_role_policy("my-role", "test policy", "my policy") - policy = conn.get_role_policy( - "my-role", "test policy")['get_role_policy_response']['get_role_policy_result']['policy_name'] - policy.should.equal("test policy") - - -@mock_iam_deprecated() -def test_update_assume_role_policy(): - conn = boto.connect_iam() - role = conn.create_role("my-role") - conn.update_assume_role_policy(role.role_name, "my-policy") - role = conn.get_role("my-role") - role.assume_role_policy_document.should.equal("my-policy") - - -@mock_iam -def test_create_policy(): - conn = boto3.client('iam', region_name='us-east-1') - response = conn.create_policy( - PolicyName="TestCreatePolicy", - PolicyDocument='{"some":"policy"}') - response['Policy']['Arn'].should.equal("arn:aws:iam::123456789012:policy/TestCreatePolicy") - - -@mock_iam -def test_create_policy_versions(): - conn = boto3.client('iam', region_name='us-east-1') - with assert_raises(ClientError): - conn.create_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion", - PolicyDocument='{"some":"policy"}') - conn.create_policy( - PolicyName="TestCreatePolicyVersion", - PolicyDocument='{"some":"policy"}') - version = conn.create_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion", - PolicyDocument='{"some":"policy"}') - version.get('PolicyVersion').get('Document').should.equal({'some': 'policy'}) - -@mock_iam -def test_get_policy(): - conn = boto3.client('iam', region_name='us-east-1') - response = conn.create_policy( - PolicyName="TestGetPolicy", - PolicyDocument='{"some":"policy"}') - policy = conn.get_policy( - 
PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicy") - response['Policy']['Arn'].should.equal("arn:aws:iam::123456789012:policy/TestGetPolicy") - - -@mock_iam -def test_get_policy_version(): - conn = boto3.client('iam', region_name='us-east-1') - conn.create_policy( - PolicyName="TestGetPolicyVersion", - PolicyDocument='{"some":"policy"}') - version = conn.create_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion", - PolicyDocument='{"some":"policy"}') - with assert_raises(ClientError): - conn.get_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion", - VersionId='v2-does-not-exist') - retrieved = conn.get_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion", - VersionId=version.get('PolicyVersion').get('VersionId')) - retrieved.get('PolicyVersion').get('Document').should.equal({'some': 'policy'}) - - -@mock_iam -def test_list_policy_versions(): - conn = boto3.client('iam', region_name='us-east-1') - with assert_raises(ClientError): - versions = conn.list_policy_versions( - PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") - conn.create_policy( - PolicyName="TestListPolicyVersions", - PolicyDocument='{"first":"policy"}') - versions = conn.list_policy_versions( - PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") - versions.get('Versions')[0].get('VersionId').should.equal('v1') - - conn.create_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions", - PolicyDocument='{"second":"policy"}') - conn.create_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions", - PolicyDocument='{"third":"policy"}') - versions = conn.list_policy_versions( - PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions") - print(versions.get('Versions')) - versions.get('Versions')[1].get('Document').should.equal({'second': 'policy'}) - versions.get('Versions')[2].get('Document').should.equal({'third': 'policy'}) - - -@mock_iam -def test_delete_policy_version(): - conn = boto3.client('iam', region_name='us-east-1') - conn.create_policy( - PolicyName="TestDeletePolicyVersion", - PolicyDocument='{"first":"policy"}') - conn.create_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", - PolicyDocument='{"second":"policy"}') - with assert_raises(ClientError): - conn.delete_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", - VersionId='v2-nope-this-does-not-exist') - conn.delete_policy_version( - PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion", - VersionId='v2') - versions = conn.list_policy_versions( - PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion") - len(versions.get('Versions')).should.equal(1) - - -@mock_iam_deprecated() -def test_create_user(): - conn = boto.connect_iam() - conn.create_user('my-user') - with assert_raises(BotoServerError): - conn.create_user('my-user') - - -@mock_iam_deprecated() -def test_get_user(): - conn = boto.connect_iam() - with assert_raises(BotoServerError): - conn.get_user('my-user') - conn.create_user('my-user') - conn.get_user('my-user') - - -@mock_iam_deprecated() -def test_get_current_user(): - """If no user is specific, IAM returns the current user""" - conn = boto.connect_iam() - user = conn.get_user()['get_user_response']['get_user_result']['user'] - user['user_name'].should.equal('default_user') - - -@mock_iam() -def test_list_users(): - path_prefix = '/' - max_items 
= 10 - conn = boto3.client('iam', region_name='us-east-1') - conn.create_user(UserName='my-user') - response = conn.list_users(PathPrefix=path_prefix, MaxItems=max_items) - user = response['Users'][0] - user['UserName'].should.equal('my-user') - user['Path'].should.equal('/') - user['Arn'].should.equal('arn:aws:iam::123456789012:user/my-user') - - -@mock_iam() -def test_user_policies(): - policy_name = 'UserManagedPolicy' - policy_document = "{'mypolicy': 'test'}" - user_name = 'my-user' - conn = boto3.client('iam', region_name='us-east-1') - conn.create_user(UserName=user_name) - conn.put_user_policy( - UserName=user_name, - PolicyName=policy_name, - PolicyDocument=policy_document - ) - - policy_doc = conn.get_user_policy( - UserName=user_name, - PolicyName=policy_name - ) - test = policy_document in policy_doc['PolicyDocument'] - test.should.equal(True) - - policies = conn.list_user_policies(UserName=user_name) - len(policies['PolicyNames']).should.equal(1) - policies['PolicyNames'][0].should.equal(policy_name) - - conn.delete_user_policy( - UserName=user_name, - PolicyName=policy_name - ) - - policies = conn.list_user_policies(UserName=user_name) - len(policies['PolicyNames']).should.equal(0) - - -@mock_iam_deprecated() -def test_create_login_profile(): - conn = boto.connect_iam() - with assert_raises(BotoServerError): - conn.create_login_profile('my-user', 'my-pass') - conn.create_user('my-user') - conn.create_login_profile('my-user', 'my-pass') - with assert_raises(BotoServerError): - conn.create_login_profile('my-user', 'my-pass') - - -@mock_iam_deprecated() -def test_delete_login_profile(): - conn = boto.connect_iam() - conn.create_user('my-user') - with assert_raises(BotoServerError): - conn.delete_login_profile('my-user') - conn.create_login_profile('my-user', 'my-pass') - conn.delete_login_profile('my-user') - - -@mock_iam_deprecated() -def test_create_access_key(): - conn = boto.connect_iam() - with assert_raises(BotoServerError): - conn.create_access_key('my-user') - conn.create_user('my-user') - conn.create_access_key('my-user') - - -@mock_iam_deprecated() -def test_get_all_access_keys(): - """If no access keys exist there should be none in the response, - if an access key is present it should have the correct fields present""" - conn = boto.connect_iam() - conn.create_user('my-user') - response = conn.get_all_access_keys('my-user') - assert_equals( - response['list_access_keys_response'][ - 'list_access_keys_result']['access_key_metadata'], - [] - ) - conn.create_access_key('my-user') - response = conn.get_all_access_keys('my-user') - assert_equals( - sorted(response['list_access_keys_response'][ - 'list_access_keys_result']['access_key_metadata'][0].keys()), - sorted(['status', 'create_date', 'user_name', 'access_key_id']) - ) - - -@mock_iam_deprecated() -def test_delete_access_key(): - conn = boto.connect_iam() - conn.create_user('my-user') - access_key_id = conn.create_access_key('my-user')['create_access_key_response'][ - 'create_access_key_result']['access_key']['access_key_id'] - conn.delete_access_key(access_key_id, 'my-user') - - -@mock_iam() -def test_mfa_devices(): - # Test enable device - conn = boto3.client('iam', region_name='us-east-1') - conn.create_user(UserName='my-user') - conn.enable_mfa_device( - UserName='my-user', - SerialNumber='123456789', - AuthenticationCode1='234567', - AuthenticationCode2='987654' - ) - - # Test list mfa devices - response = conn.list_mfa_devices(UserName='my-user') - device = response['MFADevices'][0] - 
device['SerialNumber'].should.equal('123456789') - - # Test deactivate mfa device - conn.deactivate_mfa_device(UserName='my-user', SerialNumber='123456789') - response = conn.list_mfa_devices(UserName='my-user') - len(response['MFADevices']).should.equal(0) - - -@mock_iam_deprecated() -def test_delete_user(): - conn = boto.connect_iam() - with assert_raises(BotoServerError): - conn.delete_user('my-user') - conn.create_user('my-user') - conn.delete_user('my-user') - - -@mock_iam_deprecated() -def test_generate_credential_report(): - conn = boto.connect_iam() - result = conn.generate_credential_report() - result['generate_credential_report_response'][ - 'generate_credential_report_result']['state'].should.equal('STARTED') - result = conn.generate_credential_report() - result['generate_credential_report_response'][ - 'generate_credential_report_result']['state'].should.equal('COMPLETE') - - -@mock_iam_deprecated() -def test_get_credential_report(): - conn = boto.connect_iam() - conn.create_user('my-user') - with assert_raises(BotoServerError): - conn.get_credential_report() - result = conn.generate_credential_report() - while result['generate_credential_report_response']['generate_credential_report_result']['state'] != 'COMPLETE': - result = conn.generate_credential_report() - result = conn.get_credential_report() - report = base64.b64decode(result['get_credential_report_response'][ - 'get_credential_report_result']['content'].encode('ascii')).decode('ascii') - report.should.match(r'.*my-user.*') - - -@requires_boto_gte('2.39') -@mock_iam_deprecated() -def test_managed_policy(): - conn = boto.connect_iam() - - conn.create_policy(policy_name='UserManagedPolicy', - policy_document={'mypolicy': 'test'}, - path='/mypolicy/', - description='my user managed policy') - - marker = 0 - aws_policies = [] - while marker is not None: - response = conn.list_policies(scope='AWS', marker=marker)[ - 'list_policies_response']['list_policies_result'] - for policy in response['policies']: - aws_policies.append(policy) - marker = response.get('marker') - set(p.name for p in aws_managed_policies).should.equal( - set(p['policy_name'] for p in aws_policies)) - - user_policies = conn.list_policies(scope='Local')['list_policies_response'][ - 'list_policies_result']['policies'] - set(['UserManagedPolicy']).should.equal( - set(p['policy_name'] for p in user_policies)) - - marker = 0 - all_policies = [] - while marker is not None: - response = conn.list_policies(marker=marker)[ - 'list_policies_response']['list_policies_result'] - for policy in response['policies']: - all_policies.append(policy) - marker = response.get('marker') - set(p['policy_name'] for p in aws_policies + - user_policies).should.equal(set(p['policy_name'] for p in all_policies)) - - role_name = 'my-role' - conn.create_role(role_name, assume_role_policy_document={ - 'policy': 'test'}, path="my-path") - for policy_name in ['AmazonElasticMapReduceRole', - 'AmazonElasticMapReduceforEC2Role']: - policy_arn = 'arn:aws:iam::aws:policy/service-role/' + policy_name - conn.attach_role_policy(policy_arn, role_name) - - rows = conn.list_policies(only_attached=True)['list_policies_response'][ - 'list_policies_result']['policies'] - rows.should.have.length_of(2) - for x in rows: - int(x['attachment_count']).should.be.greater_than(0) - - # boto has not implemented this end point but accessible this way - resp = conn.get_response('ListAttachedRolePolicies', - {'RoleName': role_name}, - list_marker='AttachedPolicies') - 
resp['list_attached_role_policies_response']['list_attached_role_policies_result'][ - 'attached_policies'].should.have.length_of(2) - - conn.detach_role_policy( - "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", - role_name) - rows = conn.list_policies(only_attached=True)['list_policies_response'][ - 'list_policies_result']['policies'] - rows.should.have.length_of(1) - for x in rows: - int(x['attachment_count']).should.be.greater_than(0) - - # boto has not implemented this end point but accessible this way - resp = conn.get_response('ListAttachedRolePolicies', - {'RoleName': role_name}, - list_marker='AttachedPolicies') - resp['list_attached_role_policies_response']['list_attached_role_policies_result'][ - 'attached_policies'].should.have.length_of(1) - - with assert_raises(BotoServerError): - conn.detach_role_policy( - "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", - role_name) - - with assert_raises(BotoServerError): - conn.detach_role_policy( - "arn:aws:iam::aws:policy/Nonexistent", role_name) - - -@mock_iam -def test_boto3_create_login_profile(): - conn = boto3.client('iam', region_name='us-east-1') - - with assert_raises(ClientError): - conn.create_login_profile(UserName='my-user', Password='Password') - - conn.create_user(UserName='my-user') - conn.create_login_profile(UserName='my-user', Password='Password') - - with assert_raises(ClientError): - conn.create_login_profile(UserName='my-user', Password='Password') - - -@mock_iam() -def test_attach_detach_user_policy(): - iam = boto3.resource('iam', region_name='us-east-1') - client = boto3.client('iam', region_name='us-east-1') - - user = iam.create_user(UserName='test-user') - - policy_name = 'UserAttachedPolicy' - policy = iam.create_policy(PolicyName=policy_name, - PolicyDocument='{"mypolicy": "test"}', - Path='/mypolicy/', - Description='my user attached policy') - - client.attach_user_policy(UserName=user.name, PolicyArn=policy.arn) - - resp = client.list_attached_user_policies(UserName=user.name) - resp['AttachedPolicies'].should.have.length_of(1) - attached_policy = resp['AttachedPolicies'][0] - attached_policy['PolicyArn'].should.equal(policy.arn) - attached_policy['PolicyName'].should.equal(policy_name) - - client.detach_user_policy(UserName=user.name, PolicyArn=policy.arn) - - resp = client.list_attached_user_policies(UserName=user.name) - resp['AttachedPolicies'].should.have.length_of(0) - - -@mock_iam -def test_update_access_key(): - iam = boto3.resource('iam', region_name='us-east-1') - client = iam.meta.client - username = 'test-user' - iam.create_user(UserName=username) - with assert_raises(ClientError): - client.update_access_key(UserName=username, - AccessKeyId='non-existent-key', - Status='Inactive') - key = client.create_access_key(UserName=username)['AccessKey'] - client.update_access_key(UserName=username, - AccessKeyId=key['AccessKeyId'], - Status='Inactive') - resp = client.list_access_keys(UserName=username) - resp['AccessKeyMetadata'][0]['Status'].should.equal('Inactive') - - -@mock_iam -def test_get_account_authorization_details(): - import json - conn = boto3.client('iam', region_name='us-east-1') - conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/") - conn.create_user(Path='/', UserName='testCloudAuxUser') - conn.create_group(Path='/', GroupName='testCloudAuxGroup') - conn.create_policy( - PolicyName='testCloudAuxPolicy', - Path='/', - PolicyDocument=json.dumps({ - "Version": "2012-10-17", - "Statement": [ - { - "Action": 
"s3:ListBucket", - "Resource": "*", - "Effect": "Allow", - } - ] - }), - Description='Test CloudAux Policy' - ) - - result = conn.get_account_authorization_details(Filter=['Role']) - len(result['RoleDetailList']) == 1 - len(result['UserDetailList']) == 0 - len(result['GroupDetailList']) == 0 - len(result['Policies']) == 0 - - result = conn.get_account_authorization_details(Filter=['User']) - len(result['RoleDetailList']) == 0 - len(result['UserDetailList']) == 1 - len(result['GroupDetailList']) == 0 - len(result['Policies']) == 0 - - result = conn.get_account_authorization_details(Filter=['Group']) - len(result['RoleDetailList']) == 0 - len(result['UserDetailList']) == 0 - len(result['GroupDetailList']) == 1 - len(result['Policies']) == 0 - - result = conn.get_account_authorization_details(Filter=['LocalManagedPolicy']) - len(result['RoleDetailList']) == 0 - len(result['UserDetailList']) == 0 - len(result['GroupDetailList']) == 0 - len(result['Policies']) == 1 - - # Check for greater than 1 since this should always be greater than one but might change. - # See iam/aws_managed_policies.py - result = conn.get_account_authorization_details(Filter=['AWSManagedPolicy']) - len(result['RoleDetailList']) == 0 - len(result['UserDetailList']) == 0 - len(result['GroupDetailList']) == 0 - len(result['Policies']) > 1 - - result = conn.get_account_authorization_details() - len(result['RoleDetailList']) == 1 - len(result['UserDetailList']) == 1 - len(result['GroupDetailList']) == 1 - len(result['Policies']) > 1 - - - +from __future__ import unicode_literals +import base64 + +import boto +import boto3 +import sure # noqa +from boto.exception import BotoServerError +from botocore.exceptions import ClientError +from moto import mock_iam, mock_iam_deprecated +from moto.iam.models import aws_managed_policies +from nose.tools import assert_raises, assert_equals +from nose.tools import raises + +from tests.helpers import requires_boto_gte + + +@mock_iam_deprecated() +def test_get_all_server_certs(): + conn = boto.connect_iam() + + conn.upload_server_cert("certname", "certbody", "privatekey") + certs = conn.get_all_server_certs()['list_server_certificates_response'][ + 'list_server_certificates_result']['server_certificate_metadata_list'] + certs.should.have.length_of(1) + cert1 = certs[0] + cert1.server_certificate_name.should.equal("certname") + cert1.arn.should.equal( + "arn:aws:iam::123456789012:server-certificate/certname") + + +@mock_iam_deprecated() +def test_get_server_cert_doesnt_exist(): + conn = boto.connect_iam() + + with assert_raises(BotoServerError): + conn.get_server_certificate("NonExistant") + + +@mock_iam_deprecated() +def test_get_server_cert(): + conn = boto.connect_iam() + + conn.upload_server_cert("certname", "certbody", "privatekey") + cert = conn.get_server_certificate("certname") + cert.server_certificate_name.should.equal("certname") + cert.arn.should.equal( + "arn:aws:iam::123456789012:server-certificate/certname") + + +@mock_iam_deprecated() +def test_upload_server_cert(): + conn = boto.connect_iam() + + conn.upload_server_cert("certname", "certbody", "privatekey") + cert = conn.get_server_certificate("certname") + cert.server_certificate_name.should.equal("certname") + cert.arn.should.equal( + "arn:aws:iam::123456789012:server-certificate/certname") + + +@mock_iam_deprecated() +def test_delete_server_cert(): + conn = boto.connect_iam() + + conn.upload_server_cert("certname", "certbody", "privatekey") + conn.get_server_certificate("certname") + conn.delete_server_cert("certname") + 
with assert_raises(BotoServerError): + conn.get_server_certificate("certname") + with assert_raises(BotoServerError): + conn.delete_server_cert("certname") + + +@mock_iam_deprecated() +@raises(BotoServerError) +def test_get_role__should_throw__when_role_does_not_exist(): + conn = boto.connect_iam() + + conn.get_role('unexisting_role') + + +@mock_iam_deprecated() +@raises(BotoServerError) +def test_get_instance_profile__should_throw__when_instance_profile_does_not_exist(): + conn = boto.connect_iam() + + conn.get_instance_profile('unexisting_instance_profile') + + +@mock_iam_deprecated() +def test_create_role_and_instance_profile(): + conn = boto.connect_iam() + conn.create_instance_profile("my-profile", path="my-path") + conn.create_role( + "my-role", assume_role_policy_document="some policy", path="my-path") + + conn.add_role_to_instance_profile("my-profile", "my-role") + + role = conn.get_role("my-role") + role.path.should.equal("my-path") + role.assume_role_policy_document.should.equal("some policy") + + profile = conn.get_instance_profile("my-profile") + profile.path.should.equal("my-path") + role_from_profile = list(profile.roles.values())[0] + role_from_profile['role_id'].should.equal(role.role_id) + role_from_profile['role_name'].should.equal("my-role") + + conn.list_roles().roles[0].role_name.should.equal('my-role') + + +@mock_iam_deprecated() +def test_remove_role_from_instance_profile(): + conn = boto.connect_iam() + conn.create_instance_profile("my-profile", path="my-path") + conn.create_role( + "my-role", assume_role_policy_document="some policy", path="my-path") + conn.add_role_to_instance_profile("my-profile", "my-role") + + profile = conn.get_instance_profile("my-profile") + role_from_profile = list(profile.roles.values())[0] + role_from_profile['role_name'].should.equal("my-role") + + conn.remove_role_from_instance_profile("my-profile", "my-role") + + profile = conn.get_instance_profile("my-profile") + dict(profile.roles).should.be.empty + + +@mock_iam() +def test_get_login_profile(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_user(UserName='my-user') + conn.create_login_profile(UserName='my-user', Password='my-pass') + + response = conn.get_login_profile(UserName='my-user') + response['LoginProfile']['UserName'].should.equal('my-user') + + +@mock_iam() +def test_update_login_profile(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_user(UserName='my-user') + conn.create_login_profile(UserName='my-user', Password='my-pass') + response = conn.get_login_profile(UserName='my-user') + response['LoginProfile'].get('PasswordResetRequired').should.equal(None) + + conn.update_login_profile(UserName='my-user', Password='new-pass', PasswordResetRequired=True) + response = conn.get_login_profile(UserName='my-user') + response['LoginProfile'].get('PasswordResetRequired').should.equal(True) + + +@mock_iam() +def test_delete_role(): + conn = boto3.client('iam', region_name='us-east-1') + + with assert_raises(ClientError): + conn.delete_role(RoleName="my-role") + + conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/") + role = conn.get_role(RoleName="my-role") + role.get('Role').get('Arn').should.equal('arn:aws:iam::123456789012:role/my-path/my-role') + + conn.delete_role(RoleName="my-role") + + with assert_raises(ClientError): + conn.get_role(RoleName="my-role") + + +@mock_iam_deprecated() +def test_list_instance_profiles(): + conn = boto.connect_iam() + conn.create_instance_profile("my-profile", 
path="my-path") + conn.create_role("my-role", path="my-path") + + conn.add_role_to_instance_profile("my-profile", "my-role") + + profiles = conn.list_instance_profiles().instance_profiles + + len(profiles).should.equal(1) + profiles[0].instance_profile_name.should.equal("my-profile") + profiles[0].roles.role_name.should.equal("my-role") + + +@mock_iam_deprecated() +def test_list_instance_profiles_for_role(): + conn = boto.connect_iam() + + conn.create_role(role_name="my-role", + assume_role_policy_document="some policy", path="my-path") + conn.create_role(role_name="my-role2", + assume_role_policy_document="some policy2", path="my-path2") + + profile_name_list = ['my-profile', 'my-profile2'] + profile_path_list = ['my-path', 'my-path2'] + for profile_count in range(0, 2): + conn.create_instance_profile( + profile_name_list[profile_count], path=profile_path_list[profile_count]) + + for profile_count in range(0, 2): + conn.add_role_to_instance_profile( + profile_name_list[profile_count], "my-role") + + profile_dump = conn.list_instance_profiles_for_role(role_name="my-role") + profile_list = profile_dump['list_instance_profiles_for_role_response'][ + 'list_instance_profiles_for_role_result']['instance_profiles'] + for profile_count in range(0, len(profile_list)): + profile_name_list.remove(profile_list[profile_count][ + "instance_profile_name"]) + profile_path_list.remove(profile_list[profile_count]["path"]) + profile_list[profile_count]["roles"]["member"][ + "role_name"].should.equal("my-role") + + len(profile_name_list).should.equal(0) + len(profile_path_list).should.equal(0) + + profile_dump2 = conn.list_instance_profiles_for_role(role_name="my-role2") + profile_list = profile_dump2['list_instance_profiles_for_role_response'][ + 'list_instance_profiles_for_role_result']['instance_profiles'] + len(profile_list).should.equal(0) + + +@mock_iam_deprecated() +def test_list_role_policies(): + conn = boto.connect_iam() + conn.create_role("my-role") + conn.put_role_policy("my-role", "test policy", "my policy") + role = conn.list_role_policies("my-role") + role.policy_names.should.have.length_of(1) + role.policy_names[0].should.equal("test policy") + + conn.put_role_policy("my-role", "test policy 2", "another policy") + role = conn.list_role_policies("my-role") + role.policy_names.should.have.length_of(2) + + conn.delete_role_policy("my-role", "test policy") + role = conn.list_role_policies("my-role") + role.policy_names.should.have.length_of(1) + role.policy_names[0].should.equal("test policy 2") + + with assert_raises(BotoServerError): + conn.delete_role_policy("my-role", "test policy") + + +@mock_iam_deprecated() +def test_put_role_policy(): + conn = boto.connect_iam() + conn.create_role( + "my-role", assume_role_policy_document="some policy", path="my-path") + conn.put_role_policy("my-role", "test policy", "my policy") + policy = conn.get_role_policy( + "my-role", "test policy")['get_role_policy_response']['get_role_policy_result']['policy_name'] + policy.should.equal("test policy") + + +@mock_iam_deprecated() +def test_update_assume_role_policy(): + conn = boto.connect_iam() + role = conn.create_role("my-role") + conn.update_assume_role_policy(role.role_name, "my-policy") + role = conn.get_role("my-role") + role.assume_role_policy_document.should.equal("my-policy") + + +@mock_iam +def test_create_policy(): + conn = boto3.client('iam', region_name='us-east-1') + response = conn.create_policy( + PolicyName="TestCreatePolicy", + PolicyDocument='{"some":"policy"}') + 
response['Policy']['Arn'].should.equal("arn:aws:iam::123456789012:policy/TestCreatePolicy")
+
+
+@mock_iam
+def test_create_policy_versions():
+    conn = boto3.client('iam', region_name='us-east-1')
+    with assert_raises(ClientError):
+        conn.create_policy_version(
+            PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion",
+            PolicyDocument='{"some":"policy"}')
+    conn.create_policy(
+        PolicyName="TestCreatePolicyVersion",
+        PolicyDocument='{"some":"policy"}')
+    version = conn.create_policy_version(
+        PolicyArn="arn:aws:iam::123456789012:policy/TestCreatePolicyVersion",
+        PolicyDocument='{"some":"policy"}')
+    version.get('PolicyVersion').get('Document').should.equal({'some': 'policy'})
+
+
+@mock_iam
+def test_get_policy():
+    conn = boto3.client('iam', region_name='us-east-1')
+    response = conn.create_policy(
+        PolicyName="TestGetPolicy",
+        PolicyDocument='{"some":"policy"}')
+    policy = conn.get_policy(
+        PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicy")
+    policy['Policy']['Arn'].should.equal("arn:aws:iam::123456789012:policy/TestGetPolicy")
+
+
+@mock_iam
+def test_get_policy_version():
+    conn = boto3.client('iam', region_name='us-east-1')
+    conn.create_policy(
+        PolicyName="TestGetPolicyVersion",
+        PolicyDocument='{"some":"policy"}')
+    version = conn.create_policy_version(
+        PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion",
+        PolicyDocument='{"some":"policy"}')
+    with assert_raises(ClientError):
+        conn.get_policy_version(
+            PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion",
+            VersionId='v2-does-not-exist')
+    retrieved = conn.get_policy_version(
+        PolicyArn="arn:aws:iam::123456789012:policy/TestGetPolicyVersion",
+        VersionId=version.get('PolicyVersion').get('VersionId'))
+    retrieved.get('PolicyVersion').get('Document').should.equal({'some': 'policy'})
+
+
+@mock_iam
+def test_list_policy_versions():
+    conn = boto3.client('iam', region_name='us-east-1')
+    with assert_raises(ClientError):
+        versions = conn.list_policy_versions(
+            PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions")
+    conn.create_policy(
+        PolicyName="TestListPolicyVersions",
+        PolicyDocument='{"first":"policy"}')
+    versions = conn.list_policy_versions(
+        PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions")
+    versions.get('Versions')[0].get('VersionId').should.equal('v1')
+
+    conn.create_policy_version(
+        PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions",
+        PolicyDocument='{"second":"policy"}')
+    conn.create_policy_version(
+        PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions",
+        PolicyDocument='{"third":"policy"}')
+    versions = conn.list_policy_versions(
+        PolicyArn="arn:aws:iam::123456789012:policy/TestListPolicyVersions")
+    versions.get('Versions')[1].get('Document').should.equal({'second': 'policy'})
+    versions.get('Versions')[2].get('Document').should.equal({'third': 'policy'})
+
+
+@mock_iam
+def test_delete_policy_version():
+    conn = boto3.client('iam', region_name='us-east-1')
+    conn.create_policy(
+        PolicyName="TestDeletePolicyVersion",
+        PolicyDocument='{"first":"policy"}')
+    conn.create_policy_version(
+        PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion",
+        PolicyDocument='{"second":"policy"}')
+    with assert_raises(ClientError):
+        conn.delete_policy_version(
+            PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion",
+            VersionId='v2-nope-this-does-not-exist')
+    conn.delete_policy_version(
+        PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion",
+        VersionId='v2')
+    versions = conn.list_policy_versions(
+        PolicyArn="arn:aws:iam::123456789012:policy/TestDeletePolicyVersion")
+    len(versions.get('Versions')).should.equal(1)
+
+
+@mock_iam_deprecated()
+def test_create_user():
+    conn = boto.connect_iam()
+    conn.create_user('my-user')
+    with assert_raises(BotoServerError):
+        conn.create_user('my-user')
+
+
+@mock_iam_deprecated()
+def test_get_user():
+    conn = boto.connect_iam()
+    with assert_raises(BotoServerError):
+        conn.get_user('my-user')
+    conn.create_user('my-user')
+    conn.get_user('my-user')
+
+
+@mock_iam_deprecated()
+def test_get_current_user():
+    """If no user is specified, IAM returns the current user"""
+    conn = boto.connect_iam()
+    user = conn.get_user()['get_user_response']['get_user_result']['user']
+    user['user_name'].should.equal('default_user')
+
+
+@mock_iam()
+def test_list_users():
+    path_prefix = '/'
+    max_items = 10
+    conn = boto3.client('iam', region_name='us-east-1')
+    conn.create_user(UserName='my-user')
+    response = conn.list_users(PathPrefix=path_prefix, MaxItems=max_items)
+    user = response['Users'][0]
+    user['UserName'].should.equal('my-user')
+    user['Path'].should.equal('/')
+    user['Arn'].should.equal('arn:aws:iam::123456789012:user/my-user')
+
+
+@mock_iam()
+def test_user_policies():
+    policy_name = 'UserManagedPolicy'
+    policy_document = "{'mypolicy': 'test'}"
+    user_name = 'my-user'
+    conn = boto3.client('iam', region_name='us-east-1')
+    conn.create_user(UserName=user_name)
+    conn.put_user_policy(
+        UserName=user_name,
+        PolicyName=policy_name,
+        PolicyDocument=policy_document
+    )
+
+    policy_doc = conn.get_user_policy(
+        UserName=user_name,
+        PolicyName=policy_name
+    )
+    test = policy_document in policy_doc['PolicyDocument']
+    test.should.equal(True)
+
+    policies = conn.list_user_policies(UserName=user_name)
+    len(policies['PolicyNames']).should.equal(1)
+    policies['PolicyNames'][0].should.equal(policy_name)
+
+    conn.delete_user_policy(
+        UserName=user_name,
+        PolicyName=policy_name
+    )
+
+    policies = conn.list_user_policies(UserName=user_name)
+    len(policies['PolicyNames']).should.equal(0)
+
+
+@mock_iam_deprecated()
+def test_create_login_profile():
+    conn = boto.connect_iam()
+    with assert_raises(BotoServerError):
+        conn.create_login_profile('my-user', 'my-pass')
+    conn.create_user('my-user')
+    conn.create_login_profile('my-user', 'my-pass')
+    with assert_raises(BotoServerError):
+        conn.create_login_profile('my-user', 'my-pass')
+
+
+@mock_iam_deprecated()
+def test_delete_login_profile():
+    conn = boto.connect_iam()
+    conn.create_user('my-user')
+    with assert_raises(BotoServerError):
+        conn.delete_login_profile('my-user')
+    conn.create_login_profile('my-user', 'my-pass')
+    conn.delete_login_profile('my-user')
+
+
+@mock_iam_deprecated()
+def test_create_access_key():
+    conn = boto.connect_iam()
+    with assert_raises(BotoServerError):
+        conn.create_access_key('my-user')
+    conn.create_user('my-user')
+    conn.create_access_key('my-user')
+
+
+@mock_iam_deprecated()
+def test_get_all_access_keys():
+    """If no access keys exist there should be none in the response;
+    if an access key is present it should have the correct fields present"""
+    conn = boto.connect_iam()
+    conn.create_user('my-user')
+    response = conn.get_all_access_keys('my-user')
+    assert_equals(
+        response['list_access_keys_response'][
+            'list_access_keys_result']['access_key_metadata'],
+        []
+    )
+    conn.create_access_key('my-user')
+    response =
conn.get_all_access_keys('my-user') + assert_equals( + sorted(response['list_access_keys_response'][ + 'list_access_keys_result']['access_key_metadata'][0].keys()), + sorted(['status', 'create_date', 'user_name', 'access_key_id']) + ) + + +@mock_iam_deprecated() +def test_delete_access_key(): + conn = boto.connect_iam() + conn.create_user('my-user') + access_key_id = conn.create_access_key('my-user')['create_access_key_response'][ + 'create_access_key_result']['access_key']['access_key_id'] + conn.delete_access_key(access_key_id, 'my-user') + + +@mock_iam() +def test_mfa_devices(): + # Test enable device + conn = boto3.client('iam', region_name='us-east-1') + conn.create_user(UserName='my-user') + conn.enable_mfa_device( + UserName='my-user', + SerialNumber='123456789', + AuthenticationCode1='234567', + AuthenticationCode2='987654' + ) + + # Test list mfa devices + response = conn.list_mfa_devices(UserName='my-user') + device = response['MFADevices'][0] + device['SerialNumber'].should.equal('123456789') + + # Test deactivate mfa device + conn.deactivate_mfa_device(UserName='my-user', SerialNumber='123456789') + response = conn.list_mfa_devices(UserName='my-user') + len(response['MFADevices']).should.equal(0) + + +@mock_iam_deprecated() +def test_delete_user(): + conn = boto.connect_iam() + with assert_raises(BotoServerError): + conn.delete_user('my-user') + conn.create_user('my-user') + conn.delete_user('my-user') + + +@mock_iam_deprecated() +def test_generate_credential_report(): + conn = boto.connect_iam() + result = conn.generate_credential_report() + result['generate_credential_report_response'][ + 'generate_credential_report_result']['state'].should.equal('STARTED') + result = conn.generate_credential_report() + result['generate_credential_report_response'][ + 'generate_credential_report_result']['state'].should.equal('COMPLETE') + + +@mock_iam_deprecated() +def test_get_credential_report(): + conn = boto.connect_iam() + conn.create_user('my-user') + with assert_raises(BotoServerError): + conn.get_credential_report() + result = conn.generate_credential_report() + while result['generate_credential_report_response']['generate_credential_report_result']['state'] != 'COMPLETE': + result = conn.generate_credential_report() + result = conn.get_credential_report() + report = base64.b64decode(result['get_credential_report_response'][ + 'get_credential_report_result']['content'].encode('ascii')).decode('ascii') + report.should.match(r'.*my-user.*') + + +@requires_boto_gte('2.39') +@mock_iam_deprecated() +def test_managed_policy(): + conn = boto.connect_iam() + + conn.create_policy(policy_name='UserManagedPolicy', + policy_document={'mypolicy': 'test'}, + path='/mypolicy/', + description='my user managed policy') + + marker = 0 + aws_policies = [] + while marker is not None: + response = conn.list_policies(scope='AWS', marker=marker)[ + 'list_policies_response']['list_policies_result'] + for policy in response['policies']: + aws_policies.append(policy) + marker = response.get('marker') + set(p.name for p in aws_managed_policies).should.equal( + set(p['policy_name'] for p in aws_policies)) + + user_policies = conn.list_policies(scope='Local')['list_policies_response'][ + 'list_policies_result']['policies'] + set(['UserManagedPolicy']).should.equal( + set(p['policy_name'] for p in user_policies)) + + marker = 0 + all_policies = [] + while marker is not None: + response = conn.list_policies(marker=marker)[ + 'list_policies_response']['list_policies_result'] + for policy in response['policies']: + 
all_policies.append(policy) + marker = response.get('marker') + set(p['policy_name'] for p in aws_policies + + user_policies).should.equal(set(p['policy_name'] for p in all_policies)) + + role_name = 'my-role' + conn.create_role(role_name, assume_role_policy_document={ + 'policy': 'test'}, path="my-path") + for policy_name in ['AmazonElasticMapReduceRole', + 'AmazonElasticMapReduceforEC2Role']: + policy_arn = 'arn:aws:iam::aws:policy/service-role/' + policy_name + conn.attach_role_policy(policy_arn, role_name) + + rows = conn.list_policies(only_attached=True)['list_policies_response'][ + 'list_policies_result']['policies'] + rows.should.have.length_of(2) + for x in rows: + int(x['attachment_count']).should.be.greater_than(0) + + # boto has not implemented this end point but accessible this way + resp = conn.get_response('ListAttachedRolePolicies', + {'RoleName': role_name}, + list_marker='AttachedPolicies') + resp['list_attached_role_policies_response']['list_attached_role_policies_result'][ + 'attached_policies'].should.have.length_of(2) + + conn.detach_role_policy( + "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", + role_name) + rows = conn.list_policies(only_attached=True)['list_policies_response'][ + 'list_policies_result']['policies'] + rows.should.have.length_of(1) + for x in rows: + int(x['attachment_count']).should.be.greater_than(0) + + # boto has not implemented this end point but accessible this way + resp = conn.get_response('ListAttachedRolePolicies', + {'RoleName': role_name}, + list_marker='AttachedPolicies') + resp['list_attached_role_policies_response']['list_attached_role_policies_result'][ + 'attached_policies'].should.have.length_of(1) + + with assert_raises(BotoServerError): + conn.detach_role_policy( + "arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceRole", + role_name) + + with assert_raises(BotoServerError): + conn.detach_role_policy( + "arn:aws:iam::aws:policy/Nonexistent", role_name) + + +@mock_iam +def test_boto3_create_login_profile(): + conn = boto3.client('iam', region_name='us-east-1') + + with assert_raises(ClientError): + conn.create_login_profile(UserName='my-user', Password='Password') + + conn.create_user(UserName='my-user') + conn.create_login_profile(UserName='my-user', Password='Password') + + with assert_raises(ClientError): + conn.create_login_profile(UserName='my-user', Password='Password') + + +@mock_iam() +def test_attach_detach_user_policy(): + iam = boto3.resource('iam', region_name='us-east-1') + client = boto3.client('iam', region_name='us-east-1') + + user = iam.create_user(UserName='test-user') + + policy_name = 'UserAttachedPolicy' + policy = iam.create_policy(PolicyName=policy_name, + PolicyDocument='{"mypolicy": "test"}', + Path='/mypolicy/', + Description='my user attached policy') + + client.attach_user_policy(UserName=user.name, PolicyArn=policy.arn) + + resp = client.list_attached_user_policies(UserName=user.name) + resp['AttachedPolicies'].should.have.length_of(1) + attached_policy = resp['AttachedPolicies'][0] + attached_policy['PolicyArn'].should.equal(policy.arn) + attached_policy['PolicyName'].should.equal(policy_name) + + client.detach_user_policy(UserName=user.name, PolicyArn=policy.arn) + + resp = client.list_attached_user_policies(UserName=user.name) + resp['AttachedPolicies'].should.have.length_of(0) + + +@mock_iam +def test_update_access_key(): + iam = boto3.resource('iam', region_name='us-east-1') + client = iam.meta.client + username = 'test-user' + iam.create_user(UserName=username) + with 
assert_raises(ClientError):
+        client.update_access_key(UserName=username,
+                                 AccessKeyId='non-existent-key',
+                                 Status='Inactive')
+    key = client.create_access_key(UserName=username)['AccessKey']
+    client.update_access_key(UserName=username,
+                             AccessKeyId=key['AccessKeyId'],
+                             Status='Inactive')
+    resp = client.list_access_keys(UserName=username)
+    resp['AccessKeyMetadata'][0]['Status'].should.equal('Inactive')
+
+
+@mock_iam
+def test_get_account_authorization_details():
+    import json
+    conn = boto3.client('iam', region_name='us-east-1')
+    conn.create_role(RoleName="my-role", AssumeRolePolicyDocument="some policy", Path="/my-path/")
+    conn.create_user(Path='/', UserName='testCloudAuxUser')
+    conn.create_group(Path='/', GroupName='testCloudAuxGroup')
+    conn.create_policy(
+        PolicyName='testCloudAuxPolicy',
+        Path='/',
+        PolicyDocument=json.dumps({
+            "Version": "2012-10-17",
+            "Statement": [
+                {
+                    "Action": "s3:ListBucket",
+                    "Resource": "*",
+                    "Effect": "Allow",
+                }
+            ]
+        }),
+        Description='Test CloudAux Policy'
+    )
+
+    result = conn.get_account_authorization_details(Filter=['Role'])
+    assert len(result['RoleDetailList']) == 1
+    assert len(result['UserDetailList']) == 0
+    assert len(result['GroupDetailList']) == 0
+    assert len(result['Policies']) == 0
+
+    result = conn.get_account_authorization_details(Filter=['User'])
+    assert len(result['RoleDetailList']) == 0
+    assert len(result['UserDetailList']) == 1
+    assert len(result['GroupDetailList']) == 0
+    assert len(result['Policies']) == 0
+
+    result = conn.get_account_authorization_details(Filter=['Group'])
+    assert len(result['RoleDetailList']) == 0
+    assert len(result['UserDetailList']) == 0
+    assert len(result['GroupDetailList']) == 1
+    assert len(result['Policies']) == 0
+
+    result = conn.get_account_authorization_details(Filter=['LocalManagedPolicy'])
+    assert len(result['RoleDetailList']) == 0
+    assert len(result['UserDetailList']) == 0
+    assert len(result['GroupDetailList']) == 0
+    assert len(result['Policies']) == 1
+
+    # The AWSManagedPolicy count should always be greater than one,
+    # but the exact number may change over time.
+    # See iam/aws_managed_policies.py
+    result = conn.get_account_authorization_details(Filter=['AWSManagedPolicy'])
+    assert len(result['RoleDetailList']) == 0
+    assert len(result['UserDetailList']) == 0
+    assert len(result['GroupDetailList']) == 0
+    assert len(result['Policies']) > 1
+
+    result = conn.get_account_authorization_details()
+    assert len(result['RoleDetailList']) == 1
+    assert len(result['UserDetailList']) == 1
+    assert len(result['GroupDetailList']) == 1
+    assert len(result['Policies']) > 1
+
+
+
diff --git a/tests/test_iam/test_iam_account_aliases.py b/tests/test_iam/test_iam_account_aliases.py
index 3d927038d..5d7dec408 100644
--- a/tests/test_iam/test_iam_account_aliases.py
+++ b/tests/test_iam/test_iam_account_aliases.py
@@ -1,20 +1,20 @@
-import boto3
-import sure # noqa
-from moto import mock_iam
-
-
-@mock_iam()
-def test_account_aliases():
-    client = boto3.client('iam', region_name='us-east-1')
-
-    alias = 'my-account-name'
-    aliases = client.list_account_aliases()
-    aliases.should.have.key('AccountAliases').which.should.equal([])
-
-    client.create_account_alias(AccountAlias=alias)
-    aliases = client.list_account_aliases()
-    aliases.should.have.key('AccountAliases').which.should.equal([alias])
-
-    client.delete_account_alias(AccountAlias=alias)
-    aliases = client.list_account_aliases()
-    aliases.should.have.key('AccountAliases').which.should.equal([])
+import boto3
+import sure # noqa
+from moto import mock_iam
+
+
+@mock_iam()
+def test_account_aliases():
+    client = boto3.client('iam', region_name='us-east-1')
+
+    alias = 'my-account-name'
+    aliases = client.list_account_aliases()
+    aliases.should.have.key('AccountAliases').which.should.equal([])
+
+    client.create_account_alias(AccountAlias=alias)
+    aliases = client.list_account_aliases()
+    aliases.should.have.key('AccountAliases').which.should.equal([alias])
+
+    client.delete_account_alias(AccountAlias=alias)
+    aliases = client.list_account_aliases()
+    aliases.should.have.key('AccountAliases').which.should.equal([])
diff --git a/tests/test_iam/test_iam_groups.py b/tests/test_iam/test_iam_groups.py
index 0d4756f75..87d4123e2 100644
--- a/tests/test_iam/test_iam_groups.py
+++ b/tests/test_iam/test_iam_groups.py
@@ -1,155 +1,155 @@
-from __future__ import unicode_literals
-
-from datetime import datetime
-
-import boto
-import boto3
-import sure # noqa
-
-from nose.tools import assert_raises
-from boto.exception import BotoServerError
-from moto import mock_iam, mock_iam_deprecated
-
-
-@mock_iam_deprecated()
-def test_create_group():
-    conn = boto.connect_iam()
-    conn.create_group('my-group')
-    with assert_raises(BotoServerError):
-        conn.create_group('my-group')
-
-
-@mock_iam_deprecated()
-def test_get_group():
-    conn = boto.connect_iam()
-    conn.create_group('my-group')
-    conn.get_group('my-group')
-    with assert_raises(BotoServerError):
-        conn.get_group('not-group')
-
-
-@mock_iam()
-def test_get_group_current():
-    conn = boto3.client('iam', region_name='us-east-1')
-    conn.create_group(GroupName='my-group')
-    result = conn.get_group(GroupName='my-group')
-
-    assert result['Group']['Path'] == '/'
-    assert result['Group']['GroupName'] == 'my-group'
-    assert isinstance(result['Group']['CreateDate'], datetime)
-    assert result['Group']['GroupId']
-    assert result['Group']['Arn'] == 'arn:aws:iam::123456789012:group/my-group'
-    assert not result['Users']
-
-    # Make a group with a different path:
-    other_group = conn.create_group(GroupName='my-other-group', Path='some/location')
-    assert other_group['Group']['Path'] == 'some/location'
-    assert other_group['Group']['Arn'] ==
'arn:aws:iam::123456789012:group/some/location/my-other-group' - - -@mock_iam_deprecated() -def test_get_all_groups(): - conn = boto.connect_iam() - conn.create_group('my-group1') - conn.create_group('my-group2') - groups = conn.get_all_groups()['list_groups_response'][ - 'list_groups_result']['groups'] - groups.should.have.length_of(2) - - -@mock_iam_deprecated() -def test_add_user_to_group(): - conn = boto.connect_iam() - with assert_raises(BotoServerError): - conn.add_user_to_group('my-group', 'my-user') - conn.create_group('my-group') - with assert_raises(BotoServerError): - conn.add_user_to_group('my-group', 'my-user') - conn.create_user('my-user') - conn.add_user_to_group('my-group', 'my-user') - - -@mock_iam_deprecated() -def test_remove_user_from_group(): - conn = boto.connect_iam() - with assert_raises(BotoServerError): - conn.remove_user_from_group('my-group', 'my-user') - conn.create_group('my-group') - conn.create_user('my-user') - with assert_raises(BotoServerError): - conn.remove_user_from_group('my-group', 'my-user') - conn.add_user_to_group('my-group', 'my-user') - conn.remove_user_from_group('my-group', 'my-user') - - -@mock_iam_deprecated() -def test_get_groups_for_user(): - conn = boto.connect_iam() - conn.create_group('my-group1') - conn.create_group('my-group2') - conn.create_group('other-group') - conn.create_user('my-user') - conn.add_user_to_group('my-group1', 'my-user') - conn.add_user_to_group('my-group2', 'my-user') - - groups = conn.get_groups_for_user( - 'my-user')['list_groups_for_user_response']['list_groups_for_user_result']['groups'] - groups.should.have.length_of(2) - - -@mock_iam_deprecated() -def test_put_group_policy(): - conn = boto.connect_iam() - conn.create_group('my-group') - conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}') - - -@mock_iam -def test_attach_group_policies(): - conn = boto3.client('iam', region_name='us-east-1') - conn.create_group(GroupName='my-group') - conn.list_attached_group_policies(GroupName='my-group')['AttachedPolicies'].should.be.empty - policy_arn = 'arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforEC2Role' - conn.list_attached_group_policies(GroupName='my-group')['AttachedPolicies'].should.be.empty - conn.attach_group_policy(GroupName='my-group', PolicyArn=policy_arn) - conn.list_attached_group_policies(GroupName='my-group')['AttachedPolicies'].should.equal( - [ - { - 'PolicyName': 'AmazonElasticMapReduceforEC2Role', - 'PolicyArn': policy_arn, - } - ]) - - conn.detach_group_policy(GroupName='my-group', PolicyArn=policy_arn) - conn.list_attached_group_policies(GroupName='my-group')['AttachedPolicies'].should.be.empty - - -@mock_iam_deprecated() -def test_get_group_policy(): - conn = boto.connect_iam() - conn.create_group('my-group') - with assert_raises(BotoServerError): - conn.get_group_policy('my-group', 'my-policy') - - conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}') - conn.get_group_policy('my-group', 'my-policy') - - -@mock_iam_deprecated() -def test_get_all_group_policies(): - conn = boto.connect_iam() - conn.create_group('my-group') - policies = conn.get_all_group_policies('my-group')['list_group_policies_response']['list_group_policies_result']['policy_names'] - assert policies == [] - conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}') - policies = conn.get_all_group_policies('my-group')['list_group_policies_response']['list_group_policies_result']['policy_names'] - assert policies == ['my-policy'] - - -@mock_iam() -def test_list_group_policies(): - 
conn = boto3.client('iam', region_name='us-east-1') - conn.create_group(GroupName='my-group') - conn.list_group_policies(GroupName='my-group')['PolicyNames'].should.be.empty - conn.put_group_policy(GroupName='my-group', PolicyName='my-policy', PolicyDocument='{"some": "json"}') - conn.list_group_policies(GroupName='my-group')['PolicyNames'].should.equal(['my-policy']) +from __future__ import unicode_literals + +from datetime import datetime + +import boto +import boto3 +import sure # noqa + +from nose.tools import assert_raises +from boto.exception import BotoServerError +from moto import mock_iam, mock_iam_deprecated + + +@mock_iam_deprecated() +def test_create_group(): + conn = boto.connect_iam() + conn.create_group('my-group') + with assert_raises(BotoServerError): + conn.create_group('my-group') + + +@mock_iam_deprecated() +def test_get_group(): + conn = boto.connect_iam() + conn.create_group('my-group') + conn.get_group('my-group') + with assert_raises(BotoServerError): + conn.get_group('not-group') + + +@mock_iam() +def test_get_group_current(): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_group(GroupName='my-group') + result = conn.get_group(GroupName='my-group') + + assert result['Group']['Path'] == '/' + assert result['Group']['GroupName'] == 'my-group' + assert isinstance(result['Group']['CreateDate'], datetime) + assert result['Group']['GroupId'] + assert result['Group']['Arn'] == 'arn:aws:iam::123456789012:group/my-group' + assert not result['Users'] + + # Make a group with a different path: + other_group = conn.create_group(GroupName='my-other-group', Path='some/location') + assert other_group['Group']['Path'] == 'some/location' + assert other_group['Group']['Arn'] == 'arn:aws:iam::123456789012:group/some/location/my-other-group' + + +@mock_iam_deprecated() +def test_get_all_groups(): + conn = boto.connect_iam() + conn.create_group('my-group1') + conn.create_group('my-group2') + groups = conn.get_all_groups()['list_groups_response'][ + 'list_groups_result']['groups'] + groups.should.have.length_of(2) + + +@mock_iam_deprecated() +def test_add_user_to_group(): + conn = boto.connect_iam() + with assert_raises(BotoServerError): + conn.add_user_to_group('my-group', 'my-user') + conn.create_group('my-group') + with assert_raises(BotoServerError): + conn.add_user_to_group('my-group', 'my-user') + conn.create_user('my-user') + conn.add_user_to_group('my-group', 'my-user') + + +@mock_iam_deprecated() +def test_remove_user_from_group(): + conn = boto.connect_iam() + with assert_raises(BotoServerError): + conn.remove_user_from_group('my-group', 'my-user') + conn.create_group('my-group') + conn.create_user('my-user') + with assert_raises(BotoServerError): + conn.remove_user_from_group('my-group', 'my-user') + conn.add_user_to_group('my-group', 'my-user') + conn.remove_user_from_group('my-group', 'my-user') + + +@mock_iam_deprecated() +def test_get_groups_for_user(): + conn = boto.connect_iam() + conn.create_group('my-group1') + conn.create_group('my-group2') + conn.create_group('other-group') + conn.create_user('my-user') + conn.add_user_to_group('my-group1', 'my-user') + conn.add_user_to_group('my-group2', 'my-user') + + groups = conn.get_groups_for_user( + 'my-user')['list_groups_for_user_response']['list_groups_for_user_result']['groups'] + groups.should.have.length_of(2) + + +@mock_iam_deprecated() +def test_put_group_policy(): + conn = boto.connect_iam() + conn.create_group('my-group') + conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}') + + 
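+# A minimal sketch for contrast with the inline put_group_policy flow above:
+# inline policies embed a document directly in the group, while managed
+# policies are standalone objects attached by ARN. The names 'example-group'
+# and 'ExamplePolicy' are illustrative assumptions, not fixtures used
+# elsewhere in this suite, and the non-test_ prefix keeps the runner from
+# collecting this sketch.
+@mock_iam()
+def example_attach_managed_group_policy():
+    client = boto3.client('iam', region_name='us-east-1')
+    client.create_group(GroupName='example-group')
+    # Managed policies live outside any group and are referenced by ARN.
+    arn = client.create_policy(
+        PolicyName='ExamplePolicy',
+        PolicyDocument='{"some": "json"}')['Policy']['Arn']
+    client.attach_group_policy(GroupName='example-group', PolicyArn=arn)
+    attached = client.list_attached_group_policies(GroupName='example-group')
+    attached['AttachedPolicies'][0]['PolicyArn'].should.equal(arn)
+
+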
+@mock_iam
+def test_attach_group_policies():
+    conn = boto3.client('iam', region_name='us-east-1')
+    conn.create_group(GroupName='my-group')
+    conn.list_attached_group_policies(GroupName='my-group')['AttachedPolicies'].should.be.empty
+    policy_arn = 'arn:aws:iam::aws:policy/service-role/AmazonElasticMapReduceforEC2Role'
+    conn.list_attached_group_policies(GroupName='my-group')['AttachedPolicies'].should.be.empty
+    conn.attach_group_policy(GroupName='my-group', PolicyArn=policy_arn)
+    conn.list_attached_group_policies(GroupName='my-group')['AttachedPolicies'].should.equal(
+        [
+            {
+                'PolicyName': 'AmazonElasticMapReduceforEC2Role',
+                'PolicyArn': policy_arn,
+            }
+        ])
+
+    conn.detach_group_policy(GroupName='my-group', PolicyArn=policy_arn)
+    conn.list_attached_group_policies(GroupName='my-group')['AttachedPolicies'].should.be.empty
+
+
+@mock_iam_deprecated()
+def test_get_group_policy():
+    conn = boto.connect_iam()
+    conn.create_group('my-group')
+    with assert_raises(BotoServerError):
+        conn.get_group_policy('my-group', 'my-policy')
+
+    conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}')
+    conn.get_group_policy('my-group', 'my-policy')
+
+
+@mock_iam_deprecated()
+def test_get_all_group_policies():
+    conn = boto.connect_iam()
+    conn.create_group('my-group')
+    policies = conn.get_all_group_policies('my-group')['list_group_policies_response']['list_group_policies_result']['policy_names']
+    assert policies == []
+    conn.put_group_policy('my-group', 'my-policy', '{"some": "json"}')
+    policies = conn.get_all_group_policies('my-group')['list_group_policies_response']['list_group_policies_result']['policy_names']
+    assert policies == ['my-policy']
+
+
+@mock_iam()
+def test_list_group_policies():
+    conn = boto3.client('iam', region_name='us-east-1')
+    conn.create_group(GroupName='my-group')
+    conn.list_group_policies(GroupName='my-group')['PolicyNames'].should.be.empty
+    conn.put_group_policy(GroupName='my-group', PolicyName='my-policy', PolicyDocument='{"some": "json"}')
+    conn.list_group_policies(GroupName='my-group')['PolicyNames'].should.equal(['my-policy'])
diff --git a/tests/test_iam/test_server.py b/tests/test_iam/test_server.py
index 59aaf1462..80c15b59d 100644
--- a/tests/test_iam/test_server.py
+++ b/tests/test_iam/test_server.py
@@ -1,26 +1,26 @@
-from __future__ import unicode_literals
-
-import json
-
-import re
-import sure  # noqa
-
-import moto.server as server
-
-'''
-Test the different server responses
-'''
-
-
-def test_iam_server_get():
-    backend = server.create_backend_app("iam")
-    test_client = backend.test_client()
-
-    group_data = test_client.action_data(
-        "CreateGroup", GroupName="test group", Path="/")
-    group_id = re.search("<GroupId>(.*)</GroupId>", group_data).groups()[0]
-
-    groups_data = test_client.action_data("ListGroups")
-    groups_ids = re.findall("<GroupId>(.*)</GroupId>", groups_data)
-
-    assert group_id in groups_ids
+from __future__ import unicode_literals
+
+import json
+
+import re
+import sure  # noqa
+
+import moto.server as server
+
+'''
+Test the different server responses
+'''
+
+
+def test_iam_server_get():
+    backend = server.create_backend_app("iam")
+    test_client = backend.test_client()
+
+    group_data = test_client.action_data(
+        "CreateGroup", GroupName="test group", Path="/")
+    group_id = re.search("<GroupId>(.*)</GroupId>", group_data).groups()[0]
+
+    groups_data = test_client.action_data("ListGroups")
+    groups_ids = re.findall("<GroupId>(.*)</GroupId>", groups_data)
+
+    assert group_id in groups_ids
diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py
index 758ff8940..92fb3dfd0 100644
---
a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -1,876 +1,876 @@ -from __future__ import unicode_literals - -import json -import sure #noqa -import boto3 - -from moto import mock_iot - - -@mock_iot -def test_attach_policy(): - client = boto3.client('iot', region_name='ap-northeast-1') - policy_name = 'my-policy' - doc = '{}' - - cert = client.create_keys_and_certificate(setAsActive=True) - cert_arn = cert['certificateArn'] - client.create_policy(policyName=policy_name, policyDocument=doc) - client.attach_policy(policyName=policy_name, target=cert_arn) - - res = client.list_attached_policies(target=cert_arn) - res.should.have.key('policies').which.should.have.length_of(1) - res['policies'][0]['policyName'].should.equal('my-policy') - - -@mock_iot -def test_detach_policy(): - client = boto3.client('iot', region_name='ap-northeast-1') - policy_name = 'my-policy' - doc = '{}' - - cert = client.create_keys_and_certificate(setAsActive=True) - cert_arn = cert['certificateArn'] - client.create_policy(policyName=policy_name, policyDocument=doc) - client.attach_policy(policyName=policy_name, target=cert_arn) - - res = client.list_attached_policies(target=cert_arn) - res.should.have.key('policies').which.should.have.length_of(1) - res['policies'][0]['policyName'].should.equal('my-policy') - - client.detach_policy(policyName=policy_name, target=cert_arn) - res = client.list_attached_policies(target=cert_arn) - res.should.have.key('policies').which.should.be.empty - - -@mock_iot -def test_list_attached_policies(): - client = boto3.client('iot', region_name='ap-northeast-1') - cert = client.create_keys_and_certificate(setAsActive=True) - policies = client.list_attached_policies(target=cert['certificateArn']) - policies['policies'].should.be.empty - - -@mock_iot -def test_policy_versions(): - client = boto3.client('iot', region_name='ap-northeast-1') - policy_name = 'my-policy' - doc = '{}' - - policy = client.create_policy(policyName=policy_name, policyDocument=doc) - policy.should.have.key('policyName').which.should.equal(policy_name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(json.dumps({})) - policy.should.have.key('policyVersionId').which.should.equal('1') - - policy = client.get_policy(policyName=policy_name) - policy.should.have.key('policyName').which.should.equal(policy_name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(json.dumps({})) - policy.should.have.key('defaultVersionId').which.should.equal(policy['defaultVersionId']) - - policy1 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({'version': 'version_1'}), - setAsDefault=True) - policy1.should.have.key('policyArn').which.should_not.be.none - policy1.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) - policy1.should.have.key('policyVersionId').which.should.equal('2') - policy1.should.have.key('isDefaultVersion').which.should.equal(True) - - policy2 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({'version': 'version_2'}), - setAsDefault=False) - policy2.should.have.key('policyArn').which.should_not.be.none - policy2.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_2'})) - policy2.should.have.key('policyVersionId').which.should.equal('3') - policy2.should.have.key('isDefaultVersion').which.should.equal(False) - - policy = 
client.get_policy(policyName=policy_name) - policy.should.have.key('policyName').which.should.equal(policy_name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) - policy.should.have.key('defaultVersionId').which.should.equal(policy1['policyVersionId']) - - policy_versions = client.list_policy_versions(policyName=policy_name) - policy_versions.should.have.key('policyVersions').which.should.have.length_of(3) - list(map(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])).count(True).should.equal(1) - default_policy = list(filter(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])) - default_policy[0].should.have.key('versionId').should.equal(policy1['policyVersionId']) - - policy = client.get_policy(policyName=policy_name) - policy.should.have.key('policyName').which.should.equal(policy_name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) - policy.should.have.key('defaultVersionId').which.should.equal(policy1['policyVersionId']) - - client.set_default_policy_version(policyName=policy_name, policyVersionId=policy2['policyVersionId']) - policy_versions = client.list_policy_versions(policyName=policy_name) - policy_versions.should.have.key('policyVersions').which.should.have.length_of(3) - list(map(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])).count(True).should.equal(1) - default_policy = list(filter(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])) - default_policy[0].should.have.key('versionId').should.equal(policy2['policyVersionId']) - - policy = client.get_policy(policyName=policy_name) - policy.should.have.key('policyName').which.should.equal(policy_name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_2'})) - policy.should.have.key('defaultVersionId').which.should.equal(policy2['policyVersionId']) - - client.delete_policy_version(policyName=policy_name, policyVersionId='1') - policy_versions = client.list_policy_versions(policyName=policy_name) - policy_versions.should.have.key('policyVersions').which.should.have.length_of(2) - - client.delete_policy_version(policyName=policy_name, policyVersionId=policy1['policyVersionId']) - policy_versions = client.list_policy_versions(policyName=policy_name) - policy_versions.should.have.key('policyVersions').which.should.have.length_of(1) - - # should fail as it's the default policy. 
Should use delete_policy instead - try: - client.delete_policy_version(policyName=policy_name, policyVersionId=policy2['policyVersionId']) - assert False, 'Should have failed in previous call' - except Exception as exception: - exception.response['Error']['Message'].should.equal('Cannot delete the default version of a policy') - - -@mock_iot -def test_things(): - client = boto3.client('iot', region_name='ap-northeast-1') - name = 'my-thing' - type_name = 'my-type-name' - - # thing type - thing_type = client.create_thing_type(thingTypeName=type_name) - thing_type.should.have.key('thingTypeName').which.should.equal(type_name) - thing_type.should.have.key('thingTypeArn') - - res = client.list_thing_types() - res.should.have.key('thingTypes').which.should.have.length_of(1) - for thing_type in res['thingTypes']: - thing_type.should.have.key('thingTypeName').which.should_not.be.none - - thing_type = client.describe_thing_type(thingTypeName=type_name) - thing_type.should.have.key('thingTypeName').which.should.equal(type_name) - thing_type.should.have.key('thingTypeProperties') - thing_type.should.have.key('thingTypeMetadata') - - # thing - thing = client.create_thing(thingName=name, thingTypeName=type_name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - res = client.list_things() - res.should.have.key('things').which.should.have.length_of(1) - for thing in res['things']: - thing.should.have.key('thingName').which.should_not.be.none - thing.should.have.key('thingArn').which.should_not.be.none - - thing = client.update_thing(thingName=name, attributePayload={'attributes': {'k1': 'v1'}}) - res = client.list_things() - res.should.have.key('things').which.should.have.length_of(1) - for thing in res['things']: - thing.should.have.key('thingName').which.should_not.be.none - thing.should.have.key('thingArn').which.should_not.be.none - res['things'][0]['attributes'].should.have.key('k1').which.should.equal('v1') - - thing = client.describe_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('defaultClientId') - thing.should.have.key('thingTypeName') - thing.should.have.key('attributes') - thing.should.have.key('version') - - # delete thing - client.delete_thing(thingName=name) - res = client.list_things() - res.should.have.key('things').which.should.have.length_of(0) - - # delete thing type - client.delete_thing_type(thingTypeName=type_name) - res = client.list_thing_types() - res.should.have.key('thingTypes').which.should.have.length_of(0) - - -@mock_iot -def test_list_thing_types(): - client = boto3.client('iot', region_name='ap-northeast-1') - - for i in range(0, 100): - client.create_thing_type(thingTypeName=str(i + 1)) - - thing_types = client.list_thing_types() - thing_types.should.have.key('nextToken') - thing_types.should.have.key('thingTypes').which.should.have.length_of(50) - thing_types['thingTypes'][0]['thingTypeName'].should.equal('1') - thing_types['thingTypes'][-1]['thingTypeName'].should.equal('50') - - thing_types = client.list_thing_types(nextToken=thing_types['nextToken']) - thing_types.should.have.key('thingTypes').which.should.have.length_of(50) - thing_types.should_not.have.key('nextToken') - thing_types['thingTypes'][0]['thingTypeName'].should.equal('51') - thing_types['thingTypes'][-1]['thingTypeName'].should.equal('100') - - -@mock_iot -def test_list_thing_types_with_typename_filter(): - client = boto3.client('iot', region_name='ap-northeast-1') - - 
client.create_thing_type(thingTypeName='thing') - client.create_thing_type(thingTypeName='thingType') - client.create_thing_type(thingTypeName='thingTypeName') - client.create_thing_type(thingTypeName='thingTypeNameGroup') - client.create_thing_type(thingTypeName='shouldNotFind') - client.create_thing_type(thingTypeName='find me it shall not') - - thing_types = client.list_thing_types(thingTypeName='thing') - thing_types.should_not.have.key('nextToken') - thing_types.should.have.key('thingTypes').which.should.have.length_of(4) - thing_types['thingTypes'][0]['thingTypeName'].should.equal('thing') - thing_types['thingTypes'][-1]['thingTypeName'].should.equal('thingTypeNameGroup') - - thing_types = client.list_thing_types(thingTypeName='thingTypeName') - thing_types.should_not.have.key('nextToken') - thing_types.should.have.key('thingTypes').which.should.have.length_of(2) - thing_types['thingTypes'][0]['thingTypeName'].should.equal('thingTypeName') - thing_types['thingTypes'][-1]['thingTypeName'].should.equal('thingTypeNameGroup') - - -@mock_iot -def test_list_things_with_next_token(): - client = boto3.client('iot', region_name='ap-northeast-1') - - for i in range(0, 200): - client.create_thing(thingName=str(i + 1)) - - things = client.list_things() - things.should.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('1') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/1') - things['things'][-1]['thingName'].should.equal('50') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/50') - - things = client.list_things(nextToken=things['nextToken']) - things.should.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('51') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/51') - things['things'][-1]['thingName'].should.equal('100') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/100') - - things = client.list_things(nextToken=things['nextToken']) - things.should.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('101') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/101') - things['things'][-1]['thingName'].should.equal('150') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/150') - - things = client.list_things(nextToken=things['nextToken']) - things.should_not.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('151') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/151') - things['things'][-1]['thingName'].should.equal('200') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/200') - - -@mock_iot -def test_list_things_with_attribute_and_thing_type_filter_and_next_token(): - client = boto3.client('iot', region_name='ap-northeast-1') - client.create_thing_type(thingTypeName='my-thing-type') - - for i in range(0, 200): - if not (i + 1) % 3: - attribute_payload = { - 'attributes': { - 'foo': 'bar' - } - } - elif not (i + 1) % 5: - attribute_payload = { - 'attributes': { - 'bar': 'foo' - } - } - else: - attribute_payload = {} - - if not (i + 1) % 2: - thing_type_name = 'my-thing-type' - 
client.create_thing(thingName=str(i + 1), thingTypeName=thing_type_name, attributePayload=attribute_payload) - else: - client.create_thing(thingName=str(i + 1), attributePayload=attribute_payload) - - # Test filter for thingTypeName - things = client.list_things(thingTypeName=thing_type_name) - things.should.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('2') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/2') - things['things'][-1]['thingName'].should.equal('100') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/100') - all(item['thingTypeName'] == thing_type_name for item in things['things']) - - things = client.list_things(nextToken=things['nextToken'], thingTypeName=thing_type_name) - things.should_not.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('102') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/102') - things['things'][-1]['thingName'].should.equal('200') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/200') - all(item['thingTypeName'] == thing_type_name for item in things['things']) - - # Test filter for attributes - things = client.list_things(attributeName='foo', attributeValue='bar') - things.should.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('3') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/3') - things['things'][-1]['thingName'].should.equal('150') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/150') - all(item['attributes'] == {'foo': 'bar'} for item in things['things']) - - things = client.list_things(nextToken=things['nextToken'], attributeName='foo', attributeValue='bar') - things.should_not.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(16) - things['things'][0]['thingName'].should.equal('153') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/153') - things['things'][-1]['thingName'].should.equal('198') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/198') - all(item['attributes'] == {'foo': 'bar'} for item in things['things']) - - # Test filter for attributes and thingTypeName - things = client.list_things(thingTypeName=thing_type_name, attributeName='foo', attributeValue='bar') - things.should_not.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(33) - things['things'][0]['thingName'].should.equal('6') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/6') - things['things'][-1]['thingName'].should.equal('198') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/198') - all(item['attributes'] == {'foo': 'bar'} and item['thingTypeName'] == thing_type_name for item in things['things']) - - -@mock_iot -def test_certs(): - client = boto3.client('iot', region_name='ap-northeast-1') - cert = client.create_keys_and_certificate(setAsActive=True) - cert.should.have.key('certificateArn').which.should_not.be.none - cert.should.have.key('certificateId').which.should_not.be.none - cert.should.have.key('certificatePem').which.should_not.be.none - cert.should.have.key('keyPair') - 
cert['keyPair'].should.have.key('PublicKey').which.should_not.be.none - cert['keyPair'].should.have.key('PrivateKey').which.should_not.be.none - cert_id = cert['certificateId'] - - cert = client.describe_certificate(certificateId=cert_id) - cert.should.have.key('certificateDescription') - cert_desc = cert['certificateDescription'] - cert_desc.should.have.key('certificateArn').which.should_not.be.none - cert_desc.should.have.key('certificateId').which.should_not.be.none - cert_desc.should.have.key('certificatePem').which.should_not.be.none - cert_desc.should.have.key('status').which.should.equal('ACTIVE') - - res = client.list_certificates() - res.should.have.key('certificates').which.should.have.length_of(1) - for cert in res['certificates']: - cert.should.have.key('certificateArn').which.should_not.be.none - cert.should.have.key('certificateId').which.should_not.be.none - cert.should.have.key('status').which.should_not.be.none - cert.should.have.key('creationDate').which.should_not.be.none - - client.update_certificate(certificateId=cert_id, newStatus='REVOKED') - cert = client.describe_certificate(certificateId=cert_id) - cert_desc.should.have.key('status').which.should.equal('ACTIVE') - - client.delete_certificate(certificateId=cert_id) - res = client.list_certificates() - res.should.have.key('certificates').which.should.have.length_of(0) - - -@mock_iot -def test_certs_create_inactive(): - client = boto3.client('iot', region_name='ap-northeast-1') - cert = client.create_keys_and_certificate(setAsActive=False) - cert_id = cert['certificateId'] - - cert = client.describe_certificate(certificateId=cert_id) - cert.should.have.key('certificateDescription') - cert_desc = cert['certificateDescription'] - cert_desc.should.have.key('status').which.should.equal('INACTIVE') - - client.update_certificate(certificateId=cert_id, newStatus='ACTIVE') - cert = client.describe_certificate(certificateId=cert_id) - cert.should.have.key('certificateDescription') - cert_desc = cert['certificateDescription'] - cert_desc.should.have.key('status').which.should.equal('ACTIVE') - - -@mock_iot -def test_policy(): - client = boto3.client('iot', region_name='ap-northeast-1') - name = 'my-policy' - doc = '{}' - policy = client.create_policy(policyName=name, policyDocument=doc) - policy.should.have.key('policyName').which.should.equal(name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(doc) - policy.should.have.key('policyVersionId').which.should.equal('1') - - policy = client.get_policy(policyName=name) - policy.should.have.key('policyName').which.should.equal(name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(doc) - policy.should.have.key('defaultVersionId').which.should.equal('1') - - res = client.list_policies() - res.should.have.key('policies').which.should.have.length_of(1) - for policy in res['policies']: - policy.should.have.key('policyName').which.should_not.be.none - policy.should.have.key('policyArn').which.should_not.be.none - - client.delete_policy(policyName=name) - res = client.list_policies() - res.should.have.key('policies').which.should.have.length_of(0) - - -@mock_iot -def test_principal_policy(): - client = boto3.client('iot', region_name='ap-northeast-1') - policy_name = 'my-policy' - doc = '{}' - policy = client.create_policy(policyName=policy_name, policyDocument=doc) - cert = client.create_keys_and_certificate(setAsActive=True) - cert_arn 
= cert['certificateArn'] - - client.attach_principal_policy(policyName=policy_name, principal=cert_arn) - - res = client.list_principal_policies(principal=cert_arn) - res.should.have.key('policies').which.should.have.length_of(1) - for policy in res['policies']: - policy.should.have.key('policyName').which.should_not.be.none - policy.should.have.key('policyArn').which.should_not.be.none - - res = client.list_policy_principals(policyName=policy_name) - res.should.have.key('principals').which.should.have.length_of(1) - for principal in res['principals']: - principal.should_not.be.none - - client.detach_principal_policy(policyName=policy_name, principal=cert_arn) - res = client.list_principal_policies(principal=cert_arn) - res.should.have.key('policies').which.should.have.length_of(0) - res = client.list_policy_principals(policyName=policy_name) - res.should.have.key('principals').which.should.have.length_of(0) - - -@mock_iot -def test_principal_thing(): - client = boto3.client('iot', region_name='ap-northeast-1') - thing_name = 'my-thing' - thing = client.create_thing(thingName=thing_name) - cert = client.create_keys_and_certificate(setAsActive=True) - cert_arn = cert['certificateArn'] - - client.attach_thing_principal(thingName=thing_name, principal=cert_arn) - - res = client.list_principal_things(principal=cert_arn) - res.should.have.key('things').which.should.have.length_of(1) - for thing in res['things']: - thing.should_not.be.none - res = client.list_thing_principals(thingName=thing_name) - res.should.have.key('principals').which.should.have.length_of(1) - for principal in res['principals']: - principal.should_not.be.none - - client.detach_thing_principal(thingName=thing_name, principal=cert_arn) - res = client.list_principal_things(principal=cert_arn) - res.should.have.key('things').which.should.have.length_of(0) - res = client.list_thing_principals(thingName=thing_name) - res.should.have.key('principals').which.should.have.length_of(0) - - -@mock_iot -def test_thing_groups(): - client = boto3.client('iot', region_name='ap-northeast-1') - group_name = 'my-group-name' - - # thing group - thing_group = client.create_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupName').which.should.equal(group_name) - thing_group.should.have.key('thingGroupArn') - - res = client.list_thing_groups() - res.should.have.key('thingGroups').which.should.have.length_of(1) - for thing_group in res['thingGroups']: - thing_group.should.have.key('groupName').which.should_not.be.none - thing_group.should.have.key('groupArn').which.should_not.be.none - - thing_group = client.describe_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupName').which.should.equal(group_name) - thing_group.should.have.key('thingGroupProperties') - thing_group.should.have.key('thingGroupMetadata') - thing_group.should.have.key('version') - - # delete thing group - client.delete_thing_group(thingGroupName=group_name) - res = client.list_thing_groups() - res.should.have.key('thingGroups').which.should.have.length_of(0) - - # props create test - props = { - 'thingGroupDescription': 'my first thing group', - 'attributePayload': { - 'attributes': { - 'key1': 'val01', - 'Key02': 'VAL2' - } - } - } - thing_group = client.create_thing_group(thingGroupName=group_name, thingGroupProperties=props) - thing_group.should.have.key('thingGroupName').which.should.equal(group_name) - thing_group.should.have.key('thingGroupArn') - - thing_group = 
client.describe_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupProperties') \ - .which.should.have.key('attributePayload') \ - .which.should.have.key('attributes') - res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] - res_props.should.have.key('key1').which.should.equal('val01') - res_props.should.have.key('Key02').which.should.equal('VAL2') - - # props update test with merge - new_props = { - 'attributePayload': { - 'attributes': { - 'k3': 'v3' - }, - 'merge': True - } - } - client.update_thing_group( - thingGroupName=group_name, - thingGroupProperties=new_props - ) - thing_group = client.describe_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupProperties') \ - .which.should.have.key('attributePayload') \ - .which.should.have.key('attributes') - res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] - res_props.should.have.key('key1').which.should.equal('val01') - res_props.should.have.key('Key02').which.should.equal('VAL2') - - res_props.should.have.key('k3').which.should.equal('v3') - - # props update test - new_props = { - 'attributePayload': { - 'attributes': { - 'k4': 'v4' - } - } - } - client.update_thing_group( - thingGroupName=group_name, - thingGroupProperties=new_props - ) - thing_group = client.describe_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupProperties') \ - .which.should.have.key('attributePayload') \ - .which.should.have.key('attributes') - res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] - res_props.should.have.key('k4').which.should.equal('v4') - res_props.should_not.have.key('key1') - - -@mock_iot -def test_thing_group_relations(): - client = boto3.client('iot', region_name='ap-northeast-1') - name = 'my-thing' - group_name = 'my-group-name' - - # thing group - thing_group = client.create_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupName').which.should.equal(group_name) - thing_group.should.have.key('thingGroupArn') - - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # add in 4 way - client.add_thing_to_thing_group( - thingGroupName=group_name, - thingName=name - ) - client.add_thing_to_thing_group( - thingGroupArn=thing_group['thingGroupArn'], - thingArn=thing['thingArn'] - ) - client.add_thing_to_thing_group( - thingGroupName=group_name, - thingArn=thing['thingArn'] - ) - client.add_thing_to_thing_group( - thingGroupArn=thing_group['thingGroupArn'], - thingName=name - ) - - things = client.list_things_in_thing_group( - thingGroupName=group_name - ) - things.should.have.key('things') - things['things'].should.have.length_of(1) - - thing_groups = client.list_thing_groups_for_thing( - thingName=name - ) - thing_groups.should.have.key('thingGroups') - thing_groups['thingGroups'].should.have.length_of(1) - - # remove in 4 way - client.remove_thing_from_thing_group( - thingGroupName=group_name, - thingName=name - ) - client.remove_thing_from_thing_group( - thingGroupArn=thing_group['thingGroupArn'], - thingArn=thing['thingArn'] - ) - client.remove_thing_from_thing_group( - thingGroupName=group_name, - thingArn=thing['thingArn'] - ) - client.remove_thing_from_thing_group( - thingGroupArn=thing_group['thingGroupArn'], - thingName=name - ) - things = client.list_things_in_thing_group( - thingGroupName=group_name - ) - 
things.should.have.key('things')
-    things['things'].should.have.length_of(0)
-
-    # update thing group for thing
-    client.update_thing_groups_for_thing(
-        thingName=name,
-        thingGroupsToAdd=[
-            group_name
-        ]
-    )
-    things = client.list_things_in_thing_group(
-        thingGroupName=group_name
-    )
-    things.should.have.key('things')
-    things['things'].should.have.length_of(1)
-
-    client.update_thing_groups_for_thing(
-        thingName=name,
-        thingGroupsToRemove=[
-            group_name
-        ]
-    )
-    things = client.list_things_in_thing_group(
-        thingGroupName=group_name
-    )
-    things.should.have.key('things')
-    things['things'].should.have.length_of(0)
-
-
-@mock_iot
-def test_create_job():
-    client = boto3.client('iot', region_name='eu-west-1')
-    name = "my-thing"
-    job_id = "TestJob"
-    # thing
-    thing = client.create_thing(thingName=name)
-    thing.should.have.key('thingName').which.should.equal(name)
-    thing.should.have.key('thingArn')
-
-    # job document
-    job_document = {
-        "field": "value"
-    }
-
-    job = client.create_job(
-        jobId=job_id,
-        targets=[thing["thingArn"]],
-        document=json.dumps(job_document),
-        description="Description",
-        presignedUrlConfig={
-            'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
-            'expiresInSec': 123
-        },
-        targetSelection="CONTINUOUS",
-        jobExecutionsRolloutConfig={
-            'maximumPerMinute': 10
-        }
-    )
-
-    job.should.have.key('jobId').which.should.equal(job_id)
-    job.should.have.key('jobArn')
-    job.should.have.key('description')
-
-
-@mock_iot
-def test_describe_job():
-    client = boto3.client('iot', region_name='eu-west-1')
-    name = "my-thing"
-    job_id = "TestJob"
-    # thing
-    thing = client.create_thing(thingName=name)
-    thing.should.have.key('thingName').which.should.equal(name)
-    thing.should.have.key('thingArn')
-
-    job = client.create_job(
-        jobId=job_id,
-        targets=[thing["thingArn"]],
-        documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json",
-        presignedUrlConfig={
-            'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
-            'expiresInSec': 123
-        },
-        targetSelection="CONTINUOUS",
-        jobExecutionsRolloutConfig={
-            'maximumPerMinute': 10
-        }
-    )
-
-    job.should.have.key('jobId').which.should.equal(job_id)
-    job.should.have.key('jobArn')
-
-    job = client.describe_job(jobId=job_id)
-    job.should.have.key('documentSource')
-    job.should.have.key('job')
-    job.should.have.key('job').which.should.have.key("jobArn")
-    job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id)
-    job.should.have.key('job').which.should.have.key("targets")
-    job.should.have.key('job').which.should.have.key("jobProcessDetails")
-    job.should.have.key('job').which.should.have.key("lastUpdatedAt")
-    job.should.have.key('job').which.should.have.key("createdAt")
-    job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig")
-    job.should.have.key('job').which.should.have.key("targetSelection").which.should.equal("CONTINUOUS")
-    job.should.have.key('job').which.should.have.key("presignedUrlConfig")
-    job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key(
-        "roleArn").which.should.equal('arn:aws:iam::1:role/service-role/iot_job_role')
-    job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key(
-        "expiresInSec").which.should.equal(123)
-    job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key(
-        "maximumPerMinute").which.should.equal(10)
-
-
-@mock_iot
-def
test_describe_job_1(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - document=json.dumps(job_document), - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - - job = client.describe_job(jobId=job_id) - job.should.have.key('job') - job.should.have.key('job').which.should.have.key("jobArn") - job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) - job.should.have.key('job').which.should.have.key("targets") - job.should.have.key('job').which.should.have.key("jobProcessDetails") - job.should.have.key('job').which.should.have.key("lastUpdatedAt") - job.should.have.key('job').which.should.have.key("createdAt") - job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig") - job.should.have.key('job').which.should.have.key("targetSelection").which.should.equal("CONTINUOUS") - job.should.have.key('job').which.should.have.key("presignedUrlConfig") - job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( - "roleArn").which.should.equal('arn:aws:iam::1:role/service-role/iot_job_role') - job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( - "expiresInSec").which.should.equal(123) - job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key( - "maximumPerMinute").which.should.equal(10) - - -@mock_iot -def test_get_job_document_with_document_source(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - - job_document = client.get_job_document(jobId=job_id) - job_document.should.have.key('document').which.should.equal('') - - -@mock_iot -def test_get_job_document_with_document(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - document=json.dumps(job_document), - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - 
job.should.have.key('jobId').which.should.equal(job_id)
-    job.should.have.key('jobArn')
-
-    job_document = client.get_job_document(jobId=job_id)
-    job_document.should.have.key('document').which.should.equal("{\"field\": \"value\"}")
+from __future__ import unicode_literals
+
+import json
+import sure  # noqa
+import boto3
+
+from moto import mock_iot
+
+
+@mock_iot
+def test_attach_policy():
+    client = boto3.client('iot', region_name='ap-northeast-1')
+    policy_name = 'my-policy'
+    doc = '{}'
+
+    cert = client.create_keys_and_certificate(setAsActive=True)
+    cert_arn = cert['certificateArn']
+    client.create_policy(policyName=policy_name, policyDocument=doc)
+    client.attach_policy(policyName=policy_name, target=cert_arn)
+
+    res = client.list_attached_policies(target=cert_arn)
+    res.should.have.key('policies').which.should.have.length_of(1)
+    res['policies'][0]['policyName'].should.equal('my-policy')
+
+
+@mock_iot
+def test_detach_policy():
+    client = boto3.client('iot', region_name='ap-northeast-1')
+    policy_name = 'my-policy'
+    doc = '{}'
+
+    cert = client.create_keys_and_certificate(setAsActive=True)
+    cert_arn = cert['certificateArn']
+    client.create_policy(policyName=policy_name, policyDocument=doc)
+    client.attach_policy(policyName=policy_name, target=cert_arn)
+
+    res = client.list_attached_policies(target=cert_arn)
+    res.should.have.key('policies').which.should.have.length_of(1)
+    res['policies'][0]['policyName'].should.equal('my-policy')
+
+    client.detach_policy(policyName=policy_name, target=cert_arn)
+    res = client.list_attached_policies(target=cert_arn)
+    res.should.have.key('policies').which.should.be.empty
+
+
+@mock_iot
+def test_list_attached_policies():
+    client = boto3.client('iot', region_name='ap-northeast-1')
+    cert = client.create_keys_and_certificate(setAsActive=True)
+    policies = client.list_attached_policies(target=cert['certificateArn'])
+    policies['policies'].should.be.empty
+
+
+@mock_iot
+def test_policy_versions():
+    client = boto3.client('iot', region_name='ap-northeast-1')
+    policy_name = 'my-policy'
+    doc = '{}'
+
+    policy = client.create_policy(policyName=policy_name, policyDocument=doc)
+    policy.should.have.key('policyName').which.should.equal(policy_name)
+    policy.should.have.key('policyArn').which.should_not.be.none
+    policy.should.have.key('policyDocument').which.should.equal(json.dumps({}))
+    policy.should.have.key('policyVersionId').which.should.equal('1')
+
+    policy = client.get_policy(policyName=policy_name)
+    policy.should.have.key('policyName').which.should.equal(policy_name)
+    policy.should.have.key('policyArn').which.should_not.be.none
+    policy.should.have.key('policyDocument').which.should.equal(json.dumps({}))
+    policy.should.have.key('defaultVersionId').which.should.equal('1')
+
+    policy1 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({'version': 'version_1'}),
+                                           setAsDefault=True)
+    policy1.should.have.key('policyArn').which.should_not.be.none
+    policy1.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'}))
+    policy1.should.have.key('policyVersionId').which.should.equal('2')
+    policy1.should.have.key('isDefaultVersion').which.should.equal(True)
+
+    policy2 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({'version': 'version_2'}),
+                                           setAsDefault=False)
+    policy2.should.have.key('policyArn').which.should_not.be.none
+    policy2.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_2'}))
+    
policy2.should.have.key('policyVersionId').which.should.equal('3') + policy2.should.have.key('isDefaultVersion').which.should.equal(False) + + policy = client.get_policy(policyName=policy_name) + policy.should.have.key('policyName').which.should.equal(policy_name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) + policy.should.have.key('defaultVersionId').which.should.equal(policy1['policyVersionId']) + + policy_versions = client.list_policy_versions(policyName=policy_name) + policy_versions.should.have.key('policyVersions').which.should.have.length_of(3) + list(map(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])).count(True).should.equal(1) + default_policy = list(filter(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])) + default_policy[0].should.have.key('versionId').should.equal(policy1['policyVersionId']) + + policy = client.get_policy(policyName=policy_name) + policy.should.have.key('policyName').which.should.equal(policy_name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) + policy.should.have.key('defaultVersionId').which.should.equal(policy1['policyVersionId']) + + client.set_default_policy_version(policyName=policy_name, policyVersionId=policy2['policyVersionId']) + policy_versions = client.list_policy_versions(policyName=policy_name) + policy_versions.should.have.key('policyVersions').which.should.have.length_of(3) + list(map(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])).count(True).should.equal(1) + default_policy = list(filter(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])) + default_policy[0].should.have.key('versionId').should.equal(policy2['policyVersionId']) + + policy = client.get_policy(policyName=policy_name) + policy.should.have.key('policyName').which.should.equal(policy_name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_2'})) + policy.should.have.key('defaultVersionId').which.should.equal(policy2['policyVersionId']) + + client.delete_policy_version(policyName=policy_name, policyVersionId='1') + policy_versions = client.list_policy_versions(policyName=policy_name) + policy_versions.should.have.key('policyVersions').which.should.have.length_of(2) + + client.delete_policy_version(policyName=policy_name, policyVersionId=policy1['policyVersionId']) + policy_versions = client.list_policy_versions(policyName=policy_name) + policy_versions.should.have.key('policyVersions').which.should.have.length_of(1) + + # should fail as it's the default policy. 
Should use delete_policy instead + try: + client.delete_policy_version(policyName=policy_name, policyVersionId=policy2['policyVersionId']) + assert False, 'Should have failed in previous call' + except Exception as exception: + exception.response['Error']['Message'].should.equal('Cannot delete the default version of a policy') + + +@mock_iot +def test_things(): + client = boto3.client('iot', region_name='ap-northeast-1') + name = 'my-thing' + type_name = 'my-type-name' + + # thing type + thing_type = client.create_thing_type(thingTypeName=type_name) + thing_type.should.have.key('thingTypeName').which.should.equal(type_name) + thing_type.should.have.key('thingTypeArn') + + res = client.list_thing_types() + res.should.have.key('thingTypes').which.should.have.length_of(1) + for thing_type in res['thingTypes']: + thing_type.should.have.key('thingTypeName').which.should_not.be.none + + thing_type = client.describe_thing_type(thingTypeName=type_name) + thing_type.should.have.key('thingTypeName').which.should.equal(type_name) + thing_type.should.have.key('thingTypeProperties') + thing_type.should.have.key('thingTypeMetadata') + + # thing + thing = client.create_thing(thingName=name, thingTypeName=type_name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + res = client.list_things() + res.should.have.key('things').which.should.have.length_of(1) + for thing in res['things']: + thing.should.have.key('thingName').which.should_not.be.none + thing.should.have.key('thingArn').which.should_not.be.none + + thing = client.update_thing(thingName=name, attributePayload={'attributes': {'k1': 'v1'}}) + res = client.list_things() + res.should.have.key('things').which.should.have.length_of(1) + for thing in res['things']: + thing.should.have.key('thingName').which.should_not.be.none + thing.should.have.key('thingArn').which.should_not.be.none + res['things'][0]['attributes'].should.have.key('k1').which.should.equal('v1') + + thing = client.describe_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('defaultClientId') + thing.should.have.key('thingTypeName') + thing.should.have.key('attributes') + thing.should.have.key('version') + + # delete thing + client.delete_thing(thingName=name) + res = client.list_things() + res.should.have.key('things').which.should.have.length_of(0) + + # delete thing type + client.delete_thing_type(thingTypeName=type_name) + res = client.list_thing_types() + res.should.have.key('thingTypes').which.should.have.length_of(0) + + +@mock_iot +def test_list_thing_types(): + client = boto3.client('iot', region_name='ap-northeast-1') + + for i in range(0, 100): + client.create_thing_type(thingTypeName=str(i + 1)) + + thing_types = client.list_thing_types() + thing_types.should.have.key('nextToken') + thing_types.should.have.key('thingTypes').which.should.have.length_of(50) + thing_types['thingTypes'][0]['thingTypeName'].should.equal('1') + thing_types['thingTypes'][-1]['thingTypeName'].should.equal('50') + + thing_types = client.list_thing_types(nextToken=thing_types['nextToken']) + thing_types.should.have.key('thingTypes').which.should.have.length_of(50) + thing_types.should_not.have.key('nextToken') + thing_types['thingTypes'][0]['thingTypeName'].should.equal('51') + thing_types['thingTypes'][-1]['thingTypeName'].should.equal('100') + + +@mock_iot +def test_list_thing_types_with_typename_filter(): + client = boto3.client('iot', region_name='ap-northeast-1') + + 
client.create_thing_type(thingTypeName='thing') + client.create_thing_type(thingTypeName='thingType') + client.create_thing_type(thingTypeName='thingTypeName') + client.create_thing_type(thingTypeName='thingTypeNameGroup') + client.create_thing_type(thingTypeName='shouldNotFind') + client.create_thing_type(thingTypeName='find me it shall not') + + thing_types = client.list_thing_types(thingTypeName='thing') + thing_types.should_not.have.key('nextToken') + thing_types.should.have.key('thingTypes').which.should.have.length_of(4) + thing_types['thingTypes'][0]['thingTypeName'].should.equal('thing') + thing_types['thingTypes'][-1]['thingTypeName'].should.equal('thingTypeNameGroup') + + thing_types = client.list_thing_types(thingTypeName='thingTypeName') + thing_types.should_not.have.key('nextToken') + thing_types.should.have.key('thingTypes').which.should.have.length_of(2) + thing_types['thingTypes'][0]['thingTypeName'].should.equal('thingTypeName') + thing_types['thingTypes'][-1]['thingTypeName'].should.equal('thingTypeNameGroup') + + +@mock_iot +def test_list_things_with_next_token(): + client = boto3.client('iot', region_name='ap-northeast-1') + + for i in range(0, 200): + client.create_thing(thingName=str(i + 1)) + + things = client.list_things() + things.should.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('1') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/1') + things['things'][-1]['thingName'].should.equal('50') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/50') + + things = client.list_things(nextToken=things['nextToken']) + things.should.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('51') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/51') + things['things'][-1]['thingName'].should.equal('100') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/100') + + things = client.list_things(nextToken=things['nextToken']) + things.should.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('101') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/101') + things['things'][-1]['thingName'].should.equal('150') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/150') + + things = client.list_things(nextToken=things['nextToken']) + things.should_not.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('151') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/151') + things['things'][-1]['thingName'].should.equal('200') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/200') + + +@mock_iot +def test_list_things_with_attribute_and_thing_type_filter_and_next_token(): + client = boto3.client('iot', region_name='ap-northeast-1') + client.create_thing_type(thingTypeName='my-thing-type') + + for i in range(0, 200): + if not (i + 1) % 3: + attribute_payload = { + 'attributes': { + 'foo': 'bar' + } + } + elif not (i + 1) % 5: + attribute_payload = { + 'attributes': { + 'bar': 'foo' + } + } + else: + attribute_payload = {} + + if not (i + 1) % 2: + thing_type_name = 'my-thing-type' + 
client.create_thing(thingName=str(i + 1), thingTypeName=thing_type_name, attributePayload=attribute_payload)
+        else:
+            client.create_thing(thingName=str(i + 1), attributePayload=attribute_payload)
+
+    # Test filter for thingTypeName
+    things = client.list_things(thingTypeName=thing_type_name)
+    things.should.have.key('nextToken')
+    things.should.have.key('things').which.should.have.length_of(50)
+    things['things'][0]['thingName'].should.equal('2')
+    things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/2')
+    things['things'][-1]['thingName'].should.equal('100')
+    things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/100')
+    assert all(item['thingTypeName'] == thing_type_name for item in things['things'])
+
+    things = client.list_things(nextToken=things['nextToken'], thingTypeName=thing_type_name)
+    things.should_not.have.key('nextToken')
+    things.should.have.key('things').which.should.have.length_of(50)
+    things['things'][0]['thingName'].should.equal('102')
+    things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/102')
+    things['things'][-1]['thingName'].should.equal('200')
+    things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/200')
+    assert all(item['thingTypeName'] == thing_type_name for item in things['things'])
+
+    # Test filter for attributes
+    things = client.list_things(attributeName='foo', attributeValue='bar')
+    things.should.have.key('nextToken')
+    things.should.have.key('things').which.should.have.length_of(50)
+    things['things'][0]['thingName'].should.equal('3')
+    things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/3')
+    things['things'][-1]['thingName'].should.equal('150')
+    things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/150')
+    assert all(item['attributes'] == {'foo': 'bar'} for item in things['things'])
+
+    things = client.list_things(nextToken=things['nextToken'], attributeName='foo', attributeValue='bar')
+    things.should_not.have.key('nextToken')
+    things.should.have.key('things').which.should.have.length_of(16)
+    things['things'][0]['thingName'].should.equal('153')
+    things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/153')
+    things['things'][-1]['thingName'].should.equal('198')
+    things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/198')
+    assert all(item['attributes'] == {'foo': 'bar'} for item in things['things'])
+
+    # Test filter for attributes and thingTypeName
+    things = client.list_things(thingTypeName=thing_type_name, attributeName='foo', attributeValue='bar')
+    things.should_not.have.key('nextToken')
+    things.should.have.key('things').which.should.have.length_of(33)
+    things['things'][0]['thingName'].should.equal('6')
+    things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/6')
+    things['things'][-1]['thingName'].should.equal('198')
+    things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/198')
+    assert all(item['attributes'] == {'foo': 'bar'} and item['thingTypeName'] == thing_type_name for item in things['things'])
+
+
+@mock_iot
+def test_certs():
+    client = boto3.client('iot', region_name='ap-northeast-1')
+    cert = client.create_keys_and_certificate(setAsActive=True)
+    cert.should.have.key('certificateArn').which.should_not.be.none
+    cert.should.have.key('certificateId').which.should_not.be.none
+    cert.should.have.key('certificatePem').which.should_not.be.none
+    cert.should.have.key('keyPair')
+    
cert['keyPair'].should.have.key('PublicKey').which.should_not.be.none
+    cert['keyPair'].should.have.key('PrivateKey').which.should_not.be.none
+    cert_id = cert['certificateId']
+
+    cert = client.describe_certificate(certificateId=cert_id)
+    cert.should.have.key('certificateDescription')
+    cert_desc = cert['certificateDescription']
+    cert_desc.should.have.key('certificateArn').which.should_not.be.none
+    cert_desc.should.have.key('certificateId').which.should_not.be.none
+    cert_desc.should.have.key('certificatePem').which.should_not.be.none
+    cert_desc.should.have.key('status').which.should.equal('ACTIVE')
+
+    res = client.list_certificates()
+    res.should.have.key('certificates').which.should.have.length_of(1)
+    for cert in res['certificates']:
+        cert.should.have.key('certificateArn').which.should_not.be.none
+        cert.should.have.key('certificateId').which.should_not.be.none
+        cert.should.have.key('status').which.should_not.be.none
+        cert.should.have.key('creationDate').which.should_not.be.none
+
+    client.update_certificate(certificateId=cert_id, newStatus='REVOKED')
+    cert = client.describe_certificate(certificateId=cert_id)
+    # re-read the description from the fresh describe call; the status
+    # should now reflect the update above
+    cert_desc = cert['certificateDescription']
+    cert_desc.should.have.key('status').which.should.equal('REVOKED')
+
+    client.delete_certificate(certificateId=cert_id)
+    res = client.list_certificates()
+    res.should.have.key('certificates').which.should.have.length_of(0)
+
+
+@mock_iot
+def test_certs_create_inactive():
+    client = boto3.client('iot', region_name='ap-northeast-1')
+    cert = client.create_keys_and_certificate(setAsActive=False)
+    cert_id = cert['certificateId']
+
+    cert = client.describe_certificate(certificateId=cert_id)
+    cert.should.have.key('certificateDescription')
+    cert_desc = cert['certificateDescription']
+    cert_desc.should.have.key('status').which.should.equal('INACTIVE')
+
+    client.update_certificate(certificateId=cert_id, newStatus='ACTIVE')
+    cert = client.describe_certificate(certificateId=cert_id)
+    cert.should.have.key('certificateDescription')
+    cert_desc = cert['certificateDescription']
+    cert_desc.should.have.key('status').which.should.equal('ACTIVE')
+
+
+@mock_iot
+def test_policy():
+    client = boto3.client('iot', region_name='ap-northeast-1')
+    name = 'my-policy'
+    doc = '{}'
+    policy = client.create_policy(policyName=name, policyDocument=doc)
+    policy.should.have.key('policyName').which.should.equal(name)
+    policy.should.have.key('policyArn').which.should_not.be.none
+    policy.should.have.key('policyDocument').which.should.equal(doc)
+    policy.should.have.key('policyVersionId').which.should.equal('1')
+
+    policy = client.get_policy(policyName=name)
+    policy.should.have.key('policyName').which.should.equal(name)
+    policy.should.have.key('policyArn').which.should_not.be.none
+    policy.should.have.key('policyDocument').which.should.equal(doc)
+    policy.should.have.key('defaultVersionId').which.should.equal('1')
+
+    res = client.list_policies()
+    res.should.have.key('policies').which.should.have.length_of(1)
+    for policy in res['policies']:
+        policy.should.have.key('policyName').which.should_not.be.none
+        policy.should.have.key('policyArn').which.should_not.be.none
+
+    client.delete_policy(policyName=name)
+    res = client.list_policies()
+    res.should.have.key('policies').which.should.have.length_of(0)
+
+
+@mock_iot
+def test_principal_policy():
+    client = boto3.client('iot', region_name='ap-northeast-1')
+    policy_name = 'my-policy'
+    doc = '{}'
+    policy = client.create_policy(policyName=policy_name, policyDocument=doc)
+    cert = client.create_keys_and_certificate(setAsActive=True)
+    cert_arn
= cert['certificateArn'] + + client.attach_principal_policy(policyName=policy_name, principal=cert_arn) + + res = client.list_principal_policies(principal=cert_arn) + res.should.have.key('policies').which.should.have.length_of(1) + for policy in res['policies']: + policy.should.have.key('policyName').which.should_not.be.none + policy.should.have.key('policyArn').which.should_not.be.none + + res = client.list_policy_principals(policyName=policy_name) + res.should.have.key('principals').which.should.have.length_of(1) + for principal in res['principals']: + principal.should_not.be.none + + client.detach_principal_policy(policyName=policy_name, principal=cert_arn) + res = client.list_principal_policies(principal=cert_arn) + res.should.have.key('policies').which.should.have.length_of(0) + res = client.list_policy_principals(policyName=policy_name) + res.should.have.key('principals').which.should.have.length_of(0) + + +@mock_iot +def test_principal_thing(): + client = boto3.client('iot', region_name='ap-northeast-1') + thing_name = 'my-thing' + thing = client.create_thing(thingName=thing_name) + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + + client.attach_thing_principal(thingName=thing_name, principal=cert_arn) + + res = client.list_principal_things(principal=cert_arn) + res.should.have.key('things').which.should.have.length_of(1) + for thing in res['things']: + thing.should_not.be.none + res = client.list_thing_principals(thingName=thing_name) + res.should.have.key('principals').which.should.have.length_of(1) + for principal in res['principals']: + principal.should_not.be.none + + client.detach_thing_principal(thingName=thing_name, principal=cert_arn) + res = client.list_principal_things(principal=cert_arn) + res.should.have.key('things').which.should.have.length_of(0) + res = client.list_thing_principals(thingName=thing_name) + res.should.have.key('principals').which.should.have.length_of(0) + + +@mock_iot +def test_thing_groups(): + client = boto3.client('iot', region_name='ap-northeast-1') + group_name = 'my-group-name' + + # thing group + thing_group = client.create_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + res = client.list_thing_groups() + res.should.have.key('thingGroups').which.should.have.length_of(1) + for thing_group in res['thingGroups']: + thing_group.should.have.key('groupName').which.should_not.be.none + thing_group.should.have.key('groupArn').which.should_not.be.none + + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupProperties') + thing_group.should.have.key('thingGroupMetadata') + thing_group.should.have.key('version') + + # delete thing group + client.delete_thing_group(thingGroupName=group_name) + res = client.list_thing_groups() + res.should.have.key('thingGroups').which.should.have.length_of(0) + + # props create test + props = { + 'thingGroupDescription': 'my first thing group', + 'attributePayload': { + 'attributes': { + 'key1': 'val01', + 'Key02': 'VAL2' + } + } + } + thing_group = client.create_thing_group(thingGroupName=group_name, thingGroupProperties=props) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + thing_group = 
client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties') \ + .which.should.have.key('attributePayload') \ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('key1').which.should.equal('val01') + res_props.should.have.key('Key02').which.should.equal('VAL2') + + # props update test with merge + new_props = { + 'attributePayload': { + 'attributes': { + 'k3': 'v3' + }, + 'merge': True + } + } + client.update_thing_group( + thingGroupName=group_name, + thingGroupProperties=new_props + ) + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties') \ + .which.should.have.key('attributePayload') \ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('key1').which.should.equal('val01') + res_props.should.have.key('Key02').which.should.equal('VAL2') + + res_props.should.have.key('k3').which.should.equal('v3') + + # props update test + new_props = { + 'attributePayload': { + 'attributes': { + 'k4': 'v4' + } + } + } + client.update_thing_group( + thingGroupName=group_name, + thingGroupProperties=new_props + ) + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties') \ + .which.should.have.key('attributePayload') \ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('k4').which.should.equal('v4') + res_props.should_not.have.key('key1') + + +@mock_iot +def test_thing_group_relations(): + client = boto3.client('iot', region_name='ap-northeast-1') + name = 'my-thing' + group_name = 'my-group-name' + + # thing group + thing_group = client.create_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # add in 4 way + client.add_thing_to_thing_group( + thingGroupName=group_name, + thingName=name + ) + client.add_thing_to_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingArn=thing['thingArn'] + ) + client.add_thing_to_thing_group( + thingGroupName=group_name, + thingArn=thing['thingArn'] + ) + client.add_thing_to_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingName=name + ) + + things = client.list_things_in_thing_group( + thingGroupName=group_name + ) + things.should.have.key('things') + things['things'].should.have.length_of(1) + + thing_groups = client.list_thing_groups_for_thing( + thingName=name + ) + thing_groups.should.have.key('thingGroups') + thing_groups['thingGroups'].should.have.length_of(1) + + # remove in 4 way + client.remove_thing_from_thing_group( + thingGroupName=group_name, + thingName=name + ) + client.remove_thing_from_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingArn=thing['thingArn'] + ) + client.remove_thing_from_thing_group( + thingGroupName=group_name, + thingArn=thing['thingArn'] + ) + client.remove_thing_from_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingName=name + ) + things = client.list_things_in_thing_group( + thingGroupName=group_name + ) + 
things.should.have.key('things')
+    things['things'].should.have.length_of(0)
+
+    # update thing group for thing
+    client.update_thing_groups_for_thing(
+        thingName=name,
+        thingGroupsToAdd=[
+            group_name
+        ]
+    )
+    things = client.list_things_in_thing_group(
+        thingGroupName=group_name
+    )
+    things.should.have.key('things')
+    things['things'].should.have.length_of(1)
+
+    client.update_thing_groups_for_thing(
+        thingName=name,
+        thingGroupsToRemove=[
+            group_name
+        ]
+    )
+    things = client.list_things_in_thing_group(
+        thingGroupName=group_name
+    )
+    things.should.have.key('things')
+    things['things'].should.have.length_of(0)
+
+
+@mock_iot
+def test_create_job():
+    client = boto3.client('iot', region_name='eu-west-1')
+    name = "my-thing"
+    job_id = "TestJob"
+    # thing
+    thing = client.create_thing(thingName=name)
+    thing.should.have.key('thingName').which.should.equal(name)
+    thing.should.have.key('thingArn')
+
+    # job document
+    job_document = {
+        "field": "value"
+    }
+
+    job = client.create_job(
+        jobId=job_id,
+        targets=[thing["thingArn"]],
+        document=json.dumps(job_document),
+        description="Description",
+        presignedUrlConfig={
+            'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
+            'expiresInSec': 123
+        },
+        targetSelection="CONTINUOUS",
+        jobExecutionsRolloutConfig={
+            'maximumPerMinute': 10
+        }
+    )
+
+    job.should.have.key('jobId').which.should.equal(job_id)
+    job.should.have.key('jobArn')
+    job.should.have.key('description')
+
+
+@mock_iot
+def test_describe_job():
+    client = boto3.client('iot', region_name='eu-west-1')
+    name = "my-thing"
+    job_id = "TestJob"
+    # thing
+    thing = client.create_thing(thingName=name)
+    thing.should.have.key('thingName').which.should.equal(name)
+    thing.should.have.key('thingArn')
+
+    job = client.create_job(
+        jobId=job_id,
+        targets=[thing["thingArn"]],
+        documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json",
+        presignedUrlConfig={
+            'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
+            'expiresInSec': 123
+        },
+        targetSelection="CONTINUOUS",
+        jobExecutionsRolloutConfig={
+            'maximumPerMinute': 10
+        }
+    )
+
+    job.should.have.key('jobId').which.should.equal(job_id)
+    job.should.have.key('jobArn')
+
+    job = client.describe_job(jobId=job_id)
+    job.should.have.key('documentSource')
+    job.should.have.key('job')
+    job.should.have.key('job').which.should.have.key("jobArn")
+    job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id)
+    job.should.have.key('job').which.should.have.key("targets")
+    job.should.have.key('job').which.should.have.key("jobProcessDetails")
+    job.should.have.key('job').which.should.have.key("lastUpdatedAt")
+    job.should.have.key('job').which.should.have.key("createdAt")
+    job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig")
+    job.should.have.key('job').which.should.have.key("targetSelection").which.should.equal("CONTINUOUS")
+    job.should.have.key('job').which.should.have.key("presignedUrlConfig")
+    job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key(
+        "roleArn").which.should.equal('arn:aws:iam::1:role/service-role/iot_job_role')
+    job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key(
+        "expiresInSec").which.should.equal(123)
+    job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key(
+        "maximumPerMinute").which.should.equal(10)
+
+
+@mock_iot
+def
test_describe_job_1(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job = client.describe_job(jobId=job_id) + job.should.have.key('job') + job.should.have.key('job').which.should.have.key("jobArn") + job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) + job.should.have.key('job').which.should.have.key("targets") + job.should.have.key('job').which.should.have.key("jobProcessDetails") + job.should.have.key('job').which.should.have.key("lastUpdatedAt") + job.should.have.key('job').which.should.have.key("createdAt") + job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig") + job.should.have.key('job').which.should.have.key("targetSelection").which.should.equal("CONTINUOUS") + job.should.have.key('job').which.should.have.key("presignedUrlConfig") + job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( + "roleArn").which.should.equal('arn:aws:iam::1:role/service-role/iot_job_role') + job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( + "expiresInSec").which.should.equal(123) + job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key( + "maximumPerMinute").which.should.equal(10) + + +@mock_iot +def test_get_job_document_with_document_source(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job_document = client.get_job_document(jobId=job_id) + job_document.should.have.key('document').which.should.equal('') + + +@mock_iot +def test_get_job_document_with_document(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + 
job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job_document = client.get_job_document(jobId=job_id) + job_document.should.have.key('document').which.should.equal("{\"field\": \"value\"}") diff --git a/tests/test_iot/test_server.py b/tests/test_iot/test_server.py index 47091531a..60e81435a 100644 --- a/tests/test_iot/test_server.py +++ b/tests/test_iot/test_server.py @@ -1,19 +1,19 @@ -from __future__ import unicode_literals - -import sure # noqa - -import moto.server as server -from moto import mock_iot - -''' -Test the different server responses -''' - -@mock_iot -def test_iot_list(): - backend = server.create_backend_app("iot") - test_client = backend.test_client() - - # just making sure that server is up - res = test_client.get('/things') - res.status_code.should.equal(404) +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server +from moto import mock_iot + +''' +Test the different server responses +''' + +@mock_iot +def test_iot_list(): + backend = server.create_backend_app("iot") + test_client = backend.test_client() + + # just making sure that server is up + res = test_client.get('/things') + res.status_code.should.equal(404) diff --git a/tests/test_iotdata/test_iotdata.py b/tests/test_iotdata/test_iotdata.py index 09c1ada4c..8c03521f1 100644 --- a/tests/test_iotdata/test_iotdata.py +++ b/tests/test_iotdata/test_iotdata.py @@ -1,93 +1,93 @@ -from __future__ import unicode_literals - -import json -import boto3 -import sure # noqa -from nose.tools import assert_raises -from botocore.exceptions import ClientError -from moto import mock_iotdata, mock_iot - - -@mock_iot -@mock_iotdata -def test_basic(): - iot_client = boto3.client('iot', region_name='ap-northeast-1') - client = boto3.client('iot-data', region_name='ap-northeast-1') - name = 'my-thing' - raw_payload = b'{"state": {"desired": {"led": "on"}}}' - iot_client.create_thing(thingName=name) - - with assert_raises(ClientError): - client.get_thing_shadow(thingName=name) - - res = client.update_thing_shadow(thingName=name, payload=raw_payload) - - payload = json.loads(res['payload'].read()) - expected_state = '{"desired": {"led": "on"}}' - payload.should.have.key('state').which.should.equal(json.loads(expected_state)) - payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') - payload.should.have.key('version').which.should.equal(1) - payload.should.have.key('timestamp') - - res = client.get_thing_shadow(thingName=name) - payload = json.loads(res['payload'].read()) - expected_state = b'{"desired": {"led": "on"}, "delta": {"led": "on"}}' - payload.should.have.key('state').which.should.equal(json.loads(expected_state)) - payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') - payload.should.have.key('version').which.should.equal(1) - payload.should.have.key('timestamp') - - client.delete_thing_shadow(thingName=name) - with assert_raises(ClientError): - client.get_thing_shadow(thingName=name) - - -@mock_iot -@mock_iotdata -def test_update(): - iot_client = boto3.client('iot', region_name='ap-northeast-1') - client = boto3.client('iot-data', region_name='ap-northeast-1') - name = 'my-thing' - raw_payload = b'{"state": {"desired": {"led": "on"}}}' - iot_client.create_thing(thingName=name) - - # first update - res = client.update_thing_shadow(thingName=name, payload=raw_payload) - payload = json.loads(res['payload'].read()) - expected_state = '{"desired": {"led": "on"}}' - 
payload.should.have.key('state').which.should.equal(json.loads(expected_state)) - payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') - payload.should.have.key('version').which.should.equal(1) - payload.should.have.key('timestamp') - - res = client.get_thing_shadow(thingName=name) - payload = json.loads(res['payload'].read()) - expected_state = b'{"desired": {"led": "on"}, "delta": {"led": "on"}}' - payload.should.have.key('state').which.should.equal(json.loads(expected_state)) - payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') - payload.should.have.key('version').which.should.equal(1) - payload.should.have.key('timestamp') - - # reporting new state - new_payload = b'{"state": {"reported": {"led": "on"}}}' - res = client.update_thing_shadow(thingName=name, payload=new_payload) - payload = json.loads(res['payload'].read()) - expected_state = '{"reported": {"led": "on"}}' - payload.should.have.key('state').which.should.equal(json.loads(expected_state)) - payload.should.have.key('metadata').which.should.have.key('reported').which.should.have.key('led') - payload.should.have.key('version').which.should.equal(2) - payload.should.have.key('timestamp') - - res = client.get_thing_shadow(thingName=name) - payload = json.loads(res['payload'].read()) - expected_state = b'{"desired": {"led": "on"}, "reported": {"led": "on"}}' - payload.should.have.key('state').which.should.equal(json.loads(expected_state)) - payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') - payload.should.have.key('version').which.should.equal(2) - payload.should.have.key('timestamp') - - -@mock_iotdata -def test_publish(): - client = boto3.client('iot-data', region_name='ap-northeast-1') - client.publish(topic='test/topic', qos=1, payload=b'') +from __future__ import unicode_literals + +import json +import boto3 +import sure # noqa +from nose.tools import assert_raises +from botocore.exceptions import ClientError +from moto import mock_iotdata, mock_iot + + +@mock_iot +@mock_iotdata +def test_basic(): + iot_client = boto3.client('iot', region_name='ap-northeast-1') + client = boto3.client('iot-data', region_name='ap-northeast-1') + name = 'my-thing' + raw_payload = b'{"state": {"desired": {"led": "on"}}}' + iot_client.create_thing(thingName=name) + + with assert_raises(ClientError): + client.get_thing_shadow(thingName=name) + + res = client.update_thing_shadow(thingName=name, payload=raw_payload) + + payload = json.loads(res['payload'].read()) + expected_state = '{"desired": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(1) + payload.should.have.key('timestamp') + + res = client.get_thing_shadow(thingName=name) + payload = json.loads(res['payload'].read()) + expected_state = b'{"desired": {"led": "on"}, "delta": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(1) + payload.should.have.key('timestamp') + + client.delete_thing_shadow(thingName=name) + with assert_raises(ClientError): + client.get_thing_shadow(thingName=name) + + +@mock_iot +@mock_iotdata +def test_update(): + iot_client = 
boto3.client('iot', region_name='ap-northeast-1') + client = boto3.client('iot-data', region_name='ap-northeast-1') + name = 'my-thing' + raw_payload = b'{"state": {"desired": {"led": "on"}}}' + iot_client.create_thing(thingName=name) + + # first update + res = client.update_thing_shadow(thingName=name, payload=raw_payload) + payload = json.loads(res['payload'].read()) + expected_state = '{"desired": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(1) + payload.should.have.key('timestamp') + + res = client.get_thing_shadow(thingName=name) + payload = json.loads(res['payload'].read()) + expected_state = b'{"desired": {"led": "on"}, "delta": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(1) + payload.should.have.key('timestamp') + + # reporting new state + new_payload = b'{"state": {"reported": {"led": "on"}}}' + res = client.update_thing_shadow(thingName=name, payload=new_payload) + payload = json.loads(res['payload'].read()) + expected_state = '{"reported": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('reported').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(2) + payload.should.have.key('timestamp') + + res = client.get_thing_shadow(thingName=name) + payload = json.loads(res['payload'].read()) + expected_state = b'{"desired": {"led": "on"}, "reported": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(2) + payload.should.have.key('timestamp') + + +@mock_iotdata +def test_publish(): + client = boto3.client('iot-data', region_name='ap-northeast-1') + client.publish(topic='test/topic', qos=1, payload=b'') diff --git a/tests/test_iotdata/test_server.py b/tests/test_iotdata/test_server.py index 42a5c5f22..edcd92a33 100644 --- a/tests/test_iotdata/test_server.py +++ b/tests/test_iotdata/test_server.py @@ -1,20 +1,20 @@ -from __future__ import unicode_literals - -import sure # noqa - -import moto.server as server -from moto import mock_iotdata - -''' -Test the different server responses -''' - -@mock_iotdata -def test_iotdata_list(): - backend = server.create_backend_app("iot-data") - test_client = backend.test_client() - - # just making sure that server is up - thing_name = 'nothing' - res = test_client.get('/things/{}/shadow'.format(thing_name)) - res.status_code.should.equal(404) +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server +from moto import mock_iotdata + +''' +Test the different server responses +''' + +@mock_iotdata +def test_iotdata_list(): + backend = server.create_backend_app("iot-data") + test_client = backend.test_client() + + # just making sure that server is up + thing_name = 'nothing' + res = test_client.get('/things/{}/shadow'.format(thing_name)) + res.status_code.should.equal(404) diff --git a/tests/test_kinesis/test_firehose.py b/tests/test_kinesis/test_firehose.py index 
6ab46c6f9..b13672e26 100644 --- a/tests/test_kinesis/test_firehose.py +++ b/tests/test_kinesis/test_firehose.py @@ -1,188 +1,188 @@ -from __future__ import unicode_literals - -import datetime - -from botocore.exceptions import ClientError -import boto3 -import sure # noqa - -from moto import mock_kinesis - - -def create_stream(client, stream_name): - return client.create_delivery_stream( - DeliveryStreamName=stream_name, - RedshiftDestinationConfiguration={ - 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', - 'ClusterJDBCURL': 'jdbc:redshift://host.amazonaws.com:5439/database', - 'CopyCommand': { - 'DataTableName': 'outputTable', - 'CopyOptions': "CSV DELIMITER ',' NULL '\\0'" - }, - 'Username': 'username', - 'Password': 'password', - 'S3Configuration': { - 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', - 'BucketARN': 'arn:aws:s3:::kinesis-test', - 'Prefix': 'myFolder/', - 'BufferingHints': { - 'SizeInMBs': 123, - 'IntervalInSeconds': 124 - }, - 'CompressionFormat': 'UNCOMPRESSED', - } - } - ) - - -@mock_kinesis -def test_create_stream(): - client = boto3.client('firehose', region_name='us-east-1') - - response = create_stream(client, 'stream1') - stream_arn = response['DeliveryStreamARN'] - - response = client.describe_delivery_stream(DeliveryStreamName='stream1') - stream_description = response['DeliveryStreamDescription'] - - # Sure and Freezegun don't play nicely together - _ = stream_description.pop('CreateTimestamp') - _ = stream_description.pop('LastUpdateTimestamp') - - stream_description.should.equal({ - 'DeliveryStreamName': 'stream1', - 'DeliveryStreamARN': stream_arn, - 'DeliveryStreamStatus': 'ACTIVE', - 'VersionId': 'string', - 'Destinations': [ - { - 'DestinationId': 'string', - 'RedshiftDestinationDescription': { - 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', - 'ClusterJDBCURL': 'jdbc:redshift://host.amazonaws.com:5439/database', - 'CopyCommand': { - 'DataTableName': 'outputTable', - 'CopyOptions': "CSV DELIMITER ',' NULL '\\0'" - }, - 'Username': 'username', - 'S3DestinationDescription': { - 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', - 'BucketARN': 'arn:aws:s3:::kinesis-test', - 'Prefix': 'myFolder/', - 'BufferingHints': { - 'SizeInMBs': 123, - 'IntervalInSeconds': 124 - }, - 'CompressionFormat': 'UNCOMPRESSED', - } - } - }, - ], - "HasMoreDestinations": False, - }) - - -@mock_kinesis -def test_create_stream_without_redshift(): - client = boto3.client('firehose', region_name='us-east-1') - - response = client.create_delivery_stream( - DeliveryStreamName="stream1", - S3DestinationConfiguration={ - 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', - 'BucketARN': 'arn:aws:s3:::kinesis-test', - 'Prefix': 'myFolder/', - 'BufferingHints': { - 'SizeInMBs': 123, - 'IntervalInSeconds': 124 - }, - 'CompressionFormat': 'UNCOMPRESSED', - } - ) - stream_arn = response['DeliveryStreamARN'] - - response = client.describe_delivery_stream(DeliveryStreamName='stream1') - stream_description = response['DeliveryStreamDescription'] - - # Sure and Freezegun don't play nicely together - _ = stream_description.pop('CreateTimestamp') - _ = stream_description.pop('LastUpdateTimestamp') - - stream_description.should.equal({ - 'DeliveryStreamName': 'stream1', - 'DeliveryStreamARN': stream_arn, - 'DeliveryStreamStatus': 'ACTIVE', - 'VersionId': 'string', - 'Destinations': [ - { - 'DestinationId': 'string', - 'S3DestinationDescription': { - 'RoleARN': 
'arn:aws:iam::123456789012:role/firehose_delivery_role', - 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', - 'BucketARN': 'arn:aws:s3:::kinesis-test', - 'Prefix': 'myFolder/', - 'BufferingHints': { - 'SizeInMBs': 123, - 'IntervalInSeconds': 124 - }, - 'CompressionFormat': 'UNCOMPRESSED', - } - }, - ], - "HasMoreDestinations": False, - }) - - -@mock_kinesis -def test_deescribe_non_existant_stream(): - client = boto3.client('firehose', region_name='us-east-1') - - client.describe_delivery_stream.when.called_with( - DeliveryStreamName='not-a-stream').should.throw(ClientError) - - -@mock_kinesis -def test_list_and_delete_stream(): - client = boto3.client('firehose', region_name='us-east-1') - - create_stream(client, 'stream1') - create_stream(client, 'stream2') - - set(client.list_delivery_streams()['DeliveryStreamNames']).should.equal( - set(['stream1', 'stream2'])) - - client.delete_delivery_stream(DeliveryStreamName='stream1') - - set(client.list_delivery_streams()[ - 'DeliveryStreamNames']).should.equal(set(['stream2'])) - - -@mock_kinesis -def test_put_record(): - client = boto3.client('firehose', region_name='us-east-1') - - create_stream(client, 'stream1') - client.put_record( - DeliveryStreamName='stream1', - Record={ - 'Data': 'some data' - } - ) - - -@mock_kinesis -def test_put_record_batch(): - client = boto3.client('firehose', region_name='us-east-1') - - create_stream(client, 'stream1') - client.put_record_batch( - DeliveryStreamName='stream1', - Records=[ - { - 'Data': 'some data1' - }, - { - 'Data': 'some data2' - }, - ] - ) +from __future__ import unicode_literals + +import datetime + +from botocore.exceptions import ClientError +import boto3 +import sure # noqa + +from moto import mock_kinesis + + +def create_stream(client, stream_name): + return client.create_delivery_stream( + DeliveryStreamName=stream_name, + RedshiftDestinationConfiguration={ + 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', + 'ClusterJDBCURL': 'jdbc:redshift://host.amazonaws.com:5439/database', + 'CopyCommand': { + 'DataTableName': 'outputTable', + 'CopyOptions': "CSV DELIMITER ',' NULL '\\0'" + }, + 'Username': 'username', + 'Password': 'password', + 'S3Configuration': { + 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', + 'BucketARN': 'arn:aws:s3:::kinesis-test', + 'Prefix': 'myFolder/', + 'BufferingHints': { + 'SizeInMBs': 123, + 'IntervalInSeconds': 124 + }, + 'CompressionFormat': 'UNCOMPRESSED', + } + } + ) + + +@mock_kinesis +def test_create_stream(): + client = boto3.client('firehose', region_name='us-east-1') + + response = create_stream(client, 'stream1') + stream_arn = response['DeliveryStreamARN'] + + response = client.describe_delivery_stream(DeliveryStreamName='stream1') + stream_description = response['DeliveryStreamDescription'] + + # Sure and Freezegun don't play nicely together + _ = stream_description.pop('CreateTimestamp') + _ = stream_description.pop('LastUpdateTimestamp') + + stream_description.should.equal({ + 'DeliveryStreamName': 'stream1', + 'DeliveryStreamARN': stream_arn, + 'DeliveryStreamStatus': 'ACTIVE', + 'VersionId': 'string', + 'Destinations': [ + { + 'DestinationId': 'string', + 'RedshiftDestinationDescription': { + 'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role', + 'ClusterJDBCURL': 'jdbc:redshift://host.amazonaws.com:5439/database', + 'CopyCommand': { + 'DataTableName': 'outputTable', + 'CopyOptions': "CSV DELIMITER ',' NULL '\\0'" + }, + 'Username': 'username', + 'S3DestinationDescription': { 
+                        'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role',
+                        'BucketARN': 'arn:aws:s3:::kinesis-test',
+                        'Prefix': 'myFolder/',
+                        'BufferingHints': {
+                            'SizeInMBs': 123,
+                            'IntervalInSeconds': 124
+                        },
+                        'CompressionFormat': 'UNCOMPRESSED',
+                    }
+                }
+            },
+        ],
+        "HasMoreDestinations": False,
+    })
+
+
+@mock_kinesis
+def test_create_stream_without_redshift():
+    client = boto3.client('firehose', region_name='us-east-1')
+
+    response = client.create_delivery_stream(
+        DeliveryStreamName="stream1",
+        S3DestinationConfiguration={
+            'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role',
+            'BucketARN': 'arn:aws:s3:::kinesis-test',
+            'Prefix': 'myFolder/',
+            'BufferingHints': {
+                'SizeInMBs': 123,
+                'IntervalInSeconds': 124
+            },
+            'CompressionFormat': 'UNCOMPRESSED',
+        }
+    )
+    stream_arn = response['DeliveryStreamARN']
+
+    response = client.describe_delivery_stream(DeliveryStreamName='stream1')
+    stream_description = response['DeliveryStreamDescription']
+
+    # Sure and Freezegun don't play nicely together
+    _ = stream_description.pop('CreateTimestamp')
+    _ = stream_description.pop('LastUpdateTimestamp')
+
+    stream_description.should.equal({
+        'DeliveryStreamName': 'stream1',
+        'DeliveryStreamARN': stream_arn,
+        'DeliveryStreamStatus': 'ACTIVE',
+        'VersionId': 'string',
+        'Destinations': [
+            {
+                'DestinationId': 'string',
+                'S3DestinationDescription': {
+                    'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role',
+                    'BucketARN': 'arn:aws:s3:::kinesis-test',
+                    'Prefix': 'myFolder/',
+                    'BufferingHints': {
+                        'SizeInMBs': 123,
+                        'IntervalInSeconds': 124
+                    },
+                    'CompressionFormat': 'UNCOMPRESSED',
+                }
+            },
+        ],
+        "HasMoreDestinations": False,
+    })
+
+
+@mock_kinesis
+def test_deescribe_non_existant_stream():
+    client = boto3.client('firehose', region_name='us-east-1')
+
+    client.describe_delivery_stream.when.called_with(
+        DeliveryStreamName='not-a-stream').should.throw(ClientError)
+
+
+@mock_kinesis
+def test_list_and_delete_stream():
+    client = boto3.client('firehose', region_name='us-east-1')
+
+    create_stream(client, 'stream1')
+    create_stream(client, 'stream2')
+
+    set(client.list_delivery_streams()['DeliveryStreamNames']).should.equal(
+        set(['stream1', 'stream2']))
+
+    client.delete_delivery_stream(DeliveryStreamName='stream1')
+
+    set(client.list_delivery_streams()[
+        'DeliveryStreamNames']).should.equal(set(['stream2']))
+
+
+@mock_kinesis
+def test_put_record():
+    client = boto3.client('firehose', region_name='us-east-1')
+
+    create_stream(client, 'stream1')
+    client.put_record(
+        DeliveryStreamName='stream1',
+        Record={
+            'Data': 'some data'
+        }
+    )
+
+
+@mock_kinesis
+def test_put_record_batch():
+    client = boto3.client('firehose', region_name='us-east-1')
+
+    create_stream(client, 'stream1')
+    client.put_record_batch(
+        DeliveryStreamName='stream1',
+        Records=[
+            {
+                'Data': 'some data1'
+            },
+            {
+                'Data': 'some data2'
+            },
+        ]
+    )
diff --git a/tests/test_kinesis/test_kinesis.py b/tests/test_kinesis/test_kinesis.py
index c70236978..736dc05c3 100644
--- a/tests/test_kinesis/test_kinesis.py
+++ b/tests/test_kinesis/test_kinesis.py
@@ -1,624 +1,624 @@
-from __future__ import unicode_literals
-
-import boto.kinesis
-from boto.kinesis.exceptions import ResourceNotFoundException, InvalidArgumentException
-import boto3
-import sure  # noqa
-import datetime
-import time
-
-from moto import mock_kinesis, mock_kinesis_deprecated
-
-
-@mock_kinesis_deprecated
-def test_create_cluster():
-    conn =
boto.kinesis.connect_to_region("us-west-2") - - conn.create_stream("my_stream", 2) - - stream_response = conn.describe_stream("my_stream") - - stream = stream_response["StreamDescription"] - stream["StreamName"].should.equal("my_stream") - stream["HasMoreShards"].should.equal(False) - stream["StreamARN"].should.equal( - "arn:aws:kinesis:us-west-2:123456789012:my_stream") - stream["StreamStatus"].should.equal("ACTIVE") - - shards = stream['Shards'] - shards.should.have.length_of(2) - - -@mock_kinesis_deprecated -def test_describe_non_existant_stream(): - conn = boto.kinesis.connect_to_region("us-east-1") - conn.describe_stream.when.called_with( - "not-a-stream").should.throw(ResourceNotFoundException) - - -@mock_kinesis_deprecated -def test_list_and_delete_stream(): - conn = boto.kinesis.connect_to_region("us-west-2") - - conn.create_stream("stream1", 1) - conn.create_stream("stream2", 1) - - conn.list_streams()['StreamNames'].should.have.length_of(2) - - conn.delete_stream("stream2") - - conn.list_streams()['StreamNames'].should.have.length_of(1) - - # Delete invalid id - conn.delete_stream.when.called_with( - "not-a-stream").should.throw(ResourceNotFoundException) - - -@mock_kinesis -def test_list_many_streams(): - conn = boto3.client('kinesis', region_name="us-west-2") - - for i in range(11): - conn.create_stream(StreamName="stream%d" % i, ShardCount=1) - - resp = conn.list_streams() - stream_names = resp["StreamNames"] - has_more_streams = resp["HasMoreStreams"] - stream_names.should.have.length_of(10) - has_more_streams.should.be(True) - resp2 = conn.list_streams(ExclusiveStartStreamName=stream_names[-1]) - stream_names = resp2["StreamNames"] - has_more_streams = resp2["HasMoreStreams"] - stream_names.should.have.length_of(1) - has_more_streams.should.equal(False) - - -@mock_kinesis_deprecated -def test_basic_shard_iterator(): - conn = boto.kinesis.connect_to_region("us-west-2") - - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - - response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - response = conn.get_records(shard_iterator) - shard_iterator = response['NextShardIterator'] - response['Records'].should.equal([]) - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis_deprecated -def test_get_invalid_shard_iterator(): - conn = boto.kinesis.connect_to_region("us-west-2") - - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - conn.get_shard_iterator.when.called_with( - stream_name, "123", 'TRIM_HORIZON').should.throw(ResourceNotFoundException) - - -@mock_kinesis_deprecated -def test_put_records(): - conn = boto.kinesis.connect_to_region("us-west-2") - - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - data = "hello world" - partition_key = "1234" - - conn.put_record.when.called_with( - stream_name, data, 1234).should.throw(InvalidArgumentException) - - conn.put_record(stream_name, data, partition_key) - - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - - response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - response = conn.get_records(shard_iterator) - shard_iterator = response['NextShardIterator'] - response['Records'].should.have.length_of(1) - record = response['Records'][0] - - 
record["Data"].should.equal("hello world") - record["PartitionKey"].should.equal("1234") - record["SequenceNumber"].should.equal("1") - - -@mock_kinesis_deprecated -def test_get_records_limit(): - conn = boto.kinesis.connect_to_region("us-west-2") - - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - # Create some data - data = "hello world" - - for index in range(5): - conn.put_record(stream_name, data, str(index)) - - # Get a shard iterator - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - # Retrieve only 3 records - response = conn.get_records(shard_iterator, limit=3) - response['Records'].should.have.length_of(3) - - # Then get the rest of the results - next_shard_iterator = response['NextShardIterator'] - response = conn.get_records(next_shard_iterator) - response['Records'].should.have.length_of(2) - - -@mock_kinesis_deprecated -def test_get_records_at_sequence_number(): - # AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted by - # a specific sequence number. - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - # Create some data - for index in range(1, 5): - conn.put_record(stream_name, str(index), str(index)) - - # Get a shard iterator - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - # Get the second record - response = conn.get_records(shard_iterator, limit=2) - second_sequence_id = response['Records'][1]['SequenceNumber'] - - # Then get a new iterator starting at that id - response = conn.get_shard_iterator( - stream_name, shard_id, 'AT_SEQUENCE_NUMBER', second_sequence_id) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(shard_iterator) - # And the first result returned should be the second item - response['Records'][0]['SequenceNumber'].should.equal(second_sequence_id) - response['Records'][0]['Data'].should.equal('2') - - -@mock_kinesis_deprecated -def test_get_records_after_sequence_number(): - # AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted - # by a specific sequence number. 
- conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - # Create some data - for index in range(1, 5): - conn.put_record(stream_name, str(index), str(index)) - - # Get a shard iterator - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - # Get the second record - response = conn.get_records(shard_iterator, limit=2) - second_sequence_id = response['Records'][1]['SequenceNumber'] - - # Then get a new iterator starting after that id - response = conn.get_shard_iterator( - stream_name, shard_id, 'AFTER_SEQUENCE_NUMBER', second_sequence_id) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(shard_iterator) - # And the first result returned should be the third item - response['Records'][0]['Data'].should.equal('3') - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis_deprecated -def test_get_records_latest(): - # LATEST - Start reading just after the most recent record in the shard, - # so that you always read the most recent data in the shard. - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - # Create some data - for index in range(1, 5): - conn.put_record(stream_name, str(index), str(index)) - - # Get a shard iterator - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - # Get the second record - response = conn.get_records(shard_iterator, limit=2) - second_sequence_id = response['Records'][1]['SequenceNumber'] - - # Then get a new iterator starting after that id - response = conn.get_shard_iterator( - stream_name, shard_id, 'LATEST', second_sequence_id) - shard_iterator = response['ShardIterator'] - - # Write some more data - conn.put_record(stream_name, "last_record", "last_record") - - response = conn.get_records(shard_iterator) - # And the only result returned should be the new item - response['Records'].should.have.length_of(1) - response['Records'][0]['PartitionKey'].should.equal('last_record') - response['Records'][0]['Data'].should.equal('last_record') - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis -def test_get_records_at_timestamp(): - # AT_TIMESTAMP - Read the first record at or after the specified timestamp - conn = boto3.client('kinesis', region_name="us-west-2") - stream_name = "my_stream" - conn.create_stream(StreamName=stream_name, ShardCount=1) - - # Create some data - for index in range(1, 5): - conn.put_record(StreamName=stream_name, - Data=str(index), - PartitionKey=str(index)) - - # When boto3 floors the timestamp that we pass to get_shard_iterator to - # second precision even though AWS supports ms precision: - # http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html - # To test around this limitation we wait until we well into the next second - # before capturing the time and storing the records we expect to retrieve. 
- time.sleep(1.0) - timestamp = datetime.datetime.utcnow() - - keys = [str(i) for i in range(5, 10)] - for k in keys: - conn.put_record(StreamName=stream_name, - Data=k, - PartitionKey=k) - - # Get a shard iterator - response = conn.describe_stream(StreamName=stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(StreamName=stream_name, - ShardId=shard_id, - ShardIteratorType='AT_TIMESTAMP', - Timestamp=timestamp) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(ShardIterator=shard_iterator) - - response['Records'].should.have.length_of(len(keys)) - partition_keys = [r['PartitionKey'] for r in response['Records']] - partition_keys.should.equal(keys) - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis -def test_get_records_at_very_old_timestamp(): - conn = boto3.client('kinesis', region_name="us-west-2") - stream_name = "my_stream" - conn.create_stream(StreamName=stream_name, ShardCount=1) - - # Create some data - keys = [str(i) for i in range(1, 5)] - for k in keys: - conn.put_record(StreamName=stream_name, - Data=k, - PartitionKey=k) - - # Get a shard iterator - response = conn.describe_stream(StreamName=stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(StreamName=stream_name, - ShardId=shard_id, - ShardIteratorType='AT_TIMESTAMP', - Timestamp=1) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(ShardIterator=shard_iterator) - response['Records'].should.have.length_of(len(keys)) - partition_keys = [r['PartitionKey'] for r in response['Records']] - partition_keys.should.equal(keys) - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis -def test_get_records_timestamp_filtering(): - conn = boto3.client('kinesis', region_name="us-west-2") - stream_name = "my_stream" - conn.create_stream(StreamName=stream_name, ShardCount=1) - - conn.put_record(StreamName=stream_name, - Data='0', - PartitionKey='0') - - time.sleep(1.0) - timestamp = datetime.datetime.utcnow() - - conn.put_record(StreamName=stream_name, - Data='1', - PartitionKey='1') - - response = conn.describe_stream(StreamName=stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(StreamName=stream_name, - ShardId=shard_id, - ShardIteratorType='AT_TIMESTAMP', - Timestamp=timestamp) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(ShardIterator=shard_iterator) - response['Records'].should.have.length_of(1) - response['Records'][0]['PartitionKey'].should.equal('1') - response['Records'][0]['ApproximateArrivalTimestamp'].should.be.\ - greater_than(timestamp) - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis -def test_get_records_millis_behind_latest(): - conn = boto3.client('kinesis', region_name="us-west-2") - stream_name = "my_stream" - conn.create_stream(StreamName=stream_name, ShardCount=1) - - conn.put_record(StreamName=stream_name, - Data='0', - PartitionKey='0') - time.sleep(1.0) - conn.put_record(StreamName=stream_name, - Data='1', - PartitionKey='1') - - response = conn.describe_stream(StreamName=stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(StreamName=stream_name, - ShardId=shard_id, - ShardIteratorType='TRIM_HORIZON') - shard_iterator = response['ShardIterator'] - - response = conn.get_records(ShardIterator=shard_iterator, Limit=1) - 
response['Records'].should.have.length_of(1) - response['MillisBehindLatest'].should.be.greater_than(0) - - -@mock_kinesis -def test_get_records_at_very_new_timestamp(): - conn = boto3.client('kinesis', region_name="us-west-2") - stream_name = "my_stream" - conn.create_stream(StreamName=stream_name, ShardCount=1) - - # Create some data - keys = [str(i) for i in range(1, 5)] - for k in keys: - conn.put_record(StreamName=stream_name, - Data=k, - PartitionKey=k) - - timestamp = datetime.datetime.utcnow() + datetime.timedelta(seconds=1) - - # Get a shard iterator - response = conn.describe_stream(StreamName=stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(StreamName=stream_name, - ShardId=shard_id, - ShardIteratorType='AT_TIMESTAMP', - Timestamp=timestamp) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(ShardIterator=shard_iterator) - - response['Records'].should.have.length_of(0) - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis -def test_get_records_from_empty_stream_at_timestamp(): - conn = boto3.client('kinesis', region_name="us-west-2") - stream_name = "my_stream" - conn.create_stream(StreamName=stream_name, ShardCount=1) - - timestamp = datetime.datetime.utcnow() - - # Get a shard iterator - response = conn.describe_stream(StreamName=stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator(StreamName=stream_name, - ShardId=shard_id, - ShardIteratorType='AT_TIMESTAMP', - Timestamp=timestamp) - shard_iterator = response['ShardIterator'] - - response = conn.get_records(ShardIterator=shard_iterator) - - response['Records'].should.have.length_of(0) - response['MillisBehindLatest'].should.equal(0) - - -@mock_kinesis_deprecated -def test_invalid_shard_iterator_type(): - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - response = conn.describe_stream(stream_name) - shard_id = response['StreamDescription']['Shards'][0]['ShardId'] - response = conn.get_shard_iterator.when.called_with( - stream_name, shard_id, 'invalid-type').should.throw(InvalidArgumentException) - - -@mock_kinesis_deprecated -def test_add_tags(): - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - conn.describe_stream(stream_name) - conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) - conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) - conn.add_tags_to_stream(stream_name, {'tag1': 'val3'}) - conn.add_tags_to_stream(stream_name, {'tag2': 'val4'}) - - -@mock_kinesis_deprecated -def test_list_tags(): - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - conn.describe_stream(stream_name) - conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag1').should.equal('val1') - conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag2').should.equal('val2') - conn.add_tags_to_stream(stream_name, {'tag1': 'val3'}) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag1').should.equal('val3') - conn.add_tags_to_stream(stream_name, {'tag2': 'val4'}) - tags = 
dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag2').should.equal('val4') - - -@mock_kinesis_deprecated -def test_remove_tags(): - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = "my_stream" - conn.create_stream(stream_name, 1) - - conn.describe_stream(stream_name) - conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag1').should.equal('val1') - conn.remove_tags_from_stream(stream_name, ['tag1']) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag1').should.equal(None) - - conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag2').should.equal('val2') - conn.remove_tags_from_stream(stream_name, ['tag2']) - tags = dict([(tag['Key'], tag['Value']) - for tag in conn.list_tags_for_stream(stream_name)['Tags']]) - tags.get('tag2').should.equal(None) - - -@mock_kinesis_deprecated -def test_split_shard(): - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = 'my_stream' - - conn.create_stream(stream_name, 2) - - # Create some data - for index in range(1, 100): - conn.put_record(stream_name, str(index), str(index)) - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(2) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] - for shard in shards]).should.equal(99) - - shard_range = shards[0]['HashKeyRange'] - new_starting_hash = ( - int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2 - conn.split_shard("my_stream", shards[0]['ShardId'], str(new_starting_hash)) - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(3) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] - for shard in shards]).should.equal(99) - - shard_range = shards[2]['HashKeyRange'] - new_starting_hash = ( - int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2 - conn.split_shard("my_stream", shards[2]['ShardId'], str(new_starting_hash)) - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(4) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] - for shard in shards]).should.equal(99) - - -@mock_kinesis_deprecated -def test_merge_shards(): - conn = boto.kinesis.connect_to_region("us-west-2") - stream_name = 'my_stream' - - conn.create_stream(stream_name, 4) - - # Create some data - for index in range(1, 100): - conn.put_record(stream_name, str(index), str(index)) - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(4) - - conn.merge_shards.when.called_with( - stream_name, 'shardId-000000000000', 'shardId-000000000002').should.throw(InvalidArgumentException) - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(4) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] - for shard in shards]).should.equal(99) - - 
conn.merge_shards(stream_name, 'shardId-000000000000', - 'shardId-000000000001') - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(3) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] - for shard in shards]).should.equal(99) - conn.merge_shards(stream_name, 'shardId-000000000002', - 'shardId-000000000000') - - stream_response = conn.describe_stream(stream_name) - - stream = stream_response["StreamDescription"] - shards = stream['Shards'] - shards.should.have.length_of(2) - sum([shard['SequenceNumberRange']['EndingSequenceNumber'] - for shard in shards]).should.equal(99) +from __future__ import unicode_literals + +import boto.kinesis +from boto.kinesis.exceptions import ResourceNotFoundException, InvalidArgumentException +import boto3 +import sure # noqa +import datetime +import time + +from moto import mock_kinesis, mock_kinesis_deprecated + + +@mock_kinesis_deprecated +def test_create_cluster(): + conn = boto.kinesis.connect_to_region("us-west-2") + + conn.create_stream("my_stream", 2) + + stream_response = conn.describe_stream("my_stream") + + stream = stream_response["StreamDescription"] + stream["StreamName"].should.equal("my_stream") + stream["HasMoreShards"].should.equal(False) + stream["StreamARN"].should.equal( + "arn:aws:kinesis:us-west-2:123456789012:my_stream") + stream["StreamStatus"].should.equal("ACTIVE") + + shards = stream['Shards'] + shards.should.have.length_of(2) + + +@mock_kinesis_deprecated +def test_describe_non_existant_stream(): + conn = boto.kinesis.connect_to_region("us-east-1") + conn.describe_stream.when.called_with( + "not-a-stream").should.throw(ResourceNotFoundException) + + +@mock_kinesis_deprecated +def test_list_and_delete_stream(): + conn = boto.kinesis.connect_to_region("us-west-2") + + conn.create_stream("stream1", 1) + conn.create_stream("stream2", 1) + + conn.list_streams()['StreamNames'].should.have.length_of(2) + + conn.delete_stream("stream2") + + conn.list_streams()['StreamNames'].should.have.length_of(1) + + # Delete invalid id + conn.delete_stream.when.called_with( + "not-a-stream").should.throw(ResourceNotFoundException) + + +@mock_kinesis +def test_list_many_streams(): + conn = boto3.client('kinesis', region_name="us-west-2") + + for i in range(11): + conn.create_stream(StreamName="stream%d" % i, ShardCount=1) + + resp = conn.list_streams() + stream_names = resp["StreamNames"] + has_more_streams = resp["HasMoreStreams"] + stream_names.should.have.length_of(10) + has_more_streams.should.be(True) + resp2 = conn.list_streams(ExclusiveStartStreamName=stream_names[-1]) + stream_names = resp2["StreamNames"] + has_more_streams = resp2["HasMoreStreams"] + stream_names.should.have.length_of(1) + has_more_streams.should.equal(False) + + +@mock_kinesis_deprecated +def test_basic_shard_iterator(): + conn = boto.kinesis.connect_to_region("us-west-2") + + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + response = conn.describe_stream(stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + + response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') + shard_iterator = response['ShardIterator'] + + response = conn.get_records(shard_iterator) + shard_iterator = response['NextShardIterator'] + response['Records'].should.equal([]) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis_deprecated +def test_get_invalid_shard_iterator(): + conn = 
boto.kinesis.connect_to_region("us-west-2") + + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + conn.get_shard_iterator.when.called_with( + stream_name, "123", 'TRIM_HORIZON').should.throw(ResourceNotFoundException) + + +@mock_kinesis_deprecated +def test_put_records(): + conn = boto.kinesis.connect_to_region("us-west-2") + + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + data = "hello world" + partition_key = "1234" + + conn.put_record.when.called_with( + stream_name, data, 1234).should.throw(InvalidArgumentException) + + conn.put_record(stream_name, data, partition_key) + + response = conn.describe_stream(stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + + response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') + shard_iterator = response['ShardIterator'] + + response = conn.get_records(shard_iterator) + shard_iterator = response['NextShardIterator'] + response['Records'].should.have.length_of(1) + record = response['Records'][0] + + record["Data"].should.equal("hello world") + record["PartitionKey"].should.equal("1234") + record["SequenceNumber"].should.equal("1") + + +@mock_kinesis_deprecated +def test_get_records_limit(): + conn = boto.kinesis.connect_to_region("us-west-2") + + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + # Create some data + data = "hello world" + + for index in range(5): + conn.put_record(stream_name, data, str(index)) + + # Get a shard iterator + response = conn.describe_stream(stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') + shard_iterator = response['ShardIterator'] + + # Retrieve only 3 records + response = conn.get_records(shard_iterator, limit=3) + response['Records'].should.have.length_of(3) + + # Then get the rest of the results + next_shard_iterator = response['NextShardIterator'] + response = conn.get_records(next_shard_iterator) + response['Records'].should.have.length_of(2) + + +@mock_kinesis_deprecated +def test_get_records_at_sequence_number(): + # AT_SEQUENCE_NUMBER - Start reading exactly from the position denoted by + # a specific sequence number. + conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + # Create some data + for index in range(1, 5): + conn.put_record(stream_name, str(index), str(index)) + + # Get a shard iterator + response = conn.describe_stream(stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON') + shard_iterator = response['ShardIterator'] + + # Get the second record + response = conn.get_records(shard_iterator, limit=2) + second_sequence_id = response['Records'][1]['SequenceNumber'] + + # Then get a new iterator starting at that id + response = conn.get_shard_iterator( + stream_name, shard_id, 'AT_SEQUENCE_NUMBER', second_sequence_id) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(shard_iterator) + # And the first result returned should be the second item + response['Records'][0]['SequenceNumber'].should.equal(second_sequence_id) + response['Records'][0]['Data'].should.equal('2') + + +@mock_kinesis_deprecated +def test_get_records_after_sequence_number(): + # AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted + # by a specific sequence number. 
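+    # (AT_SEQUENCE_NUMBER, in contrast, includes the record bearing the given
+    # sequence number itself; see test_get_records_at_sequence_number above.)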
+    conn = boto.kinesis.connect_to_region("us-west-2")
+    stream_name = "my_stream"
+    conn.create_stream(stream_name, 1)
+
+    # Create some data
+    for index in range(1, 5):
+        conn.put_record(stream_name, str(index), str(index))
+
+    # Get a shard iterator
+    response = conn.describe_stream(stream_name)
+    shard_id = response['StreamDescription']['Shards'][0]['ShardId']
+    response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON')
+    shard_iterator = response['ShardIterator']
+
+    # Get the second record
+    response = conn.get_records(shard_iterator, limit=2)
+    second_sequence_id = response['Records'][1]['SequenceNumber']
+
+    # Then get a new iterator starting after that id
+    response = conn.get_shard_iterator(
+        stream_name, shard_id, 'AFTER_SEQUENCE_NUMBER', second_sequence_id)
+    shard_iterator = response['ShardIterator']
+
+    response = conn.get_records(shard_iterator)
+    # And the first result returned should be the third item
+    response['Records'][0]['Data'].should.equal('3')
+    response['MillisBehindLatest'].should.equal(0)
+
+
+@mock_kinesis_deprecated
+def test_get_records_latest():
+    # LATEST - Start reading just after the most recent record in the shard,
+    # so that you always read the most recent data in the shard.
+    conn = boto.kinesis.connect_to_region("us-west-2")
+    stream_name = "my_stream"
+    conn.create_stream(stream_name, 1)
+
+    # Create some data
+    for index in range(1, 5):
+        conn.put_record(stream_name, str(index), str(index))
+
+    # Get a shard iterator
+    response = conn.describe_stream(stream_name)
+    shard_id = response['StreamDescription']['Shards'][0]['ShardId']
+    response = conn.get_shard_iterator(stream_name, shard_id, 'TRIM_HORIZON')
+    shard_iterator = response['ShardIterator']
+
+    # Get the second record
+    response = conn.get_records(shard_iterator, limit=2)
+    second_sequence_id = response['Records'][1]['SequenceNumber']
+
+    # Then get a LATEST iterator (the sequence number is not used for LATEST)
+    response = conn.get_shard_iterator(
+        stream_name, shard_id, 'LATEST', second_sequence_id)
+    shard_iterator = response['ShardIterator']
+
+    # Write some more data
+    conn.put_record(stream_name, "last_record", "last_record")
+
+    response = conn.get_records(shard_iterator)
+    # And the only result returned should be the new item
+    response['Records'].should.have.length_of(1)
+    response['Records'][0]['PartitionKey'].should.equal('last_record')
+    response['Records'][0]['Data'].should.equal('last_record')
+    response['MillisBehindLatest'].should.equal(0)
+
+
+@mock_kinesis
+def test_get_records_at_timestamp():
+    # AT_TIMESTAMP - Read the first record at or after the specified timestamp
+    conn = boto3.client('kinesis', region_name="us-west-2")
+    stream_name = "my_stream"
+    conn.create_stream(StreamName=stream_name, ShardCount=1)
+
+    # Create some data
+    for index in range(1, 5):
+        conn.put_record(StreamName=stream_name,
+                        Data=str(index),
+                        PartitionKey=str(index))
+
+    # Note: boto3 floors the timestamp that we pass to get_shard_iterator to
+    # second precision, even though AWS supports ms precision:
+    # http://docs.aws.amazon.com/kinesis/latest/APIReference/API_GetShardIterator.html
+    # To work around this limitation we wait until we are well into the next
+    # second before capturing the time and storing the records we expect to
+    # retrieve.
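+    # A full one-second sleep guarantees the records above fall in an earlier
+    # (floored) second than the timestamp captured below.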
+ time.sleep(1.0) + timestamp = datetime.datetime.utcnow() + + keys = [str(i) for i in range(5, 10)] + for k in keys: + conn.put_record(StreamName=stream_name, + Data=k, + PartitionKey=k) + + # Get a shard iterator + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='AT_TIMESTAMP', + Timestamp=timestamp) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator) + + response['Records'].should.have.length_of(len(keys)) + partition_keys = [r['PartitionKey'] for r in response['Records']] + partition_keys.should.equal(keys) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis +def test_get_records_at_very_old_timestamp(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + # Create some data + keys = [str(i) for i in range(1, 5)] + for k in keys: + conn.put_record(StreamName=stream_name, + Data=k, + PartitionKey=k) + + # Get a shard iterator + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='AT_TIMESTAMP', + Timestamp=1) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator) + response['Records'].should.have.length_of(len(keys)) + partition_keys = [r['PartitionKey'] for r in response['Records']] + partition_keys.should.equal(keys) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis +def test_get_records_timestamp_filtering(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + conn.put_record(StreamName=stream_name, + Data='0', + PartitionKey='0') + + time.sleep(1.0) + timestamp = datetime.datetime.utcnow() + + conn.put_record(StreamName=stream_name, + Data='1', + PartitionKey='1') + + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='AT_TIMESTAMP', + Timestamp=timestamp) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator) + response['Records'].should.have.length_of(1) + response['Records'][0]['PartitionKey'].should.equal('1') + response['Records'][0]['ApproximateArrivalTimestamp'].should.be.\ + greater_than(timestamp) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis +def test_get_records_millis_behind_latest(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + conn.put_record(StreamName=stream_name, + Data='0', + PartitionKey='0') + time.sleep(1.0) + conn.put_record(StreamName=stream_name, + Data='1', + PartitionKey='1') + + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='TRIM_HORIZON') + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator, Limit=1) + 
response['Records'].should.have.length_of(1) + response['MillisBehindLatest'].should.be.greater_than(0) + + +@mock_kinesis +def test_get_records_at_very_new_timestamp(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + # Create some data + keys = [str(i) for i in range(1, 5)] + for k in keys: + conn.put_record(StreamName=stream_name, + Data=k, + PartitionKey=k) + + timestamp = datetime.datetime.utcnow() + datetime.timedelta(seconds=1) + + # Get a shard iterator + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='AT_TIMESTAMP', + Timestamp=timestamp) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator) + + response['Records'].should.have.length_of(0) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis +def test_get_records_from_empty_stream_at_timestamp(): + conn = boto3.client('kinesis', region_name="us-west-2") + stream_name = "my_stream" + conn.create_stream(StreamName=stream_name, ShardCount=1) + + timestamp = datetime.datetime.utcnow() + + # Get a shard iterator + response = conn.describe_stream(StreamName=stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator(StreamName=stream_name, + ShardId=shard_id, + ShardIteratorType='AT_TIMESTAMP', + Timestamp=timestamp) + shard_iterator = response['ShardIterator'] + + response = conn.get_records(ShardIterator=shard_iterator) + + response['Records'].should.have.length_of(0) + response['MillisBehindLatest'].should.equal(0) + + +@mock_kinesis_deprecated +def test_invalid_shard_iterator_type(): + conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + response = conn.describe_stream(stream_name) + shard_id = response['StreamDescription']['Shards'][0]['ShardId'] + response = conn.get_shard_iterator.when.called_with( + stream_name, shard_id, 'invalid-type').should.throw(InvalidArgumentException) + + +@mock_kinesis_deprecated +def test_add_tags(): + conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + conn.describe_stream(stream_name) + conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) + conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) + conn.add_tags_to_stream(stream_name, {'tag1': 'val3'}) + conn.add_tags_to_stream(stream_name, {'tag2': 'val4'}) + + +@mock_kinesis_deprecated +def test_list_tags(): + conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + conn.describe_stream(stream_name) + conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag1').should.equal('val1') + conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag2').should.equal('val2') + conn.add_tags_to_stream(stream_name, {'tag1': 'val3'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag1').should.equal('val3') + conn.add_tags_to_stream(stream_name, {'tag2': 'val4'}) + tags = 
dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag2').should.equal('val4') + + +@mock_kinesis_deprecated +def test_remove_tags(): + conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = "my_stream" + conn.create_stream(stream_name, 1) + + conn.describe_stream(stream_name) + conn.add_tags_to_stream(stream_name, {'tag1': 'val1'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag1').should.equal('val1') + conn.remove_tags_from_stream(stream_name, ['tag1']) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag1').should.equal(None) + + conn.add_tags_to_stream(stream_name, {'tag2': 'val2'}) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag2').should.equal('val2') + conn.remove_tags_from_stream(stream_name, ['tag2']) + tags = dict([(tag['Key'], tag['Value']) + for tag in conn.list_tags_for_stream(stream_name)['Tags']]) + tags.get('tag2').should.equal(None) + + +@mock_kinesis_deprecated +def test_split_shard(): + conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = 'my_stream' + + conn.create_stream(stream_name, 2) + + # Create some data + for index in range(1, 100): + conn.put_record(stream_name, str(index), str(index)) + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(2) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) + + shard_range = shards[0]['HashKeyRange'] + new_starting_hash = ( + int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2 + conn.split_shard("my_stream", shards[0]['ShardId'], str(new_starting_hash)) + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(3) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) + + shard_range = shards[2]['HashKeyRange'] + new_starting_hash = ( + int(shard_range['EndingHashKey']) + int(shard_range['StartingHashKey'])) // 2 + conn.split_shard("my_stream", shards[2]['ShardId'], str(new_starting_hash)) + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(4) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) + + +@mock_kinesis_deprecated +def test_merge_shards(): + conn = boto.kinesis.connect_to_region("us-west-2") + stream_name = 'my_stream' + + conn.create_stream(stream_name, 4) + + # Create some data + for index in range(1, 100): + conn.put_record(stream_name, str(index), str(index)) + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(4) + + conn.merge_shards.when.called_with( + stream_name, 'shardId-000000000000', 'shardId-000000000002').should.throw(InvalidArgumentException) + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(4) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) + + 
conn.merge_shards(stream_name, 'shardId-000000000000', + 'shardId-000000000001') + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(3) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) + conn.merge_shards(stream_name, 'shardId-000000000002', + 'shardId-000000000000') + + stream_response = conn.describe_stream(stream_name) + + stream = stream_response["StreamDescription"] + shards = stream['Shards'] + shards.should.have.length_of(2) + sum([shard['SequenceNumberRange']['EndingSequenceNumber'] + for shard in shards]).should.equal(99) diff --git a/tests/test_kinesis/test_server.py b/tests/test_kinesis/test_server.py index 527310d75..b88ab1bb2 100644 --- a/tests/test_kinesis/test_server.py +++ b/tests/test_kinesis/test_server.py @@ -1,25 +1,25 @@ -from __future__ import unicode_literals - -import json -import sure # noqa - -import moto.server as server -from moto import mock_kinesis - -''' -Test the different server responses -''' - - -@mock_kinesis -def test_list_streams(): - backend = server.create_backend_app("kinesis") - test_client = backend.test_client() - - res = test_client.get('/?Action=ListStreams') - - json_data = json.loads(res.data.decode("utf-8")) - json_data.should.equal({ - "HasMoreStreams": False, - "StreamNames": [], - }) +from __future__ import unicode_literals + +import json +import sure # noqa + +import moto.server as server +from moto import mock_kinesis + +''' +Test the different server responses +''' + + +@mock_kinesis +def test_list_streams(): + backend = server.create_backend_app("kinesis") + test_client = backend.test_client() + + res = test_client.get('/?Action=ListStreams') + + json_data = json.loads(res.data.decode("utf-8")) + json_data.should.equal({ + "HasMoreStreams": False, + "StreamNames": [], + }) diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index 8bccae27a..830c531a2 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -1,719 +1,719 @@ -from __future__ import unicode_literals -import os, re - -import boto3 -import boto.kms -from boto.exception import JSONResponseError -from boto.kms.exceptions import AlreadyExistsException, NotFoundException -import sure # noqa -from moto import mock_kms, mock_kms_deprecated -from nose.tools import assert_raises -from freezegun import freeze_time -from datetime import datetime, timedelta -from dateutil.tz import tzlocal - - -@mock_kms_deprecated -def test_create_key(): - conn = boto.kms.connect_to_region("us-west-2") - - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - - key['KeyMetadata']['Description'].should.equal("my key") - key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") - key['KeyMetadata']['Enabled'].should.equal(True) - - -@mock_kms_deprecated -def test_describe_key(): - conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - key_id = key['KeyMetadata']['KeyId'] - - key = conn.describe_key(key_id) - key['KeyMetadata']['Description'].should.equal("my key") - key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") - - -@mock_kms_deprecated -def test_describe_key_via_alias(): - conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - 
conn.create_alias(alias_name='alias/my-key-alias', - target_key_id=key['KeyMetadata']['KeyId']) - - alias_key = conn.describe_key('alias/my-key-alias') - alias_key['KeyMetadata']['Description'].should.equal("my key") - alias_key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") - alias_key['KeyMetadata']['Arn'].should.equal(key['KeyMetadata']['Arn']) - - -@mock_kms_deprecated -def test_describe_key_via_alias_not_found(): - conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - conn.create_alias(alias_name='alias/my-key-alias', - target_key_id=key['KeyMetadata']['KeyId']) - - conn.describe_key.when.called_with( - 'alias/not-found-alias').should.throw(JSONResponseError) - - -@mock_kms_deprecated -def test_describe_key_via_arn(): - conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - arn = key['KeyMetadata']['Arn'] - - the_key = conn.describe_key(arn) - the_key['KeyMetadata']['Description'].should.equal("my key") - the_key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") - the_key['KeyMetadata']['KeyId'].should.equal(key['KeyMetadata']['KeyId']) - - -@mock_kms_deprecated -def test_describe_missing_key(): - conn = boto.kms.connect_to_region("us-west-2") - conn.describe_key.when.called_with( - "not-a-key").should.throw(JSONResponseError) - - -@mock_kms_deprecated -def test_list_keys(): - conn = boto.kms.connect_to_region("us-west-2") - - conn.create_key(policy="my policy", description="my key1", - key_usage='ENCRYPT_DECRYPT') - conn.create_key(policy="my policy", description="my key2", - key_usage='ENCRYPT_DECRYPT') - - keys = conn.list_keys() - keys['Keys'].should.have.length_of(2) - - -@mock_kms_deprecated -def test_enable_key_rotation(): - conn = boto.kms.connect_to_region("us-west-2") - - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - key_id = key['KeyMetadata']['KeyId'] - - conn.enable_key_rotation(key_id) - - conn.get_key_rotation_status( - key_id)['KeyRotationEnabled'].should.equal(True) - - -@mock_kms_deprecated -def test_enable_key_rotation_via_arn(): - conn = boto.kms.connect_to_region("us-west-2") - - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - key_id = key['KeyMetadata']['Arn'] - - conn.enable_key_rotation(key_id) - - conn.get_key_rotation_status( - key_id)['KeyRotationEnabled'].should.equal(True) - - -@mock_kms_deprecated -def test_enable_key_rotation_with_missing_key(): - conn = boto.kms.connect_to_region("us-west-2") - conn.enable_key_rotation.when.called_with( - "not-a-key").should.throw(JSONResponseError) - - -@mock_kms_deprecated -def test_enable_key_rotation_with_alias_name_should_fail(): - conn = boto.kms.connect_to_region("us-west-2") - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - conn.create_alias(alias_name='alias/my-key-alias', - target_key_id=key['KeyMetadata']['KeyId']) - - alias_key = conn.describe_key('alias/my-key-alias') - alias_key['KeyMetadata']['Arn'].should.equal(key['KeyMetadata']['Arn']) - - conn.enable_key_rotation.when.called_with( - 'alias/my-alias').should.throw(JSONResponseError) - - -@mock_kms_deprecated -def test_disable_key_rotation(): - conn = boto.kms.connect_to_region("us-west-2") - - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - key_id = 
key['KeyMetadata']['KeyId'] - - conn.enable_key_rotation(key_id) - conn.get_key_rotation_status( - key_id)['KeyRotationEnabled'].should.equal(True) - - conn.disable_key_rotation(key_id) - conn.get_key_rotation_status( - key_id)['KeyRotationEnabled'].should.equal(False) - - -@mock_kms_deprecated -def test_encrypt(): - """ - test_encrypt - Using base64 encoding to merely test that the endpoint was called - """ - conn = boto.kms.connect_to_region("us-west-2") - response = conn.encrypt('key_id', 'encryptme'.encode('utf-8')) - response['CiphertextBlob'].should.equal(b'ZW5jcnlwdG1l') - - -@mock_kms_deprecated -def test_decrypt(): - conn = boto.kms.connect_to_region('us-west-2') - response = conn.decrypt('ZW5jcnlwdG1l'.encode('utf-8')) - response['Plaintext'].should.equal(b'encryptme') - - -@mock_kms_deprecated -def test_disable_key_rotation_with_missing_key(): - conn = boto.kms.connect_to_region("us-west-2") - conn.disable_key_rotation.when.called_with( - "not-a-key").should.throw(JSONResponseError) - - -@mock_kms_deprecated -def test_get_key_rotation_status_with_missing_key(): - conn = boto.kms.connect_to_region("us-west-2") - conn.get_key_rotation_status.when.called_with( - "not-a-key").should.throw(JSONResponseError) - - -@mock_kms_deprecated -def test_get_key_rotation_status(): - conn = boto.kms.connect_to_region("us-west-2") - - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - key_id = key['KeyMetadata']['KeyId'] - - conn.get_key_rotation_status( - key_id)['KeyRotationEnabled'].should.equal(False) - - -@mock_kms_deprecated -def test_create_key_defaults_key_rotation(): - conn = boto.kms.connect_to_region("us-west-2") - - key = conn.create_key(policy="my policy", - description="my key", key_usage='ENCRYPT_DECRYPT') - key_id = key['KeyMetadata']['KeyId'] - - conn.get_key_rotation_status( - key_id)['KeyRotationEnabled'].should.equal(False) - - -@mock_kms_deprecated -def test_get_key_policy(): - conn = boto.kms.connect_to_region('us-west-2') - - key = conn.create_key(policy='my policy', - description='my key1', key_usage='ENCRYPT_DECRYPT') - key_id = key['KeyMetadata']['KeyId'] - - policy = conn.get_key_policy(key_id, 'default') - policy['Policy'].should.equal('my policy') - - -@mock_kms_deprecated -def test_get_key_policy_via_arn(): - conn = boto.kms.connect_to_region('us-west-2') - - key = conn.create_key(policy='my policy', - description='my key1', key_usage='ENCRYPT_DECRYPT') - policy = conn.get_key_policy(key['KeyMetadata']['Arn'], 'default') - - policy['Policy'].should.equal('my policy') - - -@mock_kms_deprecated -def test_put_key_policy(): - conn = boto.kms.connect_to_region('us-west-2') - - key = conn.create_key(policy='my policy', - description='my key1', key_usage='ENCRYPT_DECRYPT') - key_id = key['KeyMetadata']['KeyId'] - - conn.put_key_policy(key_id, 'default', 'new policy') - policy = conn.get_key_policy(key_id, 'default') - policy['Policy'].should.equal('new policy') - - -@mock_kms_deprecated -def test_put_key_policy_via_arn(): - conn = boto.kms.connect_to_region('us-west-2') - - key = conn.create_key(policy='my policy', - description='my key1', key_usage='ENCRYPT_DECRYPT') - key_id = key['KeyMetadata']['Arn'] - - conn.put_key_policy(key_id, 'default', 'new policy') - policy = conn.get_key_policy(key_id, 'default') - policy['Policy'].should.equal('new policy') - - -@mock_kms_deprecated -def test_put_key_policy_via_alias_should_not_update(): - conn = boto.kms.connect_to_region('us-west-2') - - key = conn.create_key(policy='my policy', 
- description='my key1', key_usage='ENCRYPT_DECRYPT') - conn.create_alias(alias_name='alias/my-key-alias', - target_key_id=key['KeyMetadata']['KeyId']) - - conn.put_key_policy.when.called_with( - 'alias/my-key-alias', 'default', 'new policy').should.throw(JSONResponseError) - - policy = conn.get_key_policy(key['KeyMetadata']['KeyId'], 'default') - policy['Policy'].should.equal('my policy') - - -@mock_kms_deprecated -def test_put_key_policy(): - conn = boto.kms.connect_to_region('us-west-2') - - key = conn.create_key(policy='my policy', - description='my key1', key_usage='ENCRYPT_DECRYPT') - conn.put_key_policy(key['KeyMetadata']['Arn'], 'default', 'new policy') - - policy = conn.get_key_policy(key['KeyMetadata']['KeyId'], 'default') - policy['Policy'].should.equal('new policy') - - -@mock_kms_deprecated -def test_list_key_policies(): - conn = boto.kms.connect_to_region('us-west-2') - - key = conn.create_key(policy='my policy', - description='my key1', key_usage='ENCRYPT_DECRYPT') - key_id = key['KeyMetadata']['KeyId'] - - policies = conn.list_key_policies(key_id) - policies['PolicyNames'].should.equal(['default']) - - -@mock_kms_deprecated -def test__create_alias__returns_none_if_correct(): - kms = boto.connect_kms() - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - - resp = kms.create_alias('alias/my-alias', key_id) - - resp.should.be.none - - -@mock_kms_deprecated -def test__create_alias__raises_if_reserved_alias(): - kms = boto.connect_kms() - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - - reserved_aliases = [ - 'alias/aws/ebs', - 'alias/aws/s3', - 'alias/aws/redshift', - 'alias/aws/rds', - ] - - for alias_name in reserved_aliases: - with assert_raises(JSONResponseError) as err: - kms.create_alias(alias_name, key_id) - - ex = err.exception - ex.error_message.should.be.none - ex.error_code.should.equal('NotAuthorizedException') - ex.body.should.equal({'__type': 'NotAuthorizedException'}) - ex.reason.should.equal('Bad Request') - ex.status.should.equal(400) - - -@mock_kms_deprecated -def test__create_alias__can_create_multiple_aliases_for_same_key_id(): - kms = boto.connect_kms() - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - - kms.create_alias('alias/my-alias3', key_id).should.be.none - kms.create_alias('alias/my-alias4', key_id).should.be.none - kms.create_alias('alias/my-alias5', key_id).should.be.none - - -@mock_kms_deprecated -def test__create_alias__raises_if_wrong_prefix(): - kms = boto.connect_kms() - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - - with assert_raises(JSONResponseError) as err: - kms.create_alias('wrongprefix/my-alias', key_id) - - ex = err.exception - ex.error_message.should.equal('Invalid identifier') - ex.error_code.should.equal('ValidationException') - ex.body.should.equal({'message': 'Invalid identifier', - '__type': 'ValidationException'}) - ex.reason.should.equal('Bad Request') - ex.status.should.equal(400) - - -@mock_kms_deprecated -def test__create_alias__raises_if_duplicate(): - region = 'us-west-2' - kms = boto.kms.connect_to_region(region) - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - alias = 'alias/my-alias' - - kms.create_alias(alias, key_id) - - with assert_raises(AlreadyExistsException) as err: - kms.create_alias(alias, key_id) - - ex = err.exception - ex.error_message.should.match(r'An alias with the name arn:aws:kms:{region}:\d{{12}}:{alias} already exists' - 
.format(**locals())) - ex.error_code.should.be.none - ex.box_usage.should.be.none - ex.request_id.should.be.none - ex.body['message'].should.match(r'An alias with the name arn:aws:kms:{region}:\d{{12}}:{alias} already exists' - .format(**locals())) - ex.body['__type'].should.equal('AlreadyExistsException') - ex.reason.should.equal('Bad Request') - ex.status.should.equal(400) - - -@mock_kms_deprecated -def test__create_alias__raises_if_alias_has_restricted_characters(): - kms = boto.connect_kms() - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - - alias_names_with_restricted_characters = [ - 'alias/my-alias!', - 'alias/my-alias$', - 'alias/my-alias@', - ] - - for alias_name in alias_names_with_restricted_characters: - with assert_raises(JSONResponseError) as err: - kms.create_alias(alias_name, key_id) - ex = err.exception - ex.body['__type'].should.equal('ValidationException') - ex.body['message'].should.equal( - "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format(**locals())) - ex.error_code.should.equal('ValidationException') - ex.message.should.equal( - "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format(**locals())) - ex.reason.should.equal('Bad Request') - ex.status.should.equal(400) - - -@mock_kms_deprecated -def test__create_alias__raises_if_alias_has_colon_character(): - # For some reason, colons are not accepted for an alias, even though they - # are accepted by regex ^[a-zA-Z0-9:/_-]+$ - kms = boto.connect_kms() - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - - alias_names_with_restricted_characters = [ - 'alias/my:alias', - ] - - for alias_name in alias_names_with_restricted_characters: - with assert_raises(JSONResponseError) as err: - kms.create_alias(alias_name, key_id) - ex = err.exception - ex.body['__type'].should.equal('ValidationException') - ex.body['message'].should.equal( - "{alias_name} contains invalid characters for an alias".format(**locals())) - ex.error_code.should.equal('ValidationException') - ex.message.should.equal( - "{alias_name} contains invalid characters for an alias".format(**locals())) - ex.reason.should.equal('Bad Request') - ex.status.should.equal(400) - - -@mock_kms_deprecated -def test__create_alias__accepted_characters(): - kms = boto.connect_kms() - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - - alias_names_with_accepted_characters = [ - 'alias/my-alias_/', - 'alias/my_alias-/', - ] - - for alias_name in alias_names_with_accepted_characters: - kms.create_alias(alias_name, key_id) - - -@mock_kms_deprecated -def test__create_alias__raises_if_target_key_id_is_existing_alias(): - kms = boto.connect_kms() - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - alias = 'alias/my-alias' - - kms.create_alias(alias, key_id) - - with assert_raises(JSONResponseError) as err: - kms.create_alias(alias, alias) - - ex = err.exception - ex.body['__type'].should.equal('ValidationException') - ex.body['message'].should.equal('Aliases must refer to keys. Not aliases') - ex.error_code.should.equal('ValidationException') - ex.message.should.equal('Aliases must refer to keys. 
Not aliases') - ex.reason.should.equal('Bad Request') - ex.status.should.equal(400) - - -@mock_kms_deprecated -def test__delete_alias(): - kms = boto.connect_kms() - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - alias = 'alias/my-alias' - - # added another alias here to make sure that the deletion of the alias can - # be done when there are multiple existing aliases. - another_create_resp = kms.create_key() - another_key_id = create_resp['KeyMetadata']['KeyId'] - another_alias = 'alias/another-alias' - - kms.create_alias(alias, key_id) - kms.create_alias(another_alias, another_key_id) - - resp = kms.delete_alias(alias) - - resp.should.be.none - - # we can create the alias again, since it has been deleted - kms.create_alias(alias, key_id) - - -@mock_kms_deprecated -def test__delete_alias__raises_if_wrong_prefix(): - kms = boto.connect_kms() - - with assert_raises(JSONResponseError) as err: - kms.delete_alias('wrongprefix/my-alias') - - ex = err.exception - ex.body['__type'].should.equal('ValidationException') - ex.body['message'].should.equal('Invalid identifier') - ex.error_code.should.equal('ValidationException') - ex.message.should.equal('Invalid identifier') - ex.reason.should.equal('Bad Request') - ex.status.should.equal(400) - - -@mock_kms_deprecated -def test__delete_alias__raises_if_alias_is_not_found(): - region = 'us-west-2' - kms = boto.kms.connect_to_region(region) - alias_name = 'alias/unexisting-alias' - - with assert_raises(NotFoundException) as err: - kms.delete_alias(alias_name) - - ex = err.exception - ex.body['__type'].should.equal('NotFoundException') - ex.body['message'].should.match( - r'Alias arn:aws:kms:{region}:\d{{12}}:{alias_name} is not found.'.format(**locals())) - ex.box_usage.should.be.none - ex.error_code.should.be.none - ex.message.should.match( - r'Alias arn:aws:kms:{region}:\d{{12}}:{alias_name} is not found.'.format(**locals())) - ex.reason.should.equal('Bad Request') - ex.request_id.should.be.none - ex.status.should.equal(400) - - -@mock_kms_deprecated -def test__list_aliases(): - region = "eu-west-1" - kms = boto.kms.connect_to_region(region) - - create_resp = kms.create_key() - key_id = create_resp['KeyMetadata']['KeyId'] - kms.create_alias('alias/my-alias1', key_id) - kms.create_alias('alias/my-alias2', key_id) - kms.create_alias('alias/my-alias3', key_id) - - resp = kms.list_aliases() - - resp['Truncated'].should.be.false - - aliases = resp['Aliases'] - - def has_correct_arn(alias_obj): - alias_name = alias_obj['AliasName'] - alias_arn = alias_obj['AliasArn'] - return re.match(r'arn:aws:kms:{region}:\d{{12}}:{alias_name}'.format(region=region, alias_name=alias_name), - alias_arn) - - len([alias for alias in aliases if - has_correct_arn(alias) and 'alias/aws/ebs' == alias['AliasName']]).should.equal(1) - len([alias for alias in aliases if - has_correct_arn(alias) and 'alias/aws/rds' == alias['AliasName']]).should.equal(1) - len([alias for alias in aliases if - has_correct_arn(alias) and 'alias/aws/redshift' == alias['AliasName']]).should.equal(1) - len([alias for alias in aliases if - has_correct_arn(alias) and 'alias/aws/s3' == alias['AliasName']]).should.equal(1) - - len([alias for alias in aliases if - has_correct_arn(alias) and 'alias/my-alias1' == alias['AliasName']]).should.equal(1) - len([alias for alias in aliases if - has_correct_arn(alias) and 'alias/my-alias2' == alias['AliasName']]).should.equal(1) - - len([alias for alias in aliases if 'TargetKeyId' in alias and key_id == - 
alias['TargetKeyId']]).should.equal(3) - - len(aliases).should.equal(7) - - -@mock_kms_deprecated -def test__assert_valid_key_id(): - from moto.kms.responses import _assert_valid_key_id - import uuid - - _assert_valid_key_id.when.called_with( - "not-a-key").should.throw(JSONResponseError) - _assert_valid_key_id.when.called_with( - str(uuid.uuid4())).should_not.throw(JSONResponseError) - - -@mock_kms_deprecated -def test__assert_default_policy(): - from moto.kms.responses import _assert_default_policy - - _assert_default_policy.when.called_with( - "not-default").should.throw(JSONResponseError) - _assert_default_policy.when.called_with( - "default").should_not.throw(JSONResponseError) - - -@mock_kms -def test_kms_encrypt_boto3(): - client = boto3.client('kms', region_name='us-east-1') - response = client.encrypt(KeyId='foo', Plaintext=b'bar') - - response = client.decrypt(CiphertextBlob=response['CiphertextBlob']) - response['Plaintext'].should.equal(b'bar') - - -@mock_kms -def test_disable_key(): - client = boto3.client('kms', region_name='us-east-1') - key = client.create_key(Description='disable-key') - client.disable_key( - KeyId=key['KeyMetadata']['KeyId'] - ) - - result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) - assert result["KeyMetadata"]["Enabled"] == False - assert result["KeyMetadata"]["KeyState"] == 'Disabled' - - -@mock_kms -def test_enable_key(): - client = boto3.client('kms', region_name='us-east-1') - key = client.create_key(Description='enable-key') - client.disable_key( - KeyId=key['KeyMetadata']['KeyId'] - ) - client.enable_key( - KeyId=key['KeyMetadata']['KeyId'] - ) - - result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) - assert result["KeyMetadata"]["Enabled"] == True - assert result["KeyMetadata"]["KeyState"] == 'Enabled' - - -@mock_kms -def test_schedule_key_deletion(): - client = boto3.client('kms', region_name='us-east-1') - key = client.create_key(Description='schedule-key-deletion') - if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false': - with freeze_time("2015-01-01 12:00:00"): - response = client.schedule_key_deletion( - KeyId=key['KeyMetadata']['KeyId'] - ) - assert response['KeyId'] == key['KeyMetadata']['KeyId'] - assert response['DeletionDate'] == datetime(2015, 1, 31, 12, 0, tzinfo=tzlocal()) - else: - # Can't manipulate time in server mode - response = client.schedule_key_deletion( - KeyId=key['KeyMetadata']['KeyId'] - ) - assert response['KeyId'] == key['KeyMetadata']['KeyId'] - - result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) - assert result["KeyMetadata"]["Enabled"] == False - assert result["KeyMetadata"]["KeyState"] == 'PendingDeletion' - assert 'DeletionDate' in result["KeyMetadata"] - - -@mock_kms -def test_schedule_key_deletion_custom(): - client = boto3.client('kms', region_name='us-east-1') - key = client.create_key(Description='schedule-key-deletion') - if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false': - with freeze_time("2015-01-01 12:00:00"): - response = client.schedule_key_deletion( - KeyId=key['KeyMetadata']['KeyId'], - PendingWindowInDays=7 - ) - assert response['KeyId'] == key['KeyMetadata']['KeyId'] - assert response['DeletionDate'] == datetime(2015, 1, 8, 12, 0, tzinfo=tzlocal()) - else: - # Can't manipulate time in server mode - response = client.schedule_key_deletion( - KeyId=key['KeyMetadata']['KeyId'], - PendingWindowInDays=7 - ) - assert response['KeyId'] == key['KeyMetadata']['KeyId'] - - result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) - 
assert result["KeyMetadata"]["Enabled"] == False - assert result["KeyMetadata"]["KeyState"] == 'PendingDeletion' - assert 'DeletionDate' in result["KeyMetadata"] - - -@mock_kms -def test_cancel_key_deletion(): - client = boto3.client('kms', region_name='us-east-1') - key = client.create_key(Description='cancel-key-deletion') - client.schedule_key_deletion( - KeyId=key['KeyMetadata']['KeyId'] - ) - response = client.cancel_key_deletion( - KeyId=key['KeyMetadata']['KeyId'] - ) - assert response['KeyId'] == key['KeyMetadata']['KeyId'] - - result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) - assert result["KeyMetadata"]["Enabled"] == False - assert result["KeyMetadata"]["KeyState"] == 'Disabled' - assert 'DeletionDate' not in result["KeyMetadata"] +from __future__ import unicode_literals +import os, re + +import boto3 +import boto.kms +from boto.exception import JSONResponseError +from boto.kms.exceptions import AlreadyExistsException, NotFoundException +import sure # noqa +from moto import mock_kms, mock_kms_deprecated +from nose.tools import assert_raises +from freezegun import freeze_time +from datetime import datetime, timedelta +from dateutil.tz import tzlocal + + +@mock_kms_deprecated +def test_create_key(): + conn = boto.kms.connect_to_region("us-west-2") + + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + + key['KeyMetadata']['Description'].should.equal("my key") + key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") + key['KeyMetadata']['Enabled'].should.equal(True) + + +@mock_kms_deprecated +def test_describe_key(): + conn = boto.kms.connect_to_region("us-west-2") + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + key_id = key['KeyMetadata']['KeyId'] + + key = conn.describe_key(key_id) + key['KeyMetadata']['Description'].should.equal("my key") + key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") + + +@mock_kms_deprecated +def test_describe_key_via_alias(): + conn = boto.kms.connect_to_region("us-west-2") + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + conn.create_alias(alias_name='alias/my-key-alias', + target_key_id=key['KeyMetadata']['KeyId']) + + alias_key = conn.describe_key('alias/my-key-alias') + alias_key['KeyMetadata']['Description'].should.equal("my key") + alias_key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") + alias_key['KeyMetadata']['Arn'].should.equal(key['KeyMetadata']['Arn']) + + +@mock_kms_deprecated +def test_describe_key_via_alias_not_found(): + conn = boto.kms.connect_to_region("us-west-2") + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + conn.create_alias(alias_name='alias/my-key-alias', + target_key_id=key['KeyMetadata']['KeyId']) + + conn.describe_key.when.called_with( + 'alias/not-found-alias').should.throw(JSONResponseError) + + +@mock_kms_deprecated +def test_describe_key_via_arn(): + conn = boto.kms.connect_to_region("us-west-2") + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + arn = key['KeyMetadata']['Arn'] + + the_key = conn.describe_key(arn) + the_key['KeyMetadata']['Description'].should.equal("my key") + the_key['KeyMetadata']['KeyUsage'].should.equal("ENCRYPT_DECRYPT") + the_key['KeyMetadata']['KeyId'].should.equal(key['KeyMetadata']['KeyId']) + + +@mock_kms_deprecated +def test_describe_missing_key(): + conn = boto.kms.connect_to_region("us-west-2") + 
conn.describe_key.when.called_with( + "not-a-key").should.throw(JSONResponseError) + + +@mock_kms_deprecated +def test_list_keys(): + conn = boto.kms.connect_to_region("us-west-2") + + conn.create_key(policy="my policy", description="my key1", + key_usage='ENCRYPT_DECRYPT') + conn.create_key(policy="my policy", description="my key2", + key_usage='ENCRYPT_DECRYPT') + + keys = conn.list_keys() + keys['Keys'].should.have.length_of(2) + + +@mock_kms_deprecated +def test_enable_key_rotation(): + conn = boto.kms.connect_to_region("us-west-2") + + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + key_id = key['KeyMetadata']['KeyId'] + + conn.enable_key_rotation(key_id) + + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(True) + + +@mock_kms_deprecated +def test_enable_key_rotation_via_arn(): + conn = boto.kms.connect_to_region("us-west-2") + + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + key_id = key['KeyMetadata']['Arn'] + + conn.enable_key_rotation(key_id) + + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(True) + + +@mock_kms_deprecated +def test_enable_key_rotation_with_missing_key(): + conn = boto.kms.connect_to_region("us-west-2") + conn.enable_key_rotation.when.called_with( + "not-a-key").should.throw(JSONResponseError) + + +@mock_kms_deprecated +def test_enable_key_rotation_with_alias_name_should_fail(): + conn = boto.kms.connect_to_region("us-west-2") + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + conn.create_alias(alias_name='alias/my-key-alias', + target_key_id=key['KeyMetadata']['KeyId']) + + alias_key = conn.describe_key('alias/my-key-alias') + alias_key['KeyMetadata']['Arn'].should.equal(key['KeyMetadata']['Arn']) + + conn.enable_key_rotation.when.called_with( + 'alias/my-alias').should.throw(JSONResponseError) + + +@mock_kms_deprecated +def test_disable_key_rotation(): + conn = boto.kms.connect_to_region("us-west-2") + + key = conn.create_key(policy="my policy", + description="my key", key_usage='ENCRYPT_DECRYPT') + key_id = key['KeyMetadata']['KeyId'] + + conn.enable_key_rotation(key_id) + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(True) + + conn.disable_key_rotation(key_id) + conn.get_key_rotation_status( + key_id)['KeyRotationEnabled'].should.equal(False) + + +@mock_kms_deprecated +def test_encrypt(): + """ + test_encrypt + Using base64 encoding to merely test that the endpoint was called + """ + conn = boto.kms.connect_to_region("us-west-2") + response = conn.encrypt('key_id', 'encryptme'.encode('utf-8')) + response['CiphertextBlob'].should.equal(b'ZW5jcnlwdG1l') + + +@mock_kms_deprecated +def test_decrypt(): + conn = boto.kms.connect_to_region('us-west-2') + response = conn.decrypt('ZW5jcnlwdG1l'.encode('utf-8')) + response['Plaintext'].should.equal(b'encryptme') + + +@mock_kms_deprecated +def test_disable_key_rotation_with_missing_key(): + conn = boto.kms.connect_to_region("us-west-2") + conn.disable_key_rotation.when.called_with( + "not-a-key").should.throw(JSONResponseError) + + +@mock_kms_deprecated +def test_get_key_rotation_status_with_missing_key(): + conn = boto.kms.connect_to_region("us-west-2") + conn.get_key_rotation_status.when.called_with( + "not-a-key").should.throw(JSONResponseError) + + +@mock_kms_deprecated +def test_get_key_rotation_status(): + conn = boto.kms.connect_to_region("us-west-2") + + key = 
conn.create_key(policy="my policy",
+                          description="my key", key_usage='ENCRYPT_DECRYPT')
+    key_id = key['KeyMetadata']['KeyId']
+
+    conn.get_key_rotation_status(
+        key_id)['KeyRotationEnabled'].should.equal(False)
+
+
+@mock_kms_deprecated
+def test_create_key_defaults_key_rotation():
+    conn = boto.kms.connect_to_region("us-west-2")
+
+    key = conn.create_key(policy="my policy",
+                          description="my key", key_usage='ENCRYPT_DECRYPT')
+    key_id = key['KeyMetadata']['KeyId']
+
+    conn.get_key_rotation_status(
+        key_id)['KeyRotationEnabled'].should.equal(False)
+
+
+@mock_kms_deprecated
+def test_get_key_policy():
+    conn = boto.kms.connect_to_region('us-west-2')
+
+    key = conn.create_key(policy='my policy',
+                          description='my key1', key_usage='ENCRYPT_DECRYPT')
+    key_id = key['KeyMetadata']['KeyId']
+
+    policy = conn.get_key_policy(key_id, 'default')
+    policy['Policy'].should.equal('my policy')
+
+
+@mock_kms_deprecated
+def test_get_key_policy_via_arn():
+    conn = boto.kms.connect_to_region('us-west-2')
+
+    key = conn.create_key(policy='my policy',
+                          description='my key1', key_usage='ENCRYPT_DECRYPT')
+    policy = conn.get_key_policy(key['KeyMetadata']['Arn'], 'default')
+
+    policy['Policy'].should.equal('my policy')
+
+
+@mock_kms_deprecated
+def test_put_key_policy():
+    conn = boto.kms.connect_to_region('us-west-2')
+
+    key = conn.create_key(policy='my policy',
+                          description='my key1', key_usage='ENCRYPT_DECRYPT')
+    key_id = key['KeyMetadata']['KeyId']
+
+    conn.put_key_policy(key_id, 'default', 'new policy')
+    policy = conn.get_key_policy(key_id, 'default')
+    policy['Policy'].should.equal('new policy')
+
+
+@mock_kms_deprecated
+def test_put_key_policy_via_arn():
+    conn = boto.kms.connect_to_region('us-west-2')
+
+    key = conn.create_key(policy='my policy',
+                          description='my key1', key_usage='ENCRYPT_DECRYPT')
+    key_id = key['KeyMetadata']['Arn']
+
+    conn.put_key_policy(key_id, 'default', 'new policy')
+    policy = conn.get_key_policy(key_id, 'default')
+    policy['Policy'].should.equal('new policy')
+
+
+@mock_kms_deprecated
+def test_put_key_policy_via_alias_should_not_update():
+    conn = boto.kms.connect_to_region('us-west-2')
+
+    key = conn.create_key(policy='my policy',
+                          description='my key1', key_usage='ENCRYPT_DECRYPT')
+    conn.create_alias(alias_name='alias/my-key-alias',
+                      target_key_id=key['KeyMetadata']['KeyId'])
+
+    conn.put_key_policy.when.called_with(
+        'alias/my-key-alias', 'default', 'new policy').should.throw(JSONResponseError)
+
+    policy = conn.get_key_policy(key['KeyMetadata']['KeyId'], 'default')
+    policy['Policy'].should.equal('my policy')
+
+
+@mock_kms_deprecated
+def test_put_key_policy_via_arn_and_get_via_key_id():
+    conn = boto.kms.connect_to_region('us-west-2')
+
+    key = conn.create_key(policy='my policy',
+                          description='my key1', key_usage='ENCRYPT_DECRYPT')
+    conn.put_key_policy(key['KeyMetadata']['Arn'], 'default', 'new policy')
+
+    policy = conn.get_key_policy(key['KeyMetadata']['KeyId'], 'default')
+    policy['Policy'].should.equal('new policy')
+
+
+@mock_kms_deprecated
+def test_list_key_policies():
+    conn = boto.kms.connect_to_region('us-west-2')
+
+    key = conn.create_key(policy='my policy',
+                          description='my key1', key_usage='ENCRYPT_DECRYPT')
+    key_id = key['KeyMetadata']['KeyId']
+
+    policies = conn.list_key_policies(key_id)
+    policies['PolicyNames'].should.equal(['default'])
+
+
+@mock_kms_deprecated
+def test__create_alias__returns_none_if_correct():
+    kms = boto.connect_kms()
+    create_resp = kms.create_key()
+    key_id = create_resp['KeyMetadata']['KeyId']
+
+    resp = 
kms.create_alias('alias/my-alias', key_id) + + resp.should.be.none + + +@mock_kms_deprecated +def test__create_alias__raises_if_reserved_alias(): + kms = boto.connect_kms() + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + + reserved_aliases = [ + 'alias/aws/ebs', + 'alias/aws/s3', + 'alias/aws/redshift', + 'alias/aws/rds', + ] + + for alias_name in reserved_aliases: + with assert_raises(JSONResponseError) as err: + kms.create_alias(alias_name, key_id) + + ex = err.exception + ex.error_message.should.be.none + ex.error_code.should.equal('NotAuthorizedException') + ex.body.should.equal({'__type': 'NotAuthorizedException'}) + ex.reason.should.equal('Bad Request') + ex.status.should.equal(400) + + +@mock_kms_deprecated +def test__create_alias__can_create_multiple_aliases_for_same_key_id(): + kms = boto.connect_kms() + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + + kms.create_alias('alias/my-alias3', key_id).should.be.none + kms.create_alias('alias/my-alias4', key_id).should.be.none + kms.create_alias('alias/my-alias5', key_id).should.be.none + + +@mock_kms_deprecated +def test__create_alias__raises_if_wrong_prefix(): + kms = boto.connect_kms() + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + + with assert_raises(JSONResponseError) as err: + kms.create_alias('wrongprefix/my-alias', key_id) + + ex = err.exception + ex.error_message.should.equal('Invalid identifier') + ex.error_code.should.equal('ValidationException') + ex.body.should.equal({'message': 'Invalid identifier', + '__type': 'ValidationException'}) + ex.reason.should.equal('Bad Request') + ex.status.should.equal(400) + + +@mock_kms_deprecated +def test__create_alias__raises_if_duplicate(): + region = 'us-west-2' + kms = boto.kms.connect_to_region(region) + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + alias = 'alias/my-alias' + + kms.create_alias(alias, key_id) + + with assert_raises(AlreadyExistsException) as err: + kms.create_alias(alias, key_id) + + ex = err.exception + ex.error_message.should.match(r'An alias with the name arn:aws:kms:{region}:\d{{12}}:{alias} already exists' + .format(**locals())) + ex.error_code.should.be.none + ex.box_usage.should.be.none + ex.request_id.should.be.none + ex.body['message'].should.match(r'An alias with the name arn:aws:kms:{region}:\d{{12}}:{alias} already exists' + .format(**locals())) + ex.body['__type'].should.equal('AlreadyExistsException') + ex.reason.should.equal('Bad Request') + ex.status.should.equal(400) + + +@mock_kms_deprecated +def test__create_alias__raises_if_alias_has_restricted_characters(): + kms = boto.connect_kms() + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + + alias_names_with_restricted_characters = [ + 'alias/my-alias!', + 'alias/my-alias$', + 'alias/my-alias@', + ] + + for alias_name in alias_names_with_restricted_characters: + with assert_raises(JSONResponseError) as err: + kms.create_alias(alias_name, key_id) + ex = err.exception + ex.body['__type'].should.equal('ValidationException') + ex.body['message'].should.equal( + "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular expression pattern: ^[a-zA-Z0-9:/_-]+$".format(**locals())) + ex.error_code.should.equal('ValidationException') + ex.message.should.equal( + "1 validation error detected: Value '{alias_name}' at 'aliasName' failed to satisfy constraint: Member must satisfy regular 
expression pattern: ^[a-zA-Z0-9:/_-]+$".format(**locals())) + ex.reason.should.equal('Bad Request') + ex.status.should.equal(400) + + +@mock_kms_deprecated +def test__create_alias__raises_if_alias_has_colon_character(): + # For some reason, colons are not accepted for an alias, even though they + # are accepted by regex ^[a-zA-Z0-9:/_-]+$ + kms = boto.connect_kms() + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + + alias_names_with_restricted_characters = [ + 'alias/my:alias', + ] + + for alias_name in alias_names_with_restricted_characters: + with assert_raises(JSONResponseError) as err: + kms.create_alias(alias_name, key_id) + ex = err.exception + ex.body['__type'].should.equal('ValidationException') + ex.body['message'].should.equal( + "{alias_name} contains invalid characters for an alias".format(**locals())) + ex.error_code.should.equal('ValidationException') + ex.message.should.equal( + "{alias_name} contains invalid characters for an alias".format(**locals())) + ex.reason.should.equal('Bad Request') + ex.status.should.equal(400) + + +@mock_kms_deprecated +def test__create_alias__accepted_characters(): + kms = boto.connect_kms() + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + + alias_names_with_accepted_characters = [ + 'alias/my-alias_/', + 'alias/my_alias-/', + ] + + for alias_name in alias_names_with_accepted_characters: + kms.create_alias(alias_name, key_id) + + +@mock_kms_deprecated +def test__create_alias__raises_if_target_key_id_is_existing_alias(): + kms = boto.connect_kms() + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + alias = 'alias/my-alias' + + kms.create_alias(alias, key_id) + + with assert_raises(JSONResponseError) as err: + kms.create_alias(alias, alias) + + ex = err.exception + ex.body['__type'].should.equal('ValidationException') + ex.body['message'].should.equal('Aliases must refer to keys. Not aliases') + ex.error_code.should.equal('ValidationException') + ex.message.should.equal('Aliases must refer to keys. Not aliases') + ex.reason.should.equal('Bad Request') + ex.status.should.equal(400) + + +@mock_kms_deprecated +def test__delete_alias(): + kms = boto.connect_kms() + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + alias = 'alias/my-alias' + + # added another alias here to make sure that the deletion of the alias can + # be done when there are multiple existing aliases. 
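+    # The second alias targets a separately created key.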
+ another_create_resp = kms.create_key() + another_key_id = another_create_resp['KeyMetadata']['KeyId'] + another_alias = 'alias/another-alias' + + kms.create_alias(alias, key_id) + kms.create_alias(another_alias, another_key_id) + + resp = kms.delete_alias(alias) + + resp.should.be.none + + # we can create the alias again, since it has been deleted + kms.create_alias(alias, key_id) + + +@mock_kms_deprecated +def test__delete_alias__raises_if_wrong_prefix(): + kms = boto.connect_kms() + + with assert_raises(JSONResponseError) as err: + kms.delete_alias('wrongprefix/my-alias') + + ex = err.exception + ex.body['__type'].should.equal('ValidationException') + ex.body['message'].should.equal('Invalid identifier') + ex.error_code.should.equal('ValidationException') + ex.message.should.equal('Invalid identifier') + ex.reason.should.equal('Bad Request') + ex.status.should.equal(400) + + +@mock_kms_deprecated +def test__delete_alias__raises_if_alias_is_not_found(): + region = 'us-west-2' + kms = boto.kms.connect_to_region(region) + alias_name = 'alias/unexisting-alias' + + with assert_raises(NotFoundException) as err: + kms.delete_alias(alias_name) + + ex = err.exception + ex.body['__type'].should.equal('NotFoundException') + ex.body['message'].should.match( + r'Alias arn:aws:kms:{region}:\d{{12}}:{alias_name} is not found.'.format(**locals())) + ex.box_usage.should.be.none + ex.error_code.should.be.none + ex.message.should.match( + r'Alias arn:aws:kms:{region}:\d{{12}}:{alias_name} is not found.'.format(**locals())) + ex.reason.should.equal('Bad Request') + ex.request_id.should.be.none + ex.status.should.equal(400) + + +@mock_kms_deprecated +def test__list_aliases(): + region = "eu-west-1" + kms = boto.kms.connect_to_region(region) + + create_resp = kms.create_key() + key_id = create_resp['KeyMetadata']['KeyId'] + kms.create_alias('alias/my-alias1', key_id) + kms.create_alias('alias/my-alias2', key_id) + kms.create_alias('alias/my-alias3', key_id) + + resp = kms.list_aliases() + + resp['Truncated'].should.be.false + + aliases = resp['Aliases'] + + def has_correct_arn(alias_obj): + alias_name = alias_obj['AliasName'] + alias_arn = alias_obj['AliasArn'] + return re.match(r'arn:aws:kms:{region}:\d{{12}}:{alias_name}'.format(region=region, alias_name=alias_name), + alias_arn) + + len([alias for alias in aliases if + has_correct_arn(alias) and 'alias/aws/ebs' == alias['AliasName']]).should.equal(1) + len([alias for alias in aliases if + has_correct_arn(alias) and 'alias/aws/rds' == alias['AliasName']]).should.equal(1) + len([alias for alias in aliases if + has_correct_arn(alias) and 'alias/aws/redshift' == alias['AliasName']]).should.equal(1) + len([alias for alias in aliases if + has_correct_arn(alias) and 'alias/aws/s3' == alias['AliasName']]).should.equal(1) + + len([alias for alias in aliases if + has_correct_arn(alias) and 'alias/my-alias1' == alias['AliasName']]).should.equal(1) + len([alias for alias in aliases if + has_correct_arn(alias) and 'alias/my-alias2' == alias['AliasName']]).should.equal(1) + + len([alias for alias in aliases if 'TargetKeyId' in alias and key_id == + alias['TargetKeyId']]).should.equal(3) + + len(aliases).should.equal(7) + + +@mock_kms_deprecated +def test__assert_valid_key_id(): + from moto.kms.responses import _assert_valid_key_id + import uuid + + _assert_valid_key_id.when.called_with( + "not-a-key").should.throw(JSONResponseError) + _assert_valid_key_id.when.called_with( + str(uuid.uuid4())).should_not.throw(JSONResponseError) + + +@mock_kms_deprecated +def 
test__assert_default_policy(): + from moto.kms.responses import _assert_default_policy + + _assert_default_policy.when.called_with( + "not-default").should.throw(JSONResponseError) + _assert_default_policy.when.called_with( + "default").should_not.throw(JSONResponseError) + + +@mock_kms +def test_kms_encrypt_boto3(): + client = boto3.client('kms', region_name='us-east-1') + response = client.encrypt(KeyId='foo', Plaintext=b'bar') + + response = client.decrypt(CiphertextBlob=response['CiphertextBlob']) + response['Plaintext'].should.equal(b'bar') + + +@mock_kms +def test_disable_key(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='disable-key') + client.disable_key( + KeyId=key['KeyMetadata']['KeyId'] + ) + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == 'Disabled' + + +@mock_kms +def test_enable_key(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='enable-key') + client.disable_key( + KeyId=key['KeyMetadata']['KeyId'] + ) + client.enable_key( + KeyId=key['KeyMetadata']['KeyId'] + ) + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == True + assert result["KeyMetadata"]["KeyState"] == 'Enabled' + + +@mock_kms +def test_schedule_key_deletion(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='schedule-key-deletion') + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false': + with freeze_time("2015-01-01 12:00:00"): + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + assert response['DeletionDate'] == datetime(2015, 1, 31, 12, 0, tzinfo=tzlocal()) + else: + # Can't manipulate time in server mode + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == 'PendingDeletion' + assert 'DeletionDate' in result["KeyMetadata"] + + +@mock_kms +def test_schedule_key_deletion_custom(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='schedule-key-deletion') + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false': + with freeze_time("2015-01-01 12:00:00"): + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'], + PendingWindowInDays=7 + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + assert response['DeletionDate'] == datetime(2015, 1, 8, 12, 0, tzinfo=tzlocal()) + else: + # Can't manipulate time in server mode + response = client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'], + PendingWindowInDays=7 + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == 'PendingDeletion' + assert 'DeletionDate' in result["KeyMetadata"] + + +@mock_kms +def test_cancel_key_deletion(): + client = boto3.client('kms', region_name='us-east-1') + key = client.create_key(Description='cancel-key-deletion') + client.schedule_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + response = 
client.cancel_key_deletion( + KeyId=key['KeyMetadata']['KeyId'] + ) + assert response['KeyId'] == key['KeyMetadata']['KeyId'] + + result = client.describe_key(KeyId=key['KeyMetadata']['KeyId']) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == 'Disabled' + assert 'DeletionDate' not in result["KeyMetadata"] diff --git a/tests/test_kms/test_server.py b/tests/test_kms/test_server.py index 7b8f74e3b..a5aac7d94 100644 --- a/tests/test_kms/test_server.py +++ b/tests/test_kms/test_server.py @@ -1,25 +1,25 @@ -from __future__ import unicode_literals - -import json -import sure # noqa - -import moto.server as server -from moto import mock_kms - -''' -Test the different server responses -''' - - -@mock_kms -def test_list_keys(): - backend = server.create_backend_app("kms") - test_client = backend.test_client() - - res = test_client.get('/?Action=ListKeys') - - json.loads(res.data.decode("utf-8")).should.equal({ - "Keys": [], - "NextMarker": None, - "Truncated": False, - }) +from __future__ import unicode_literals + +import json +import sure # noqa + +import moto.server as server +from moto import mock_kms + +''' +Test the different server responses +''' + + +@mock_kms +def test_list_keys(): + backend = server.create_backend_app("kms") + test_client = backend.test_client() + + res = test_client.get('/?Action=ListKeys') + + json.loads(res.data.decode("utf-8")).should.equal({ + "Keys": [], + "NextMarker": None, + "Truncated": False, + }) diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index e3d46fd87..443bc8c2f 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -1,128 +1,128 @@ -import boto3 -import sure # noqa -import six -from botocore.exceptions import ClientError - -from moto import mock_logs, settings -from nose.tools import assert_raises - -_logs_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2' - - -@mock_logs -def test_log_group_create(): - conn = boto3.client('logs', 'us-west-2') - log_group_name = 'dummy' - response = conn.create_log_group(logGroupName=log_group_name) - - response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) - assert len(response['logGroups']) == 1 - - response = conn.delete_log_group(logGroupName=log_group_name) - - -@mock_logs -def test_exceptions(): - conn = boto3.client('logs', 'us-west-2') - log_group_name = 'dummy' - log_stream_name = 'dummp-stream' - conn.create_log_group(logGroupName=log_group_name) - with assert_raises(ClientError): - conn.create_log_group(logGroupName=log_group_name) - - # descrine_log_groups is not implemented yet - - conn.create_log_stream( - logGroupName=log_group_name, - logStreamName=log_stream_name - ) - with assert_raises(ClientError): - conn.create_log_stream( - logGroupName=log_group_name, - logStreamName=log_stream_name - ) - - conn.put_log_events( - logGroupName=log_group_name, - logStreamName=log_stream_name, - logEvents=[ - { - 'timestamp': 0, - 'message': 'line' - }, - ], - ) - - with assert_raises(ClientError): - conn.put_log_events( - logGroupName=log_group_name, - logStreamName="invalid-stream", - logEvents=[ - { - 'timestamp': 0, - 'message': 'line' - }, - ], - ) - - -@mock_logs -def test_put_logs(): - conn = boto3.client('logs', 'us-west-2') - log_group_name = 'dummy' - log_stream_name = 'stream' - conn.create_log_group(logGroupName=log_group_name) - conn.create_log_stream( - logGroupName=log_group_name, - logStreamName=log_stream_name - ) - messages = [ - {'timestamp': 0, 'message': 'hello'}, - 
{'timestamp': 0, 'message': 'world'} - ] - putRes = conn.put_log_events( - logGroupName=log_group_name, - logStreamName=log_stream_name, - logEvents=messages - ) - res = conn.get_log_events( - logGroupName=log_group_name, - logStreamName=log_stream_name - ) - events = res['events'] - nextSequenceToken = putRes['nextSequenceToken'] - assert isinstance(nextSequenceToken, six.string_types) == True - assert len(nextSequenceToken) == 56 - events.should.have.length_of(2) - - -@mock_logs -def test_filter_logs_interleaved(): - conn = boto3.client('logs', 'us-west-2') - log_group_name = 'dummy' - log_stream_name = 'stream' - conn.create_log_group(logGroupName=log_group_name) - conn.create_log_stream( - logGroupName=log_group_name, - logStreamName=log_stream_name - ) - messages = [ - {'timestamp': 0, 'message': 'hello'}, - {'timestamp': 0, 'message': 'world'} - ] - conn.put_log_events( - logGroupName=log_group_name, - logStreamName=log_stream_name, - logEvents=messages - ) - res = conn.filter_log_events( - logGroupName=log_group_name, - logStreamNames=[log_stream_name], - interleaved=True, - ) - events = res['events'] - for original_message, resulting_event in zip(messages, events): - resulting_event['eventId'].should.equal(str(resulting_event['eventId'])) - resulting_event['timestamp'].should.equal(original_message['timestamp']) - resulting_event['message'].should.equal(original_message['message']) - +import boto3 +import sure # noqa +import six +from botocore.exceptions import ClientError + +from moto import mock_logs, settings +from nose.tools import assert_raises + +_logs_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2' + + +@mock_logs +def test_log_group_create(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + response = conn.create_log_group(logGroupName=log_group_name) + + response = conn.describe_log_groups(logGroupNamePrefix=log_group_name) + assert len(response['logGroups']) == 1 + + response = conn.delete_log_group(logGroupName=log_group_name) + + +@mock_logs +def test_exceptions(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + log_stream_name = 'dummy-stream' + conn.create_log_group(logGroupName=log_group_name) + with assert_raises(ClientError): + conn.create_log_group(logGroupName=log_group_name) + + # describe_log_groups is not implemented yet + + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + with assert_raises(ClientError): + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + + conn.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=[ + { + 'timestamp': 0, + 'message': 'line' + }, + ], + ) + + with assert_raises(ClientError): + conn.put_log_events( + logGroupName=log_group_name, + logStreamName="invalid-stream", + logEvents=[ + { + 'timestamp': 0, + 'message': 'line' + }, + ], + ) + + +@mock_logs +def test_put_logs(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + log_stream_name = 'stream' + conn.create_log_group(logGroupName=log_group_name) + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + messages = [ + {'timestamp': 0, 'message': 'hello'}, + {'timestamp': 0, 'message': 'world'} + ] + putRes = conn.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=messages + ) + res = conn.get_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + events = 
res['events'] + nextSequenceToken = putRes['nextSequenceToken'] + assert isinstance(nextSequenceToken, six.string_types) == True + assert len(nextSequenceToken) == 56 + events.should.have.length_of(2) + + +@mock_logs +def test_filter_logs_interleaved(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + log_stream_name = 'stream' + conn.create_log_group(logGroupName=log_group_name) + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + messages = [ + {'timestamp': 0, 'message': 'hello'}, + {'timestamp': 0, 'message': 'world'} + ] + conn.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=messages + ) + res = conn.filter_log_events( + logGroupName=log_group_name, + logStreamNames=[log_stream_name], + interleaved=True, + ) + events = res['events'] + for original_message, resulting_event in zip(messages, events): + resulting_event['eventId'].should.equal(str(resulting_event['eventId'])) + resulting_event['timestamp'].should.equal(original_message['timestamp']) + resulting_event['message'].should.equal(original_message['message']) + diff --git a/tests/test_opsworks/test_apps.py b/tests/test_opsworks/test_apps.py index 37d0f2fe4..d13ce8eaf 100644 --- a/tests/test_opsworks/test_apps.py +++ b/tests/test_opsworks/test_apps.py @@ -1,102 +1,102 @@ -from __future__ import unicode_literals -import boto3 -from freezegun import freeze_time -import sure # noqa -import re - -from moto import mock_opsworks - - -@freeze_time("2015-01-01") -@mock_opsworks -def test_create_app_response(): - client = boto3.client('opsworks', region_name='us-east-1') - stack_id = client.create_stack( - Name="test_stack_1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - - response = client.create_app( - StackId=stack_id, - Type="other", - Name="TestApp" - ) - - response.should.contain("AppId") - - second_stack_id = client.create_stack( - Name="test_stack_2", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - - response = client.create_app( - StackId=second_stack_id, - Type="other", - Name="TestApp" - ) - - response.should.contain("AppId") - - # ClientError - client.create_app.when.called_with( - StackId=stack_id, - Type="other", - Name="TestApp" - ).should.throw( - Exception, re.compile(r'already an app named "TestApp"') - ) - - # ClientError - client.create_app.when.called_with( - StackId="nothere", - Type="other", - Name="TestApp" - ).should.throw( - Exception, "nothere" - ) - -@freeze_time("2015-01-01") -@mock_opsworks -def test_describe_apps(): - client = boto3.client('opsworks', region_name='us-east-1') - stack_id = client.create_stack( - Name="test_stack_1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - app_id = client.create_app( - StackId=stack_id, - Type="other", - Name="TestApp" - )['AppId'] - - rv1 = client.describe_apps(StackId=stack_id) - rv2 = client.describe_apps(AppIds=[app_id]) - rv1['Apps'].should.equal(rv2['Apps']) - - rv1['Apps'][0]['Name'].should.equal("TestApp") - - # ClientError - client.describe_apps.when.called_with( - StackId=stack_id, - AppIds=[app_id] - ).should.throw( - Exception, "Please provide one or more app IDs or a stack ID" - ) - # ClientError - client.describe_apps.when.called_with( - StackId="nothere" - ).should.throw( - Exception, "Unable to find stack with ID nothere" - ) - # ClientError - 
client.describe_apps.when.called_with( - AppIds=["nothere"] - ).should.throw( - Exception, "nothere" - ) +from __future__ import unicode_literals +import boto3 +from freezegun import freeze_time +import sure # noqa +import re + +from moto import mock_opsworks + + +@freeze_time("2015-01-01") +@mock_opsworks +def test_create_app_response(): + client = boto3.client('opsworks', region_name='us-east-1') + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + response = client.create_app( + StackId=stack_id, + Type="other", + Name="TestApp" + ) + + response.should.contain("AppId") + + second_stack_id = client.create_stack( + Name="test_stack_2", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + response = client.create_app( + StackId=second_stack_id, + Type="other", + Name="TestApp" + ) + + response.should.contain("AppId") + + # ClientError + client.create_app.when.called_with( + StackId=stack_id, + Type="other", + Name="TestApp" + ).should.throw( + Exception, re.compile(r'already an app named "TestApp"') + ) + + # ClientError + client.create_app.when.called_with( + StackId="nothere", + Type="other", + Name="TestApp" + ).should.throw( + Exception, "nothere" + ) + +@freeze_time("2015-01-01") +@mock_opsworks +def test_describe_apps(): + client = boto3.client('opsworks', region_name='us-east-1') + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + app_id = client.create_app( + StackId=stack_id, + Type="other", + Name="TestApp" + )['AppId'] + + rv1 = client.describe_apps(StackId=stack_id) + rv2 = client.describe_apps(AppIds=[app_id]) + rv1['Apps'].should.equal(rv2['Apps']) + + rv1['Apps'][0]['Name'].should.equal("TestApp") + + # ClientError + client.describe_apps.when.called_with( + StackId=stack_id, + AppIds=[app_id] + ).should.throw( + Exception, "Please provide one or more app IDs or a stack ID" + ) + # ClientError + client.describe_apps.when.called_with( + StackId="nothere" + ).should.throw( + Exception, "Unable to find stack with ID nothere" + ) + # ClientError + client.describe_apps.when.called_with( + AppIds=["nothere"] + ).should.throw( + Exception, "nothere" + ) diff --git a/tests/test_opsworks/test_instances.py b/tests/test_opsworks/test_instances.py index f594a87c8..25260ad78 100644 --- a/tests/test_opsworks/test_instances.py +++ b/tests/test_opsworks/test_instances.py @@ -1,224 +1,224 @@ -from __future__ import unicode_literals -import boto3 -import sure # noqa - -from moto import mock_opsworks -from moto import mock_ec2 - - -@mock_opsworks -def test_create_instance(): - client = boto3.client('opsworks', region_name='us-east-1') - stack_id = client.create_stack( - Name="test_stack_1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - - layer_id = client.create_layer( - StackId=stack_id, - Type="custom", - Name="TestLayer", - Shortname="TestLayerShortName" - )['LayerId'] - - second_stack_id = client.create_stack( - Name="test_stack_2", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - - second_layer_id = client.create_layer( - StackId=second_stack_id, - Type="custom", - Name="SecondTestLayer", - Shortname="SecondTestLayerShortName" - )['LayerId'] - - response = 
client.create_instance( - StackId=stack_id, LayerIds=[layer_id], InstanceType="t2.micro" - ) - - response.should.contain("InstanceId") - - client.create_instance.when.called_with( - StackId="nothere", LayerIds=[layer_id], InstanceType="t2.micro" - ).should.throw(Exception, "Unable to find stack with ID nothere") - - client.create_instance.when.called_with( - StackId=stack_id, LayerIds=["nothere"], InstanceType="t2.micro" - ).should.throw(Exception, "nothere") - # ClientError - client.create_instance.when.called_with( - StackId=stack_id, LayerIds=[second_layer_id], InstanceType="t2.micro" - ).should.throw(Exception, "Please only provide layer IDs from the same stack") - # ClientError - client.start_instance.when.called_with( - InstanceId="nothere" - ).should.throw(Exception, "Unable to find instance with ID nothere") - - -@mock_opsworks -def test_describe_instances(): - """ - create two stacks, with 1 layer and 2 layers (S1L1, S2L1, S2L2) - - populate S1L1 with 2 instances (S1L1_i1, S1L1_i2) - populate S2L1 with 1 instance (S2L1_i1) - populate S2L2 with 3 instances (S2L2_i1..2) - """ - - client = boto3.client('opsworks', region_name='us-east-1') - S1 = client.create_stack( - Name="S1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - S1L1 = client.create_layer( - StackId=S1, - Type="custom", - Name="S1L1", - Shortname="S1L1" - )['LayerId'] - S2 = client.create_stack( - Name="S2", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - S2L1 = client.create_layer( - StackId=S2, - Type="custom", - Name="S2L1", - Shortname="S2L1" - )['LayerId'] - S2L2 = client.create_layer( - StackId=S2, - Type="custom", - Name="S2L2", - Shortname="S2L2" - )['LayerId'] - - S1L1_i1 = client.create_instance( - StackId=S1, LayerIds=[S1L1], InstanceType="t2.micro" - )['InstanceId'] - S1L1_i2 = client.create_instance( - StackId=S1, LayerIds=[S1L1], InstanceType="t2.micro" - )['InstanceId'] - S2L1_i1 = client.create_instance( - StackId=S2, LayerIds=[S2L1], InstanceType="t2.micro" - )['InstanceId'] - S2L2_i1 = client.create_instance( - StackId=S2, LayerIds=[S2L2], InstanceType="t2.micro" - )['InstanceId'] - S2L2_i2 = client.create_instance( - StackId=S2, LayerIds=[S2L2], InstanceType="t2.micro" - )['InstanceId'] - - # instances in Stack 1 - response = client.describe_instances(StackId=S1)['Instances'] - response.should.have.length_of(2) - S1L1_i1.should.be.within([i["InstanceId"] for i in response]) - S1L1_i2.should.be.within([i["InstanceId"] for i in response]) - - response2 = client.describe_instances( - InstanceIds=[S1L1_i1, S1L1_i2])['Instances'] - sorted(response2, key=lambda d: d['InstanceId']).should.equal( - sorted(response, key=lambda d: d['InstanceId'])) - - response3 = client.describe_instances(LayerId=S1L1)['Instances'] - sorted(response3, key=lambda d: d['InstanceId']).should.equal( - sorted(response, key=lambda d: d['InstanceId'])) - - response = client.describe_instances(StackId=S1)['Instances'] - response.should.have.length_of(2) - S1L1_i1.should.be.within([i["InstanceId"] for i in response]) - S1L1_i2.should.be.within([i["InstanceId"] for i in response]) - - # instances in Stack 2 - response = client.describe_instances(StackId=S2)['Instances'] - response.should.have.length_of(3) - S2L1_i1.should.be.within([i["InstanceId"] for i in response]) - S2L2_i1.should.be.within([i["InstanceId"] for i in response]) - S2L2_i2.should.be.within([i["InstanceId"] for i in response]) - - response = 
client.describe_instances(LayerId=S2L1)['Instances'] - response.should.have.length_of(1) - S2L1_i1.should.be.within([i["InstanceId"] for i in response]) - - response = client.describe_instances(LayerId=S2L2)['Instances'] - response.should.have.length_of(2) - S2L1_i1.should_not.be.within([i["InstanceId"] for i in response]) - - # ClientError - client.describe_instances.when.called_with( - StackId=S1, - LayerId=S1L1 - ).should.throw( - Exception, "Please provide either one or more" - ) - # ClientError - client.describe_instances.when.called_with( - StackId="nothere" - ).should.throw( - Exception, "nothere" - ) - # ClientError - client.describe_instances.when.called_with( - LayerId="nothere" - ).should.throw( - Exception, "nothere" - ) - # ClientError - client.describe_instances.when.called_with( - InstanceIds=["nothere"] - ).should.throw( - Exception, "nothere" - ) - - -@mock_opsworks -@mock_ec2 -def test_ec2_integration(): - """ - instances created via OpsWorks should be discoverable via ec2 - """ - - opsworks = boto3.client('opsworks', region_name='us-east-1') - stack_id = opsworks.create_stack( - Name="S1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - - layer_id = opsworks.create_layer( - StackId=stack_id, - Type="custom", - Name="S1L1", - Shortname="S1L1" - )['LayerId'] - - instance_id = opsworks.create_instance( - StackId=stack_id, LayerIds=[layer_id], InstanceType="t2.micro", SshKeyName="testSSH" - )['InstanceId'] - - ec2 = boto3.client('ec2', region_name='us-east-1') - - # Before starting the instance, it shouldn't be discoverable via ec2 - reservations = ec2.describe_instances()['Reservations'] - assert reservations.should.be.empty - - # After starting the instance, it should be discoverable via ec2 - opsworks.start_instance(InstanceId=instance_id) - reservations = ec2.describe_instances()['Reservations'] - reservations[0]['Instances'].should.have.length_of(1) - instance = reservations[0]['Instances'][0] - opsworks_instance = opsworks.describe_instances(StackId=stack_id)[ - 'Instances'][0] - - instance['InstanceId'].should.equal(opsworks_instance['Ec2InstanceId']) - instance['PrivateIpAddress'].should.equal(opsworks_instance['PrivateIp']) +from __future__ import unicode_literals +import boto3 +import sure # noqa + +from moto import mock_opsworks +from moto import mock_ec2 + + +@mock_opsworks +def test_create_instance(): + client = boto3.client('opsworks', region_name='us-east-1') + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + layer_id = client.create_layer( + StackId=stack_id, + Type="custom", + Name="TestLayer", + Shortname="TestLayerShortName" + )['LayerId'] + + second_stack_id = client.create_stack( + Name="test_stack_2", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + second_layer_id = client.create_layer( + StackId=second_stack_id, + Type="custom", + Name="SecondTestLayer", + Shortname="SecondTestLayerShortName" + )['LayerId'] + + response = client.create_instance( + StackId=stack_id, LayerIds=[layer_id], InstanceType="t2.micro" + ) + + response.should.contain("InstanceId") + + client.create_instance.when.called_with( + StackId="nothere", LayerIds=[layer_id], InstanceType="t2.micro" + ).should.throw(Exception, "Unable to find stack with ID nothere") + + client.create_instance.when.called_with( + StackId=stack_id, 
LayerIds=["nothere"], InstanceType="t2.micro" + ).should.throw(Exception, "nothere") + # ClientError + client.create_instance.when.called_with( + StackId=stack_id, LayerIds=[second_layer_id], InstanceType="t2.micro" + ).should.throw(Exception, "Please only provide layer IDs from the same stack") + # ClientError + client.start_instance.when.called_with( + InstanceId="nothere" + ).should.throw(Exception, "Unable to find instance with ID nothere") + + +@mock_opsworks +def test_describe_instances(): + """ + create two stacks: one with a single layer (S1L1) and one with two layers (S2L1, S2L2) + + populate S1L1 with 2 instances (S1L1_i1, S1L1_i2) + populate S2L1 with 1 instance (S2L1_i1) + populate S2L2 with 2 instances (S2L2_i1, S2L2_i2) + """ + + client = boto3.client('opsworks', region_name='us-east-1') + S1 = client.create_stack( + Name="S1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + S1L1 = client.create_layer( + StackId=S1, + Type="custom", + Name="S1L1", + Shortname="S1L1" + )['LayerId'] + S2 = client.create_stack( + Name="S2", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + S2L1 = client.create_layer( + StackId=S2, + Type="custom", + Name="S2L1", + Shortname="S2L1" + )['LayerId'] + S2L2 = client.create_layer( + StackId=S2, + Type="custom", + Name="S2L2", + Shortname="S2L2" + )['LayerId'] + + S1L1_i1 = client.create_instance( + StackId=S1, LayerIds=[S1L1], InstanceType="t2.micro" + )['InstanceId'] + S1L1_i2 = client.create_instance( + StackId=S1, LayerIds=[S1L1], InstanceType="t2.micro" + )['InstanceId'] + S2L1_i1 = client.create_instance( + StackId=S2, LayerIds=[S2L1], InstanceType="t2.micro" + )['InstanceId'] + S2L2_i1 = client.create_instance( + StackId=S2, LayerIds=[S2L2], InstanceType="t2.micro" + )['InstanceId'] + S2L2_i2 = client.create_instance( + StackId=S2, LayerIds=[S2L2], InstanceType="t2.micro" + )['InstanceId'] + + # instances in Stack 1 + response = client.describe_instances(StackId=S1)['Instances'] + response.should.have.length_of(2) + S1L1_i1.should.be.within([i["InstanceId"] for i in response]) + S1L1_i2.should.be.within([i["InstanceId"] for i in response]) + + response2 = client.describe_instances( + InstanceIds=[S1L1_i1, S1L1_i2])['Instances'] + sorted(response2, key=lambda d: d['InstanceId']).should.equal( + sorted(response, key=lambda d: d['InstanceId'])) + + response3 = client.describe_instances(LayerId=S1L1)['Instances'] + sorted(response3, key=lambda d: d['InstanceId']).should.equal( + sorted(response, key=lambda d: d['InstanceId'])) + + response = client.describe_instances(StackId=S1)['Instances'] + response.should.have.length_of(2) + S1L1_i1.should.be.within([i["InstanceId"] for i in response]) + S1L1_i2.should.be.within([i["InstanceId"] for i in response]) + + # instances in Stack 2 + response = client.describe_instances(StackId=S2)['Instances'] + response.should.have.length_of(3) + S2L1_i1.should.be.within([i["InstanceId"] for i in response]) + S2L2_i1.should.be.within([i["InstanceId"] for i in response]) + S2L2_i2.should.be.within([i["InstanceId"] for i in response]) + + response = client.describe_instances(LayerId=S2L1)['Instances'] + response.should.have.length_of(1) + S2L1_i1.should.be.within([i["InstanceId"] for i in response]) + + response = client.describe_instances(LayerId=S2L2)['Instances'] + response.should.have.length_of(2) + S2L1_i1.should_not.be.within([i["InstanceId"] for i in response]) + + # ClientError + 
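# describe_instances treats StackId, LayerId and InstanceIds as mutually + # exclusive filters, so combining StackId and LayerId below must raise + 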
client.describe_instances.when.called_with( + StackId=S1, + LayerId=S1L1 + ).should.throw( + Exception, "Please provide either one or more" + ) + # ClientError + client.describe_instances.when.called_with( + StackId="nothere" + ).should.throw( + Exception, "nothere" + ) + # ClientError + client.describe_instances.when.called_with( + LayerId="nothere" + ).should.throw( + Exception, "nothere" + ) + # ClientError + client.describe_instances.when.called_with( + InstanceIds=["nothere"] + ).should.throw( + Exception, "nothere" + ) + + +@mock_opsworks +@mock_ec2 +def test_ec2_integration(): + """ + instances created via OpsWorks should be discoverable via ec2 + """ + + opsworks = boto3.client('opsworks', region_name='us-east-1') + stack_id = opsworks.create_stack( + Name="S1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + layer_id = opsworks.create_layer( + StackId=stack_id, + Type="custom", + Name="S1L1", + Shortname="S1L1" + )['LayerId'] + + instance_id = opsworks.create_instance( + StackId=stack_id, LayerIds=[layer_id], InstanceType="t2.micro", SshKeyName="testSSH" + )['InstanceId'] + + ec2 = boto3.client('ec2', region_name='us-east-1') + + # Before starting the instance, it shouldn't be discoverable via ec2 + reservations = ec2.describe_instances()['Reservations'] + assert reservations.should.be.empty + + # After starting the instance, it should be discoverable via ec2 + opsworks.start_instance(InstanceId=instance_id) + reservations = ec2.describe_instances()['Reservations'] + reservations[0]['Instances'].should.have.length_of(1) + instance = reservations[0]['Instances'][0] + opsworks_instance = opsworks.describe_instances(StackId=stack_id)[ + 'Instances'][0] + + instance['InstanceId'].should.equal(opsworks_instance['Ec2InstanceId']) + instance['PrivateIpAddress'].should.equal(opsworks_instance['PrivateIp']) diff --git a/tests/test_opsworks/test_layers.py b/tests/test_opsworks/test_layers.py index 9c640dfc3..035c246e2 100644 --- a/tests/test_opsworks/test_layers.py +++ b/tests/test_opsworks/test_layers.py @@ -1,117 +1,117 @@ -from __future__ import unicode_literals -import boto3 -from freezegun import freeze_time -import sure # noqa -import re - -from moto import mock_opsworks - - -@freeze_time("2015-01-01") -@mock_opsworks -def test_create_layer_response(): - client = boto3.client('opsworks', region_name='us-east-1') - stack_id = client.create_stack( - Name="test_stack_1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - - response = client.create_layer( - StackId=stack_id, - Type="custom", - Name="TestLayer", - Shortname="TestLayerShortName" - ) - - response.should.contain("LayerId") - - second_stack_id = client.create_stack( - Name="test_stack_2", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - - response = client.create_layer( - StackId=second_stack_id, - Type="custom", - Name="TestLayer", - Shortname="TestLayerShortName" - ) - - response.should.contain("LayerId") - - # ClientError - client.create_layer.when.called_with( - StackId=stack_id, - Type="custom", - Name="TestLayer", - Shortname="_" - ).should.throw( - Exception, re.compile(r'already a layer named "TestLayer"') - ) - # ClientError - client.create_layer.when.called_with( - StackId=stack_id, - Type="custom", - Name="_", - Shortname="TestLayerShortName" - ).should.throw( - Exception, re.compile( - r'already a layer with shortname 
"TestLayerShortName"') - ) - # ClientError - client.create_layer.when.called_with( - StackId="nothere", - Type="custom", - Name="TestLayer", - Shortname="_" - ).should.throw( - Exception, "nothere" - ) - - -@freeze_time("2015-01-01") -@mock_opsworks -def test_describe_layers(): - client = boto3.client('opsworks', region_name='us-east-1') - stack_id = client.create_stack( - Name="test_stack_1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - )['StackId'] - layer_id = client.create_layer( - StackId=stack_id, - Type="custom", - Name="TestLayer", - Shortname="TestLayerShortName" - )['LayerId'] - - rv1 = client.describe_layers(StackId=stack_id) - rv2 = client.describe_layers(LayerIds=[layer_id]) - rv1['Layers'].should.equal(rv2['Layers']) - - rv1['Layers'][0]['Name'].should.equal("TestLayer") - - # ClientError - client.describe_layers.when.called_with( - StackId=stack_id, - LayerIds=[layer_id] - ).should.throw( - Exception, "Please provide one or more layer IDs or a stack ID" - ) - # ClientError - client.describe_layers.when.called_with( - StackId="nothere" - ).should.throw( - Exception, "Unable to find stack with ID nothere" - ) - # ClientError - client.describe_layers.when.called_with( - LayerIds=["nothere"] - ).should.throw( - Exception, "nothere" - ) +from __future__ import unicode_literals +import boto3 +from freezegun import freeze_time +import sure # noqa +import re + +from moto import mock_opsworks + + +@freeze_time("2015-01-01") +@mock_opsworks +def test_create_layer_response(): + client = boto3.client('opsworks', region_name='us-east-1') + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + response = client.create_layer( + StackId=stack_id, + Type="custom", + Name="TestLayer", + Shortname="TestLayerShortName" + ) + + response.should.contain("LayerId") + + second_stack_id = client.create_stack( + Name="test_stack_2", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + response = client.create_layer( + StackId=second_stack_id, + Type="custom", + Name="TestLayer", + Shortname="TestLayerShortName" + ) + + response.should.contain("LayerId") + + # ClientError + client.create_layer.when.called_with( + StackId=stack_id, + Type="custom", + Name="TestLayer", + Shortname="_" + ).should.throw( + Exception, re.compile(r'already a layer named "TestLayer"') + ) + # ClientError + client.create_layer.when.called_with( + StackId=stack_id, + Type="custom", + Name="_", + Shortname="TestLayerShortName" + ).should.throw( + Exception, re.compile( + r'already a layer with shortname "TestLayerShortName"') + ) + # ClientError + client.create_layer.when.called_with( + StackId="nothere", + Type="custom", + Name="TestLayer", + Shortname="_" + ).should.throw( + Exception, "nothere" + ) + + +@freeze_time("2015-01-01") +@mock_opsworks +def test_describe_layers(): + client = boto3.client('opsworks', region_name='us-east-1') + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + layer_id = client.create_layer( + StackId=stack_id, + Type="custom", + Name="TestLayer", + Shortname="TestLayerShortName" + )['LayerId'] + + rv1 = client.describe_layers(StackId=stack_id) + rv2 = client.describe_layers(LayerIds=[layer_id]) + rv1['Layers'].should.equal(rv2['Layers']) + + 
rv1['Layers'][0]['Name'].should.equal("TestLayer") + + # ClientError + client.describe_layers.when.called_with( + StackId=stack_id, + LayerIds=[layer_id] + ).should.throw( + Exception, "Please provide one or more layer IDs or a stack ID" + ) + # ClientError + client.describe_layers.when.called_with( + StackId="nothere" + ).should.throw( + Exception, "Unable to find stack with ID nothere" + ) + # ClientError + client.describe_layers.when.called_with( + LayerIds=["nothere"] + ).should.throw( + Exception, "nothere" + ) diff --git a/tests/test_opsworks/test_stack.py b/tests/test_opsworks/test_stack.py index 5913ce6d5..2a1b6cc67 100644 --- a/tests/test_opsworks/test_stack.py +++ b/tests/test_opsworks/test_stack.py @@ -1,46 +1,46 @@ -from __future__ import unicode_literals -import boto3 -import sure # noqa -import re - -from moto import mock_opsworks - - -@mock_opsworks -def test_create_stack_response(): - client = boto3.client('opsworks', region_name='us-east-1') - response = client.create_stack( - Name="test_stack_1", - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - ) - response.should.contain("StackId") - - -@mock_opsworks -def test_describe_stacks(): - client = boto3.client('opsworks', region_name='us-east-1') - for i in range(1, 4): - client.create_stack( - Name="test_stack_{0}".format(i), - Region="us-east-1", - ServiceRoleArn="service_arn", - DefaultInstanceProfileArn="profile_arn" - ) - - response = client.describe_stacks() - response['Stacks'].should.have.length_of(3) - for stack in response['Stacks']: - stack['ServiceRoleArn'].should.equal("service_arn") - stack['DefaultInstanceProfileArn'].should.equal("profile_arn") - - _id = response['Stacks'][0]['StackId'] - response = client.describe_stacks(StackIds=[_id]) - response['Stacks'].should.have.length_of(1) - response['Stacks'][0]['Arn'].should.contain(_id) - - # ClientError/ResourceNotFoundException - client.describe_stacks.when.called_with(StackIds=["foo"]).should.throw( - Exception, re.compile(r'foo') - ) +from __future__ import unicode_literals +import boto3 +import sure # noqa +import re + +from moto import mock_opsworks + + +@mock_opsworks +def test_create_stack_response(): + client = boto3.client('opsworks', region_name='us-east-1') + response = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + ) + response.should.contain("StackId") + + +@mock_opsworks +def test_describe_stacks(): + client = boto3.client('opsworks', region_name='us-east-1') + for i in range(1, 4): + client.create_stack( + Name="test_stack_{0}".format(i), + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + ) + + response = client.describe_stacks() + response['Stacks'].should.have.length_of(3) + for stack in response['Stacks']: + stack['ServiceRoleArn'].should.equal("service_arn") + stack['DefaultInstanceProfileArn'].should.equal("profile_arn") + + _id = response['Stacks'][0]['StackId'] + response = client.describe_stacks(StackIds=[_id]) + response['Stacks'].should.have.length_of(1) + response['Stacks'][0]['Arn'].should.contain(_id) + + # ClientError/ResourceNotFoundException + client.describe_stacks.when.called_with(StackIds=["foo"]).should.throw( + Exception, re.compile(r'foo') + ) diff --git a/tests/test_organizations/organizations_test_utils.py b/tests/test_organizations/organizations_test_utils.py index 6548b1830..1f21eee74 100644 --- 
a/tests/test_organizations/organizations_test_utils.py +++ b/tests/test_organizations/organizations_test_utils.py @@ -1,136 +1,136 @@ -from __future__ import unicode_literals - -import six -import sure # noqa -import datetime -from moto.organizations import utils - -EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$" -ORG_ID_REGEX = r'o-[a-z0-9]{%s}' % utils.ORG_ID_SIZE -ROOT_ID_REGEX = r'r-[a-z0-9]{%s}' % utils.ROOT_ID_SIZE -OU_ID_REGEX = r'ou-[a-z0-9]{%s}-[a-z0-9]{%s}' % (utils.ROOT_ID_SIZE, utils.OU_ID_SUFFIX_SIZE) -ACCOUNT_ID_REGEX = r'[0-9]{%s}' % utils.ACCOUNT_ID_SIZE -CREATE_ACCOUNT_STATUS_ID_REGEX = r'car-[a-z0-9]{%s}' % utils.CREATE_ACCOUNT_STATUS_ID_SIZE - - -def test_make_random_org_id(): - org_id = utils.make_random_org_id() - org_id.should.match(ORG_ID_REGEX) - - -def test_make_random_root_id(): - root_id = utils.make_random_root_id() - root_id.should.match(ROOT_ID_REGEX) - - -def test_make_random_ou_id(): - root_id = utils.make_random_root_id() - ou_id = utils.make_random_ou_id(root_id) - ou_id.should.match(OU_ID_REGEX) - - -def test_make_random_account_id(): - account_id = utils.make_random_account_id() - account_id.should.match(ACCOUNT_ID_REGEX) - - -def test_make_random_create_account_status_id(): - create_account_status_id = utils.make_random_create_account_status_id() - create_account_status_id.should.match(CREATE_ACCOUNT_STATUS_ID_REGEX) - - -def validate_organization(response): - org = response['Organization'] - sorted(org.keys()).should.equal([ - 'Arn', - 'AvailablePolicyTypes', - 'FeatureSet', - 'Id', - 'MasterAccountArn', - 'MasterAccountEmail', - 'MasterAccountId', - ]) - org['Id'].should.match(ORG_ID_REGEX) - org['MasterAccountId'].should.equal(utils.MASTER_ACCOUNT_ID) - org['MasterAccountArn'].should.equal(utils.MASTER_ACCOUNT_ARN_FORMAT.format( - org['MasterAccountId'], - org['Id'], - )) - org['Arn'].should.equal(utils.ORGANIZATION_ARN_FORMAT.format( - org['MasterAccountId'], - org['Id'], - )) - org['MasterAccountEmail'].should.equal(utils.MASTER_ACCOUNT_EMAIL) - org['FeatureSet'].should.be.within(['ALL', 'CONSOLIDATED_BILLING']) - org['AvailablePolicyTypes'].should.equal([{ - 'Type': 'SERVICE_CONTROL_POLICY', - 'Status': 'ENABLED' - }]) - - -def validate_roots(org, response): - response.should.have.key('Roots').should.be.a(list) - response['Roots'].should_not.be.empty - root = response['Roots'][0] - root.should.have.key('Id').should.match(ROOT_ID_REGEX) - root.should.have.key('Arn').should.equal(utils.ROOT_ARN_FORMAT.format( - org['MasterAccountId'], - org['Id'], - root['Id'], - )) - root.should.have.key('Name').should.be.a(six.string_types) - root.should.have.key('PolicyTypes').should.be.a(list) - root['PolicyTypes'][0].should.have.key('Type').should.equal('SERVICE_CONTROL_POLICY') - root['PolicyTypes'][0].should.have.key('Status').should.equal('ENABLED') - - -def validate_organizational_unit(org, response): - response.should.have.key('OrganizationalUnit').should.be.a(dict) - ou = response['OrganizationalUnit'] - ou.should.have.key('Id').should.match(OU_ID_REGEX) - ou.should.have.key('Arn').should.equal(utils.OU_ARN_FORMAT.format( - org['MasterAccountId'], - org['Id'], - ou['Id'], - )) - ou.should.have.key('Name').should.be.a(six.string_types) - - -def validate_account(org, account): - sorted(account.keys()).should.equal([ - 'Arn', - 'Email', - 'Id', - 'JoinedMethod', - 'JoinedTimestamp', - 'Name', - 'Status', - ]) - account['Id'].should.match(ACCOUNT_ID_REGEX) - account['Arn'].should.equal(utils.ACCOUNT_ARN_FORMAT.format( - org['MasterAccountId'], - 
org['Id'], - account['Id'], - )) - account['Email'].should.match(EMAIL_REGEX) - account['JoinedMethod'].should.be.within(['INVITED', 'CREATED']) - account['Status'].should.be.within(['ACTIVE', 'SUSPENDED']) - account['Name'].should.be.a(six.string_types) - account['JoinedTimestamp'].should.be.a(datetime.datetime) - - -def validate_create_account_status(create_status): - sorted(create_status.keys()).should.equal([ - 'AccountId', - 'AccountName', - 'CompletedTimestamp', - 'Id', - 'RequestedTimestamp', - 'State', - ]) - create_status['Id'].should.match(CREATE_ACCOUNT_STATUS_ID_REGEX) - create_status['AccountId'].should.match(ACCOUNT_ID_REGEX) - create_status['AccountName'].should.be.a(six.string_types) - create_status['State'].should.equal('SUCCEEDED') - create_status['RequestedTimestamp'].should.be.a(datetime.datetime) - create_status['CompletedTimestamp'].should.be.a(datetime.datetime) +from __future__ import unicode_literals + +import six +import sure # noqa +import datetime +from moto.organizations import utils + +EMAIL_REGEX = "^.+@[a-zA-Z0-9-.]+.[a-zA-Z]{2,3}|[0-9]{1,3}$" +ORG_ID_REGEX = r'o-[a-z0-9]{%s}' % utils.ORG_ID_SIZE +ROOT_ID_REGEX = r'r-[a-z0-9]{%s}' % utils.ROOT_ID_SIZE +OU_ID_REGEX = r'ou-[a-z0-9]{%s}-[a-z0-9]{%s}' % (utils.ROOT_ID_SIZE, utils.OU_ID_SUFFIX_SIZE) +ACCOUNT_ID_REGEX = r'[0-9]{%s}' % utils.ACCOUNT_ID_SIZE +CREATE_ACCOUNT_STATUS_ID_REGEX = r'car-[a-z0-9]{%s}' % utils.CREATE_ACCOUNT_STATUS_ID_SIZE + + +def test_make_random_org_id(): + org_id = utils.make_random_org_id() + org_id.should.match(ORG_ID_REGEX) + + +def test_make_random_root_id(): + root_id = utils.make_random_root_id() + root_id.should.match(ROOT_ID_REGEX) + + +def test_make_random_ou_id(): + root_id = utils.make_random_root_id() + ou_id = utils.make_random_ou_id(root_id) + ou_id.should.match(OU_ID_REGEX) + + +def test_make_random_account_id(): + account_id = utils.make_random_account_id() + account_id.should.match(ACCOUNT_ID_REGEX) + + +def test_make_random_create_account_status_id(): + create_account_status_id = utils.make_random_create_account_status_id() + create_account_status_id.should.match(CREATE_ACCOUNT_STATUS_ID_REGEX) + + +def validate_organization(response): + org = response['Organization'] + sorted(org.keys()).should.equal([ + 'Arn', + 'AvailablePolicyTypes', + 'FeatureSet', + 'Id', + 'MasterAccountArn', + 'MasterAccountEmail', + 'MasterAccountId', + ]) + org['Id'].should.match(ORG_ID_REGEX) + org['MasterAccountId'].should.equal(utils.MASTER_ACCOUNT_ID) + org['MasterAccountArn'].should.equal(utils.MASTER_ACCOUNT_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + )) + org['Arn'].should.equal(utils.ORGANIZATION_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + )) + org['MasterAccountEmail'].should.equal(utils.MASTER_ACCOUNT_EMAIL) + org['FeatureSet'].should.be.within(['ALL', 'CONSOLIDATED_BILLING']) + org['AvailablePolicyTypes'].should.equal([{ + 'Type': 'SERVICE_CONTROL_POLICY', + 'Status': 'ENABLED' + }]) + + +def validate_roots(org, response): + response.should.have.key('Roots').should.be.a(list) + response['Roots'].should_not.be.empty + root = response['Roots'][0] + root.should.have.key('Id').should.match(ROOT_ID_REGEX) + root.should.have.key('Arn').should.equal(utils.ROOT_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + root['Id'], + )) + root.should.have.key('Name').should.be.a(six.string_types) + root.should.have.key('PolicyTypes').should.be.a(list) + root['PolicyTypes'][0].should.have.key('Type').should.equal('SERVICE_CONTROL_POLICY') + 
root['PolicyTypes'][0].should.have.key('Status').should.equal('ENABLED') + + +def validate_organizational_unit(org, response): + response.should.have.key('OrganizationalUnit').should.be.a(dict) + ou = response['OrganizationalUnit'] + ou.should.have.key('Id').should.match(OU_ID_REGEX) + ou.should.have.key('Arn').should.equal(utils.OU_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + ou['Id'], + )) + ou.should.have.key('Name').should.be.a(six.string_types) + + +def validate_account(org, account): + sorted(account.keys()).should.equal([ + 'Arn', + 'Email', + 'Id', + 'JoinedMethod', + 'JoinedTimestamp', + 'Name', + 'Status', + ]) + account['Id'].should.match(ACCOUNT_ID_REGEX) + account['Arn'].should.equal(utils.ACCOUNT_ARN_FORMAT.format( + org['MasterAccountId'], + org['Id'], + account['Id'], + )) + account['Email'].should.match(EMAIL_REGEX) + account['JoinedMethod'].should.be.within(['INVITED', 'CREATED']) + account['Status'].should.be.within(['ACTIVE', 'SUSPENDED']) + account['Name'].should.be.a(six.string_types) + account['JoinedTimestamp'].should.be.a(datetime.datetime) + + +def validate_create_account_status(create_status): + sorted(create_status.keys()).should.equal([ + 'AccountId', + 'AccountName', + 'CompletedTimestamp', + 'Id', + 'RequestedTimestamp', + 'State', + ]) + create_status['Id'].should.match(CREATE_ACCOUNT_STATUS_ID_REGEX) + create_status['AccountId'].should.match(ACCOUNT_ID_REGEX) + create_status['AccountName'].should.be.a(six.string_types) + create_status['State'].should.equal('SUCCEEDED') + create_status['RequestedTimestamp'].should.be.a(datetime.datetime) + create_status['CompletedTimestamp'].should.be.a(datetime.datetime) diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py index dfac5feeb..ea3e17962 100644 --- a/tests/test_organizations/test_organizations_boto3.py +++ b/tests/test_organizations/test_organizations_boto3.py @@ -1,322 +1,322 @@ -from __future__ import unicode_literals - -import boto3 -import sure # noqa -from botocore.exceptions import ClientError -from nose.tools import assert_raises - -from moto import mock_organizations -from moto.organizations import utils -from .organizations_test_utils import ( - validate_organization, - validate_roots, - validate_organizational_unit, - validate_account, - validate_create_account_status, -) - - -@mock_organizations -def test_create_organization(): - client = boto3.client('organizations', region_name='us-east-1') - response = client.create_organization(FeatureSet='ALL') - validate_organization(response) - response['Organization']['FeatureSet'].should.equal('ALL') - - -@mock_organizations -def test_describe_organization(): - client = boto3.client('organizations', region_name='us-east-1') - client.create_organization(FeatureSet='ALL') - response = client.describe_organization() - validate_organization(response) - - -@mock_organizations -def test_describe_organization_exception(): - client = boto3.client('organizations', region_name='us-east-1') - with assert_raises(ClientError) as e: - response = client.describe_organization() - ex = e.exception - ex.operation_name.should.equal('DescribeOrganization') - ex.response['Error']['Code'].should.equal('400') - ex.response['Error']['Message'].should.contain('AWSOrganizationsNotInUseException') - - -# Organizational Units - -@mock_organizations -def test_list_roots(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] 
- response = client.list_roots() - validate_roots(org, response) - - -@mock_organizations -def test_create_organizational_unit(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - ou_name = 'ou01' - response = client.create_organizational_unit( - ParentId=root_id, - Name=ou_name, - ) - validate_organizational_unit(org, response) - response['OrganizationalUnit']['Name'].should.equal(ou_name) - - -@mock_organizations -def test_describe_organizational_unit(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - ou_id = client.create_organizational_unit( - ParentId=root_id, - Name='ou01', - )['OrganizationalUnit']['Id'] - response = client.describe_organizational_unit(OrganizationalUnitId=ou_id) - validate_organizational_unit(org, response) - - -@mock_organizations -def test_describe_organizational_unit_exception(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - with assert_raises(ClientError) as e: - response = client.describe_organizational_unit( - OrganizationalUnitId=utils.make_random_root_id() - ) - ex = e.exception - ex.operation_name.should.equal('DescribeOrganizationalUnit') - ex.response['Error']['Code'].should.equal('400') - ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException') - - -@mock_organizations -def test_list_organizational_units_for_parent(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - client.create_organizational_unit(ParentId=root_id, Name='ou01') - client.create_organizational_unit(ParentId=root_id, Name='ou02') - client.create_organizational_unit(ParentId=root_id, Name='ou03') - response = client.list_organizational_units_for_parent(ParentId=root_id) - response.should.have.key('OrganizationalUnits').should.be.a(list) - for ou in response['OrganizationalUnits']: - validate_organizational_unit(org, dict(OrganizationalUnit=ou)) - - -@mock_organizations -def test_list_organizational_units_for_parent_exception(): - client = boto3.client('organizations', region_name='us-east-1') - with assert_raises(ClientError) as e: - response = client.list_organizational_units_for_parent( - ParentId=utils.make_random_root_id() - ) - ex = e.exception - ex.operation_name.should.equal('ListOrganizationalUnitsForParent') - ex.response['Error']['Code'].should.equal('400') - ex.response['Error']['Message'].should.contain('ParentNotFoundException') - - -# Accounts -mockname = 'mock-account' -mockdomain = 'moto-example.org' -mockemail = '@'.join([mockname, mockdomain]) - - -@mock_organizations -def test_create_account(): - client = boto3.client('organizations', region_name='us-east-1') - client.create_organization(FeatureSet='ALL') - create_status = client.create_account( - AccountName=mockname, Email=mockemail - )['CreateAccountStatus'] - validate_create_account_status(create_status) - create_status['AccountName'].should.equal(mockname) - - -@mock_organizations -def test_describe_account(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - account_id = client.create_account( - 
AccountName=mockname, Email=mockemail - )['CreateAccountStatus']['AccountId'] - response = client.describe_account(AccountId=account_id) - validate_account(org, response['Account']) - response['Account']['Name'].should.equal(mockname) - response['Account']['Email'].should.equal(mockemail) - - -@mock_organizations -def test_describe_account_exception(): - client = boto3.client('organizations', region_name='us-east-1') - with assert_raises(ClientError) as e: - response = client.describe_account(AccountId=utils.make_random_account_id()) - ex = e.exception - ex.operation_name.should.equal('DescribeAccount') - ex.response['Error']['Code'].should.equal('400') - ex.response['Error']['Message'].should.contain('AccountNotFoundException') - - -@mock_organizations -def test_list_accounts(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - for i in range(5): - name = mockname + str(i) - email = name + '@' + mockdomain - client.create_account(AccountName=name, Email=email) - response = client.list_accounts() - response.should.have.key('Accounts') - accounts = response['Accounts'] - len(accounts).should.equal(5) - for account in accounts: - validate_account(org, account) - accounts[3]['Name'].should.equal(mockname + '3') - accounts[2]['Email'].should.equal(mockname + '2' + '@' + mockdomain) - - -@mock_organizations -def test_list_accounts_for_parent(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - account_id = client.create_account( - AccountName=mockname, - Email=mockemail, - )['CreateAccountStatus']['AccountId'] - response = client.list_accounts_for_parent(ParentId=root_id) - account_id.should.be.within([account['Id'] for account in response['Accounts']]) - - -@mock_organizations -def test_move_account(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - account_id = client.create_account( - AccountName=mockname, Email=mockemail - )['CreateAccountStatus']['AccountId'] - ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') - ou01_id = ou01['OrganizationalUnit']['Id'] - client.move_account( - AccountId=account_id, - SourceParentId=root_id, - DestinationParentId=ou01_id, - ) - response = client.list_accounts_for_parent(ParentId=ou01_id) - account_id.should.be.within([account['Id'] for account in response['Accounts']]) - - -@mock_organizations -def test_list_parents_for_ou(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') - ou01_id = ou01['OrganizationalUnit']['Id'] - response01 = client.list_parents(ChildId=ou01_id) - response01.should.have.key('Parents').should.be.a(list) - response01['Parents'][0].should.have.key('Id').should.equal(root_id) - response01['Parents'][0].should.have.key('Type').should.equal('ROOT') - ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02') - ou02_id = ou02['OrganizationalUnit']['Id'] - response02 = client.list_parents(ChildId=ou02_id) - response02.should.have.key('Parents').should.be.a(list) - response02['Parents'][0].should.have.key('Id').should.equal(ou01_id) - 
response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT') - - -@mock_organizations -def test_list_parents_for_accounts(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') - ou01_id = ou01['OrganizationalUnit']['Id'] - account01_id = client.create_account( - AccountName='account01', - Email='account01@moto-example.org' - )['CreateAccountStatus']['AccountId'] - account02_id = client.create_account( - AccountName='account02', - Email='account02@moto-example.org' - )['CreateAccountStatus']['AccountId'] - client.move_account( - AccountId=account02_id, - SourceParentId=root_id, - DestinationParentId=ou01_id, - ) - response01 = client.list_parents(ChildId=account01_id) - response01.should.have.key('Parents').should.be.a(list) - response01['Parents'][0].should.have.key('Id').should.equal(root_id) - response01['Parents'][0].should.have.key('Type').should.equal('ROOT') - response02 = client.list_parents(ChildId=account02_id) - response02.should.have.key('Parents').should.be.a(list) - response02['Parents'][0].should.have.key('Id').should.equal(ou01_id) - response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT') - - -@mock_organizations -def test_list_children(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') - ou01_id = ou01['OrganizationalUnit']['Id'] - ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02') - ou02_id = ou02['OrganizationalUnit']['Id'] - account01_id = client.create_account( - AccountName='account01', - Email='account01@moto-example.org' - )['CreateAccountStatus']['AccountId'] - account02_id = client.create_account( - AccountName='account02', - Email='account02@moto-example.org' - )['CreateAccountStatus']['AccountId'] - client.move_account( - AccountId=account02_id, - SourceParentId=root_id, - DestinationParentId=ou01_id, - ) - response01 = client.list_children(ParentId=root_id, ChildType='ACCOUNT') - response02 = client.list_children(ParentId=root_id, ChildType='ORGANIZATIONAL_UNIT') - response03 = client.list_children(ParentId=ou01_id, ChildType='ACCOUNT') - response04 = client.list_children(ParentId=ou01_id, ChildType='ORGANIZATIONAL_UNIT') - response01['Children'][0]['Id'].should.equal(account01_id) - response01['Children'][0]['Type'].should.equal('ACCOUNT') - response02['Children'][0]['Id'].should.equal(ou01_id) - response02['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT') - response03['Children'][0]['Id'].should.equal(account02_id) - response03['Children'][0]['Type'].should.equal('ACCOUNT') - response04['Children'][0]['Id'].should.equal(ou02_id) - response04['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT') - - -@mock_organizations -def test_list_children_exception(): - client = boto3.client('organizations', region_name='us-east-1') - org = client.create_organization(FeatureSet='ALL')['Organization'] - root_id = client.list_roots()['Roots'][0]['Id'] - with assert_raises(ClientError) as e: - response = client.list_children( - ParentId=utils.make_random_root_id(), - ChildType='ACCOUNT' - ) - ex = e.exception - ex.operation_name.should.equal('ListChildren') - 
ex.response['Error']['Code'].should.equal('400') - ex.response['Error']['Message'].should.contain('ParentNotFoundException') - with assert_raises(ClientError) as e: - response = client.list_children( - ParentId=root_id, - ChildType='BLEE' - ) - ex = e.exception - ex.operation_name.should.equal('ListChildren') - ex.response['Error']['Code'].should.equal('400') - ex.response['Error']['Message'].should.contain('InvalidInputException') +from __future__ import unicode_literals + +import boto3 +import sure # noqa +from botocore.exceptions import ClientError +from nose.tools import assert_raises + +from moto import mock_organizations +from moto.organizations import utils +from .organizations_test_utils import ( + validate_organization, + validate_roots, + validate_organizational_unit, + validate_account, + validate_create_account_status, +) + + +@mock_organizations +def test_create_organization(): + client = boto3.client('organizations', region_name='us-east-1') + response = client.create_organization(FeatureSet='ALL') + validate_organization(response) + response['Organization']['FeatureSet'].should.equal('ALL') + + +@mock_organizations +def test_describe_organization(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL') + response = client.describe_organization() + validate_organization(response) + + +@mock_organizations +def test_describe_organization_exception(): + client = boto3.client('organizations', region_name='us-east-1') + with assert_raises(ClientError) as e: + response = client.describe_organization() + ex = e.exception + ex.operation_name.should.equal('DescribeOrganization') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('AWSOrganizationsNotInUseException') + + +# Organizational Units + +@mock_organizations +def test_list_roots(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + response = client.list_roots() + validate_roots(org, response) + + +@mock_organizations +def test_create_organizational_unit(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou_name = 'ou01' + response = client.create_organizational_unit( + ParentId=root_id, + Name=ou_name, + ) + validate_organizational_unit(org, response) + response['OrganizationalUnit']['Name'].should.equal(ou_name) + + +@mock_organizations +def test_describe_organizational_unit(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou_id = client.create_organizational_unit( + ParentId=root_id, + Name='ou01', + )['OrganizationalUnit']['Id'] + response = client.describe_organizational_unit(OrganizationalUnitId=ou_id) + validate_organizational_unit(org, response) + + +@mock_organizations +def test_describe_organizational_unit_exception(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + with assert_raises(ClientError) as e: + response = client.describe_organizational_unit( + OrganizationalUnitId=utils.make_random_root_id() + ) + ex = e.exception + ex.operation_name.should.equal('DescribeOrganizationalUnit') + ex.response['Error']['Code'].should.equal('400') + 
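+    # moto reports a generic '400' code and embeds the exception name in the message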
ex.response['Error']['Message'].should.contain('OrganizationalUnitNotFoundException') + + +@mock_organizations +def test_list_organizational_units_for_parent(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + client.create_organizational_unit(ParentId=root_id, Name='ou01') + client.create_organizational_unit(ParentId=root_id, Name='ou02') + client.create_organizational_unit(ParentId=root_id, Name='ou03') + response = client.list_organizational_units_for_parent(ParentId=root_id) + response.should.have.key('OrganizationalUnits').should.be.a(list) + for ou in response['OrganizationalUnits']: + validate_organizational_unit(org, dict(OrganizationalUnit=ou)) + + +@mock_organizations +def test_list_organizational_units_for_parent_exception(): + client = boto3.client('organizations', region_name='us-east-1') + with assert_raises(ClientError) as e: + response = client.list_organizational_units_for_parent( + ParentId=utils.make_random_root_id() + ) + ex = e.exception + ex.operation_name.should.equal('ListOrganizationalUnitsForParent') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('ParentNotFoundException') + + +# Accounts +mockname = 'mock-account' +mockdomain = 'moto-example.org' +mockemail = '@'.join([mockname, mockdomain]) + + +@mock_organizations +def test_create_account(): + client = boto3.client('organizations', region_name='us-east-1') + client.create_organization(FeatureSet='ALL') + create_status = client.create_account( + AccountName=mockname, Email=mockemail + )['CreateAccountStatus'] + validate_create_account_status(create_status) + create_status['AccountName'].should.equal(mockname) + + +@mock_organizations +def test_describe_account(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + account_id = client.create_account( + AccountName=mockname, Email=mockemail + )['CreateAccountStatus']['AccountId'] + response = client.describe_account(AccountId=account_id) + validate_account(org, response['Account']) + response['Account']['Name'].should.equal(mockname) + response['Account']['Email'].should.equal(mockemail) + + +@mock_organizations +def test_describe_account_exception(): + client = boto3.client('organizations', region_name='us-east-1') + with assert_raises(ClientError) as e: + response = client.describe_account(AccountId=utils.make_random_account_id()) + ex = e.exception + ex.operation_name.should.equal('DescribeAccount') + ex.response['Error']['Code'].should.equal('400') + ex.response['Error']['Message'].should.contain('AccountNotFoundException') + + +@mock_organizations +def test_list_accounts(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + for i in range(5): + name = mockname + str(i) + email = name + '@' + mockdomain + client.create_account(AccountName=name, Email=email) + response = client.list_accounts() + response.should.have.key('Accounts') + accounts = response['Accounts'] + len(accounts).should.equal(5) + for account in accounts: + validate_account(org, account) + accounts[3]['Name'].should.equal(mockname + '3') + accounts[2]['Email'].should.equal(mockname + '2' + '@' + mockdomain) + + +@mock_organizations +def test_list_accounts_for_parent(): + client = boto3.client('organizations', region_name='us-east-1') + 
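+    # a newly created account should be parented under the organization root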
org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + account_id = client.create_account( + AccountName=mockname, + Email=mockemail, + )['CreateAccountStatus']['AccountId'] + response = client.list_accounts_for_parent(ParentId=root_id) + account_id.should.be.within([account['Id'] for account in response['Accounts']]) + + +@mock_organizations +def test_move_account(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + account_id = client.create_account( + AccountName=mockname, Email=mockemail + )['CreateAccountStatus']['AccountId'] + ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') + ou01_id = ou01['OrganizationalUnit']['Id'] + client.move_account( + AccountId=account_id, + SourceParentId=root_id, + DestinationParentId=ou01_id, + ) + response = client.list_accounts_for_parent(ParentId=ou01_id) + account_id.should.be.within([account['Id'] for account in response['Accounts']]) + + +@mock_organizations +def test_list_parents_for_ou(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') + ou01_id = ou01['OrganizationalUnit']['Id'] + response01 = client.list_parents(ChildId=ou01_id) + response01.should.have.key('Parents').should.be.a(list) + response01['Parents'][0].should.have.key('Id').should.equal(root_id) + response01['Parents'][0].should.have.key('Type').should.equal('ROOT') + ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02') + ou02_id = ou02['OrganizationalUnit']['Id'] + response02 = client.list_parents(ChildId=ou02_id) + response02.should.have.key('Parents').should.be.a(list) + response02['Parents'][0].should.have.key('Id').should.equal(ou01_id) + response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT') + + +@mock_organizations +def test_list_parents_for_accounts(): + client = boto3.client('organizations', region_name='us-east-1') + org = client.create_organization(FeatureSet='ALL')['Organization'] + root_id = client.list_roots()['Roots'][0]['Id'] + ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01') + ou01_id = ou01['OrganizationalUnit']['Id'] + account01_id = client.create_account( + AccountName='account01', + Email='account01@moto-example.org' + )['CreateAccountStatus']['AccountId'] + account02_id = client.create_account( + AccountName='account02', + Email='account02@moto-example.org' + )['CreateAccountStatus']['AccountId'] + client.move_account( + AccountId=account02_id, + SourceParentId=root_id, + DestinationParentId=ou01_id, + ) + response01 = client.list_parents(ChildId=account01_id) + response01.should.have.key('Parents').should.be.a(list) + response01['Parents'][0].should.have.key('Id').should.equal(root_id) + response01['Parents'][0].should.have.key('Type').should.equal('ROOT') + response02 = client.list_parents(ChildId=account02_id) + response02.should.have.key('Parents').should.be.a(list) + response02['Parents'][0].should.have.key('Id').should.equal(ou01_id) + response02['Parents'][0].should.have.key('Type').should.equal('ORGANIZATIONAL_UNIT') + + +@mock_organizations +def test_list_children(): + client = boto3.client('organizations', region_name='us-east-1') + org = 
client.create_organization(FeatureSet='ALL')['Organization']
+    root_id = client.list_roots()['Roots'][0]['Id']
+    ou01 = client.create_organizational_unit(ParentId=root_id, Name='ou01')
+    ou01_id = ou01['OrganizationalUnit']['Id']
+    ou02 = client.create_organizational_unit(ParentId=ou01_id, Name='ou02')
+    ou02_id = ou02['OrganizationalUnit']['Id']
+    account01_id = client.create_account(
+        AccountName='account01',
+        Email='account01@moto-example.org'
+    )['CreateAccountStatus']['AccountId']
+    account02_id = client.create_account(
+        AccountName='account02',
+        Email='account02@moto-example.org'
+    )['CreateAccountStatus']['AccountId']
+    client.move_account(
+        AccountId=account02_id,
+        SourceParentId=root_id,
+        DestinationParentId=ou01_id,
+    )
+    response01 = client.list_children(ParentId=root_id, ChildType='ACCOUNT')
+    response02 = client.list_children(ParentId=root_id, ChildType='ORGANIZATIONAL_UNIT')
+    response03 = client.list_children(ParentId=ou01_id, ChildType='ACCOUNT')
+    response04 = client.list_children(ParentId=ou01_id, ChildType='ORGANIZATIONAL_UNIT')
+    response01['Children'][0]['Id'].should.equal(account01_id)
+    response01['Children'][0]['Type'].should.equal('ACCOUNT')
+    response02['Children'][0]['Id'].should.equal(ou01_id)
+    response02['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT')
+    response03['Children'][0]['Id'].should.equal(account02_id)
+    response03['Children'][0]['Type'].should.equal('ACCOUNT')
+    response04['Children'][0]['Id'].should.equal(ou02_id)
+    response04['Children'][0]['Type'].should.equal('ORGANIZATIONAL_UNIT')
+
+
+@mock_organizations
+def test_list_children_exception():
+    client = boto3.client('organizations', region_name='us-east-1')
+    org = client.create_organization(FeatureSet='ALL')['Organization']
+    root_id = client.list_roots()['Roots'][0]['Id']
+    with assert_raises(ClientError) as e:
+        response = client.list_children(
+            ParentId=utils.make_random_root_id(),
+            ChildType='ACCOUNT'
+        )
+    ex = e.exception
+    ex.operation_name.should.equal('ListChildren')
+    ex.response['Error']['Code'].should.equal('400')
+    ex.response['Error']['Message'].should.contain('ParentNotFoundException')
+    with assert_raises(ClientError) as e:
+        response = client.list_children(
+            ParentId=root_id,
+            ChildType='BLEE'
+        )
+    ex = e.exception
+    ex.operation_name.should.equal('ListChildren')
+    ex.response['Error']['Code'].should.equal('400')
+    ex.response['Error']['Message'].should.contain('InvalidInputException')
diff --git a/tests/test_polly/test_polly.py b/tests/test_polly/test_polly.py
index c5c864835..ec85142fa 100644
--- a/tests/test_polly/test_polly.py
+++ b/tests/test_polly/test_polly.py
@@ -1,275 +1,275 @@
-from __future__ import unicode_literals
-
-from botocore.exceptions import ClientError
-import boto3
-import sure  # noqa
-from nose.tools import assert_raises
-from moto import mock_polly
-
-# Polly only available in a few regions
-DEFAULT_REGION = 'eu-west-1'
-
-LEXICON_XML = """<?xml version="1.0" encoding="UTF-8"?>
-<lexicon version="1.0"
-      xmlns="http://www.w3.org/2005/01/pronunciation-lexicon"
-      xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-      xsi:schemaLocation="http://www.w3.org/2005/01/pronunciation-lexicon
-        http://www.w3.org/2005/01/pronunciation-lexicon.xsd"
-      alphabet="ipa"
-      xml:lang="en-US">
-  <lexeme>
-    <grapheme>W3C</grapheme>
-    <alias>World Wide Web Consortium</alias>
-  </lexeme>
-</lexicon>"""
-
-
-@mock_polly
-def test_describe_voices():
-    client = boto3.client('polly', region_name=DEFAULT_REGION)
-
-    resp = client.describe_voices()
-    len(resp['Voices']).should.be.greater_than(1)
-
-    resp = client.describe_voices(LanguageCode='en-GB')
-    len(resp['Voices']).should.equal(3)
-
-    try:
-        client.describe_voices(LanguageCode='SOME_LANGUAGE')
-    except ClientError as err:
-        err.response['Error']['Code'].should.equal('400')
-    else:
-        raise RuntimeError('Should have raised an exception')
-
-
-@mock_polly
-def test_put_list_lexicon():
-    client = boto3.client('polly', region_name=DEFAULT_REGION)
-
-    # Return nothing
-    client.put_lexicon(
-        Name='test',
-        Content=LEXICON_XML
-    )
-
-    resp = client.list_lexicons()
-    len(resp['Lexicons']).should.equal(1)
-
-
-@mock_polly
-def test_put_get_lexicon():
-    client = boto3.client('polly', region_name=DEFAULT_REGION)
-
-    # Return nothing
-    client.put_lexicon(
-        Name='test',
-        Content=LEXICON_XML
-    )
-
-    resp = client.get_lexicon(Name='test')
-    resp.should.contain('Lexicon')
-    resp.should.contain('LexiconAttributes')
-
-
-@mock_polly
-def test_put_lexicon_bad_name():
-    client = boto3.client('polly', region_name=DEFAULT_REGION)
-
-    try:
-        client.put_lexicon(
-            Name='test-invalid',
-            Content=LEXICON_XML
-        )
-    except ClientError as err:
-        err.response['Error']['Code'].should.equal('InvalidParameterValue')
-    else:
-        raise RuntimeError('Should have raised an exception')
-
-
-@mock_polly
-def test_synthesize_speech():
-    client = boto3.client('polly', region_name=DEFAULT_REGION)
-
-    # Return nothing
-    client.put_lexicon(
-        Name='test',
-        Content=LEXICON_XML
-    )
-
-    tests = (
-        ('pcm', 'audio/pcm'),
-        ('mp3', 'audio/mpeg'),
-        ('ogg_vorbis', 'audio/ogg'),
-    )
-    for output_format, content_type in tests:
-        resp = client.synthesize_speech(
-            LexiconNames=['test'],
-            OutputFormat=output_format,
-            SampleRate='16000',
-            Text='test1234',
-            TextType='text',
-            VoiceId='Astrid'
-        )
-        resp['ContentType'].should.equal(content_type)
-
-
-@mock_polly
-def test_synthesize_speech_bad_lexicon():
-    client = boto3.client('polly', region_name=DEFAULT_REGION)
-    client.put_lexicon(Name='test', Content=LEXICON_XML)
-
-    try:
-        client.synthesize_speech(
-            LexiconNames=['test2'],
-            OutputFormat='pcm',
-            SampleRate='16000',
-            Text='test1234',
-            TextType='text',
-            VoiceId='Astrid'
-        )
-    except ClientError as err:
-        err.response['Error']['Code'].should.equal('LexiconNotFoundException')
-    else:
-        raise RuntimeError('Should have raised LexiconNotFoundException')
-
-
-@mock_polly
-def test_synthesize_speech_bad_output_format():
-    client = boto3.client('polly', region_name=DEFAULT_REGION)
-    client.put_lexicon(Name='test', Content=LEXICON_XML)
-
-    try:
-        client.synthesize_speech(
-            LexiconNames=['test'],
-            OutputFormat='invalid',
-            SampleRate='16000',
-            Text='test1234',
-            TextType='text',
-            VoiceId='Astrid'
-        )
-    except ClientError as err:
-        err.response['Error']['Code'].should.equal('InvalidParameterValue')
-    else:
-        raise RuntimeError('Should have raised InvalidParameterValue')
-
-
-@mock_polly
-def test_synthesize_speech_bad_sample_rate():
-    client = boto3.client('polly', region_name=DEFAULT_REGION)
-    client.put_lexicon(Name='test', Content=LEXICON_XML)
-
-    try:
-        client.synthesize_speech(
-            LexiconNames=['test'],
-            OutputFormat='pcm',
-            SampleRate='18000',
-            Text='test1234',
-            TextType='text',
-            VoiceId='Astrid'
-        )
-    except ClientError as err:
-        err.response['Error']['Code'].should.equal('InvalidSampleRateException')
-    else:
-        raise RuntimeError('Should have raised InvalidSampleRateException')
-
-
-@mock_polly
-def test_synthesize_speech_bad_text_type():
-    client = boto3.client('polly', region_name=DEFAULT_REGION)
-    client.put_lexicon(Name='test', Content=LEXICON_XML)
-
-    try:
-        client.synthesize_speech(
-            LexiconNames=['test'],
-            OutputFormat='pcm',
-            SampleRate='16000',
-            Text='test1234',
-            TextType='invalid',
-            VoiceId='Astrid'
-        )
-    except ClientError as err:
-        err.response['Error']['Code'].should.equal('InvalidParameterValue')
-    else:
-        raise RuntimeError('Should have raised InvalidParameterValue')
-
-
-@mock_polly
-def test_synthesize_speech_bad_voice_id():
-    client = boto3.client('polly', region_name=DEFAULT_REGION)
-    client.put_lexicon(Name='test', Content=LEXICON_XML)
-
-    try:
-        client.synthesize_speech(
-            LexiconNames=['test'],
-            OutputFormat='pcm',
-            SampleRate='16000',
-            Text='test1234',
-            TextType='text',
-            VoiceId='Luke'
-        )
-    except ClientError as err:
-        err.response['Error']['Code'].should.equal('InvalidParameterValue')
-    else:
-        raise RuntimeError('Should have raised InvalidParameterValue')
-
-
-@mock_polly
-def test_synthesize_speech_text_too_long():
-    client = boto3.client('polly', region_name=DEFAULT_REGION)
-    client.put_lexicon(Name='test', Content=LEXICON_XML)
-
-    try:
-        client.synthesize_speech(
-            LexiconNames=['test'],
-            OutputFormat='pcm',
-            SampleRate='16000',
-            Text='test1234'*376,  # = 3008 characters
-            TextType='text',
-            VoiceId='Astrid'
-        )
-    except ClientError as err:
-        err.response['Error']['Code'].should.equal('TextLengthExceededException')
-    else:
-        raise RuntimeError('Should have raised TextLengthExceededException')
-
-
-@mock_polly
-def test_synthesize_speech_bad_speech_marks1():
-    client = boto3.client('polly', region_name=DEFAULT_REGION)
-    client.put_lexicon(Name='test', Content=LEXICON_XML)
-
-    try:
-        client.synthesize_speech(
-            LexiconNames=['test'],
-            OutputFormat='pcm',
-            SampleRate='16000',
-            Text='test1234',
-            TextType='text',
-            SpeechMarkTypes=['word'],
-            VoiceId='Astrid'
-        )
-    except ClientError as err:
-        err.response['Error']['Code'].should.equal('MarksNotSupportedForFormatException')
-    else:
-        raise RuntimeError('Should have raised MarksNotSupportedForFormatException')
-
-
-@mock_polly
-def test_synthesize_speech_bad_speech_marks2():
-    client = boto3.client('polly', region_name=DEFAULT_REGION)
-    client.put_lexicon(Name='test', Content=LEXICON_XML)
-
-    try:
-        client.synthesize_speech(
-            LexiconNames=['test'],
-            OutputFormat='pcm',
-            SampleRate='16000',
-            Text='test1234',
-            TextType='ssml',
-            SpeechMarkTypes=['word'],
-            VoiceId='Astrid'
-        )
-    except ClientError as err:
-        err.response['Error']['Code'].should.equal('MarksNotSupportedForFormatException')
-    else:
-        raise RuntimeError('Should have raised MarksNotSupportedForFormatException')
+from __future__ import unicode_literals
+
+from botocore.exceptions import ClientError
+import boto3
+import sure  # noqa
+from nose.tools import assert_raises
+from moto import mock_polly
+
+# Polly only available in a few regions
+DEFAULT_REGION = 'eu-west-1'
+
+LEXICON_XML = """<?xml version="1.0" encoding="UTF-8"?>
+<lexicon version="1.0"
+      xmlns="http://www.w3.org/2005/01/pronunciation-lexicon"
+      xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+      xsi:schemaLocation="http://www.w3.org/2005/01/pronunciation-lexicon
+        http://www.w3.org/2005/01/pronunciation-lexicon.xsd"
+      alphabet="ipa"
+      xml:lang="en-US">
+  <lexeme>
+    <grapheme>W3C</grapheme>
+    <alias>World Wide Web Consortium</alias>
+  </lexeme>
+</lexicon>"""
+
+
+@mock_polly
+def test_describe_voices():
+    client = boto3.client('polly', region_name=DEFAULT_REGION)
+
+    resp = client.describe_voices()
+    len(resp['Voices']).should.be.greater_than(1)
+
+    resp = client.describe_voices(LanguageCode='en-GB')
+    len(resp['Voices']).should.equal(3)
+
+    try:
+        client.describe_voices(LanguageCode='SOME_LANGUAGE')
+    except ClientError as err:
+        err.response['Error']['Code'].should.equal('400')
+    else:
+        raise RuntimeError('Should have raised an exception')
+
+
+@mock_polly
+def test_put_list_lexicon():
+    client = boto3.client('polly', region_name=DEFAULT_REGION)
+
+    # Return nothing
+    client.put_lexicon(
+        Name='test',
+        Content=LEXICON_XML
+    )
+
+    resp = client.list_lexicons()
+    len(resp['Lexicons']).should.equal(1)
+
+
+@mock_polly
+def test_put_get_lexicon():
+    client = boto3.client('polly', region_name=DEFAULT_REGION)
+
+    # Return nothing
+    client.put_lexicon(
+        Name='test',
+        Content=LEXICON_XML
+    )
+
+    resp = client.get_lexicon(Name='test')
+    resp.should.contain('Lexicon')
+    resp.should.contain('LexiconAttributes')
+
+
+@mock_polly
+def test_put_lexicon_bad_name():
+    client = boto3.client('polly', region_name=DEFAULT_REGION)
+
+    try:
+        client.put_lexicon(
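+            # note: the hyphen makes this name invalid; Polly lexicon names
+            # must match [0-9A-Za-z]{1,20}, so InvalidParameterValue is expected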
+            Name='test-invalid',
+            Content=LEXICON_XML
+        )
+    except ClientError as err:
+        err.response['Error']['Code'].should.equal('InvalidParameterValue')
+    else:
+        raise RuntimeError('Should have raised an exception')
+
+
+@mock_polly
+def test_synthesize_speech():
+    client = boto3.client('polly', region_name=DEFAULT_REGION)
+
+    # Return nothing
+    client.put_lexicon(
+        Name='test',
+        Content=LEXICON_XML
+    )
+
+    tests = (
+        ('pcm', 'audio/pcm'),
+        ('mp3', 'audio/mpeg'),
+        ('ogg_vorbis', 'audio/ogg'),
+    )
+    for output_format, content_type in tests:
+        resp = client.synthesize_speech(
+            LexiconNames=['test'],
+            OutputFormat=output_format,
+            SampleRate='16000',
+            Text='test1234',
+            TextType='text',
+            VoiceId='Astrid'
+        )
+        resp['ContentType'].should.equal(content_type)
+
+
+@mock_polly
+def test_synthesize_speech_bad_lexicon():
+    client = boto3.client('polly', region_name=DEFAULT_REGION)
+    client.put_lexicon(Name='test', Content=LEXICON_XML)
+
+    try:
+        client.synthesize_speech(
+            LexiconNames=['test2'],
+            OutputFormat='pcm',
+            SampleRate='16000',
+            Text='test1234',
+            TextType='text',
+            VoiceId='Astrid'
+        )
+    except ClientError as err:
+        err.response['Error']['Code'].should.equal('LexiconNotFoundException')
+    else:
+        raise RuntimeError('Should have raised LexiconNotFoundException')
+
+
+@mock_polly
+def test_synthesize_speech_bad_output_format():
+    client = boto3.client('polly', region_name=DEFAULT_REGION)
+    client.put_lexicon(Name='test', Content=LEXICON_XML)
+
+    try:
+        client.synthesize_speech(
+            LexiconNames=['test'],
+            OutputFormat='invalid',
+            SampleRate='16000',
+            Text='test1234',
+            TextType='text',
+            VoiceId='Astrid'
+        )
+    except ClientError as err:
+        err.response['Error']['Code'].should.equal('InvalidParameterValue')
+    else:
+        raise RuntimeError('Should have raised InvalidParameterValue')
+
+
+@mock_polly
+def test_synthesize_speech_bad_sample_rate():
+    client = boto3.client('polly', region_name=DEFAULT_REGION)
+    client.put_lexicon(Name='test', Content=LEXICON_XML)
+
+    try:
+        client.synthesize_speech(
+            LexiconNames=['test'],
+            OutputFormat='pcm',
+            SampleRate='18000',
+            Text='test1234',
+            TextType='text',
+            VoiceId='Astrid'
+        )
+    except ClientError as err:
+        err.response['Error']['Code'].should.equal('InvalidSampleRateException')
+    else:
+        raise RuntimeError('Should have raised InvalidSampleRateException')
+
+
+@mock_polly
+def test_synthesize_speech_bad_text_type():
+    client = boto3.client('polly', region_name=DEFAULT_REGION)
+    client.put_lexicon(Name='test', Content=LEXICON_XML)
+
+    try:
+        client.synthesize_speech(
+            LexiconNames=['test'],
+            OutputFormat='pcm',
+            SampleRate='16000',
+            Text='test1234',
+            TextType='invalid',
+            VoiceId='Astrid'
+        )
+    except ClientError as err:
+        err.response['Error']['Code'].should.equal('InvalidParameterValue')
+    else:
+        raise RuntimeError('Should have raised InvalidParameterValue')
+
+
+@mock_polly
+def test_synthesize_speech_bad_voice_id():
+    client = boto3.client('polly', region_name=DEFAULT_REGION)
+    client.put_lexicon(Name='test', Content=LEXICON_XML)
+
+    try:
+        client.synthesize_speech(
+            LexiconNames=['test'],
+            OutputFormat='pcm',
+            SampleRate='16000',
+            Text='test1234',
+            TextType='text',
+            VoiceId='Luke'
+        )
+    except ClientError as err:
+        err.response['Error']['Code'].should.equal('InvalidParameterValue')
+    else:
+        raise RuntimeError('Should have raised InvalidParameterValue')
+
+
+@mock_polly
+def test_synthesize_speech_text_too_long():
+    client = boto3.client('polly', region_name=DEFAULT_REGION)
+    client.put_lexicon(Name='test', Content=LEXICON_XML)
+
+    try:
+        client.synthesize_speech(
+            LexiconNames=['test'],
+            OutputFormat='pcm',
+            SampleRate='16000',
+            Text='test1234'*376,  # = 3008 characters
+            TextType='text',
+            VoiceId='Astrid'
+        )
+    except ClientError as err:
+        err.response['Error']['Code'].should.equal('TextLengthExceededException')
+    else:
+        raise RuntimeError('Should have raised TextLengthExceededException')
+
+
+@mock_polly
+def test_synthesize_speech_bad_speech_marks1():
+    client = boto3.client('polly', region_name=DEFAULT_REGION)
+    client.put_lexicon(Name='test', Content=LEXICON_XML)
+
+    try:
+        client.synthesize_speech(
+            LexiconNames=['test'],
+            OutputFormat='pcm',
+            SampleRate='16000',
+            Text='test1234',
+            TextType='text',
+            SpeechMarkTypes=['word'],
+            VoiceId='Astrid'
+        )
+    except ClientError as err:
+        err.response['Error']['Code'].should.equal('MarksNotSupportedForFormatException')
+    else:
+        raise RuntimeError('Should have raised MarksNotSupportedForFormatException')
+
+
+@mock_polly
+def test_synthesize_speech_bad_speech_marks2():
+    client = boto3.client('polly', region_name=DEFAULT_REGION)
+    client.put_lexicon(Name='test', Content=LEXICON_XML)
+
+    try:
+        client.synthesize_speech(
+            LexiconNames=['test'],
+            OutputFormat='pcm',
+            SampleRate='16000',
+            Text='test1234',
+            TextType='ssml',
+            SpeechMarkTypes=['word'],
+            VoiceId='Astrid'
+        )
+    except ClientError as err:
+        err.response['Error']['Code'].should.equal('MarksNotSupportedForFormatException')
+    else:
+        raise RuntimeError('Should have raised MarksNotSupportedForFormatException')
diff --git a/tests/test_polly/test_server.py b/tests/test_polly/test_server.py
index 3ae7f2254..e080c7551 100644
--- a/tests/test_polly/test_server.py
+++ b/tests/test_polly/test_server.py
@@ -1,19 +1,19 @@
-from __future__ import unicode_literals
-
-import sure  # noqa
-
-import moto.server as server
-from moto import mock_polly
-
-'''
-Test the different server responses
-'''
-
-
-@mock_polly
-def test_polly_list():
-    backend = server.create_backend_app("polly")
-    test_client = backend.test_client()
-
-    res = test_client.get('/v1/lexicons')
-    res.status_code.should.equal(200)
+from __future__ import unicode_literals
+
+import sure  # noqa
+
+import moto.server as server
+from moto import mock_polly
+
+'''
+Test the different server responses
+'''
+
+
+@mock_polly
+def test_polly_list():
+    backend = server.create_backend_app("polly")
+    test_client = backend.test_client()
+
+    res = test_client.get('/v1/lexicons')
+    res.status_code.should.equal(200)
diff --git a/tests/test_rds/test_rds.py b/tests/test_rds/test_rds.py
index 5bf733dc6..064598012 100644
--- a/tests/test_rds/test_rds.py
+++ b/tests/test_rds/test_rds.py
@@ -1,324 +1,324 @@
-from __future__ import unicode_literals
-
-import boto3
-import boto.rds
-import boto.vpc
-from boto.exception import BotoServerError
-import sure  # noqa
-
-from moto import mock_ec2_deprecated, mock_rds_deprecated, mock_rds
-from tests.helpers import disable_on_py3
-
-
-@mock_rds_deprecated
-def test_create_database():
-    conn = boto.rds.connect_to_region("us-west-2")
-
-    database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2',
-                                      security_groups=["my_sg"])
-
-    database.status.should.equal('available')
-    database.id.should.equal("db-master-1")
-    database.allocated_storage.should.equal(10)
-    database.instance_class.should.equal("db.m1.small")
-    database.master_username.should.equal("root")
-    database.endpoint.should.equal(
-        ('db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306))
-    database.security_groups[0].name.should.equal('my_sg')
-
-
-@mock_rds_deprecated
-def test_get_databases():
-    conn = boto.rds.connect_to_region("us-west-2")
-
-    list(conn.get_all_dbinstances()).should.have.length_of(0)
-
-
conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2') - conn.create_dbinstance("db-master-2", 10, 'db.m1.small', 'root', 'hunter2') - - list(conn.get_all_dbinstances()).should.have.length_of(2) - - databases = conn.get_all_dbinstances("db-master-1") - list(databases).should.have.length_of(1) - - databases[0].id.should.equal("db-master-1") - - -@mock_rds -def test_get_databases_paginated(): - conn = boto3.client('rds', region_name="us-west-2") - - for i in range(51): - conn.create_db_instance(AllocatedStorage=5, - Port=5432, - DBInstanceIdentifier='rds%d' % i, - DBInstanceClass='db.t1.micro', - Engine='postgres') - - resp = conn.describe_db_instances() - resp["DBInstances"].should.have.length_of(50) - resp["Marker"].should.equal(resp["DBInstances"][-1]['DBInstanceIdentifier']) - - resp2 = conn.describe_db_instances(Marker=resp["Marker"]) - resp2["DBInstances"].should.have.length_of(1) - - -@mock_rds_deprecated -def test_describe_non_existant_database(): - conn = boto.rds.connect_to_region("us-west-2") - conn.get_all_dbinstances.when.called_with( - "not-a-db").should.throw(BotoServerError) - - -@mock_rds_deprecated -def test_delete_database(): - conn = boto.rds.connect_to_region("us-west-2") - list(conn.get_all_dbinstances()).should.have.length_of(0) - - conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2') - list(conn.get_all_dbinstances()).should.have.length_of(1) - - conn.delete_dbinstance("db-master-1") - list(conn.get_all_dbinstances()).should.have.length_of(0) - - -@mock_rds_deprecated -def test_delete_non_existant_database(): - conn = boto.rds.connect_to_region("us-west-2") - conn.delete_dbinstance.when.called_with( - "not-a-db").should.throw(BotoServerError) - - -@mock_rds_deprecated -def test_create_database_security_group(): - conn = boto.rds.connect_to_region("us-west-2") - - security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group') - security_group.name.should.equal('db_sg') - security_group.description.should.equal("DB Security Group") - list(security_group.ip_ranges).should.equal([]) - - -@mock_rds_deprecated -def test_get_security_groups(): - conn = boto.rds.connect_to_region("us-west-2") - - list(conn.get_all_dbsecurity_groups()).should.have.length_of(0) - - conn.create_dbsecurity_group('db_sg1', 'DB Security Group') - conn.create_dbsecurity_group('db_sg2', 'DB Security Group') - - list(conn.get_all_dbsecurity_groups()).should.have.length_of(2) - - databases = conn.get_all_dbsecurity_groups("db_sg1") - list(databases).should.have.length_of(1) - - databases[0].name.should.equal("db_sg1") - - -@mock_rds_deprecated -def test_get_non_existant_security_group(): - conn = boto.rds.connect_to_region("us-west-2") - conn.get_all_dbsecurity_groups.when.called_with( - "not-a-sg").should.throw(BotoServerError) - - -@mock_rds_deprecated -def test_delete_database_security_group(): - conn = boto.rds.connect_to_region("us-west-2") - conn.create_dbsecurity_group('db_sg', 'DB Security Group') - - list(conn.get_all_dbsecurity_groups()).should.have.length_of(1) - - conn.delete_dbsecurity_group("db_sg") - list(conn.get_all_dbsecurity_groups()).should.have.length_of(0) - - -@mock_rds_deprecated -def test_delete_non_existant_security_group(): - conn = boto.rds.connect_to_region("us-west-2") - conn.delete_dbsecurity_group.when.called_with( - "not-a-db").should.throw(BotoServerError) - - -@disable_on_py3() -@mock_rds_deprecated -def test_security_group_authorize(): - conn = boto.rds.connect_to_region("us-west-2") - security_group = 
conn.create_dbsecurity_group('db_sg', 'DB Security Group') - list(security_group.ip_ranges).should.equal([]) - - security_group.authorize(cidr_ip='10.3.2.45/32') - security_group = conn.get_all_dbsecurity_groups()[0] - list(security_group.ip_ranges).should.have.length_of(1) - security_group.ip_ranges[0].cidr_ip.should.equal('10.3.2.45/32') - - -@mock_rds_deprecated -def test_add_security_group_to_database(): - conn = boto.rds.connect_to_region("us-west-2") - - database = conn.create_dbinstance( - "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') - security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group') - database.modify(security_groups=[security_group]) - - database = conn.get_all_dbinstances()[0] - list(database.security_groups).should.have.length_of(1) - - database.security_groups[0].name.should.equal("db_sg") - - -@mock_ec2_deprecated -@mock_rds_deprecated -def test_add_database_subnet_group(): - vpc_conn = boto.vpc.connect_to_region("us-west-2") - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet1 = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") - subnet2 = vpc_conn.create_subnet(vpc.id, "10.2.0.0/24") - - subnet_ids = [subnet1.id, subnet2.id] - conn = boto.rds.connect_to_region("us-west-2") - subnet_group = conn.create_db_subnet_group( - "db_subnet", "my db subnet", subnet_ids) - subnet_group.name.should.equal('db_subnet') - subnet_group.description.should.equal("my db subnet") - list(subnet_group.subnet_ids).should.equal(subnet_ids) - - -@mock_ec2_deprecated -@mock_rds_deprecated -def test_describe_database_subnet_group(): - vpc_conn = boto.vpc.connect_to_region("us-west-2") - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") - - conn = boto.rds.connect_to_region("us-west-2") - conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) - conn.create_db_subnet_group("db_subnet2", "my db subnet", [subnet.id]) - - list(conn.get_all_db_subnet_groups()).should.have.length_of(2) - list(conn.get_all_db_subnet_groups("db_subnet1")).should.have.length_of(1) - - conn.get_all_db_subnet_groups.when.called_with( - "not-a-subnet").should.throw(BotoServerError) - - -@mock_ec2_deprecated -@mock_rds_deprecated -def test_delete_database_subnet_group(): - vpc_conn = boto.vpc.connect_to_region("us-west-2") - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") - - conn = boto.rds.connect_to_region("us-west-2") - conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) - list(conn.get_all_db_subnet_groups()).should.have.length_of(1) - - conn.delete_db_subnet_group("db_subnet1") - list(conn.get_all_db_subnet_groups()).should.have.length_of(0) - - conn.delete_db_subnet_group.when.called_with( - "db_subnet1").should.throw(BotoServerError) - - -@mock_ec2_deprecated -@mock_rds_deprecated -def test_create_database_in_subnet_group(): - vpc_conn = boto.vpc.connect_to_region("us-west-2") - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") - - conn = boto.rds.connect_to_region("us-west-2") - conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) - - database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', - 'root', 'hunter2', db_subnet_group_name="db_subnet1") - - database = conn.get_all_dbinstances("db-master-1")[0] - database.subnet_group.name.should.equal("db_subnet1") - - -@mock_rds_deprecated -def test_create_database_replica(): - conn = boto.rds.connect_to_region("us-west-2") - - primary = 
conn.create_dbinstance( - "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') - - replica = conn.create_dbinstance_read_replica( - "replica", "db-master-1", "db.m1.small") - replica.id.should.equal("replica") - replica.instance_class.should.equal("db.m1.small") - status_info = replica.status_infos[0] - status_info.normal.should.equal(True) - status_info.status_type.should.equal('read replication') - status_info.status.should.equal('replicating') - - primary = conn.get_all_dbinstances("db-master-1")[0] - primary.read_replica_dbinstance_identifiers[0].should.equal("replica") - - conn.delete_dbinstance("replica") - - primary = conn.get_all_dbinstances("db-master-1")[0] - list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0) - - -@mock_rds_deprecated -def test_create_cross_region_database_replica(): - west_1_conn = boto.rds.connect_to_region("us-west-1") - west_2_conn = boto.rds.connect_to_region("us-west-2") - - primary = west_1_conn.create_dbinstance( - "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') - - primary_arn = "arn:aws:rds:us-west-1:1234567890:db:db-master-1" - replica = west_2_conn.create_dbinstance_read_replica( - "replica", - primary_arn, - "db.m1.small", - ) - - primary = west_1_conn.get_all_dbinstances("db-master-1")[0] - primary.read_replica_dbinstance_identifiers[0].should.equal("replica") - - replica = west_2_conn.get_all_dbinstances("replica")[0] - replica.instance_class.should.equal("db.m1.small") - - west_2_conn.delete_dbinstance("replica") - - primary = west_1_conn.get_all_dbinstances("db-master-1")[0] - list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0) - - -@mock_rds_deprecated -def test_connecting_to_us_east_1(): - # boto does not use us-east-1 in the URL for RDS, - # and that broke moto in the past: - # https://github.com/boto/boto/blob/e271ff09364ea18d9d8b6f4d63d6b0ac6cbc9b75/boto/endpoints.json#L285 - conn = boto.rds.connect_to_region("us-east-1") - - database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', - security_groups=["my_sg"]) - - database.status.should.equal('available') - database.id.should.equal("db-master-1") - database.allocated_storage.should.equal(10) - database.instance_class.should.equal("db.m1.small") - database.master_username.should.equal("root") - database.endpoint.should.equal( - ('db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com', 3306)) - database.security_groups[0].name.should.equal('my_sg') - - -@mock_rds_deprecated -def test_create_database_with_iops(): - conn = boto.rds.connect_to_region("us-west-2") - - database = conn.create_dbinstance( - "db-master-1", 10, 'db.m1.small', 'root', 'hunter2', iops=6000) - - database.status.should.equal('available') - database.iops.should.equal(6000) - # boto>2.36.0 may change the following property name to `storage_type` - database.StorageType.should.equal('io1') +from __future__ import unicode_literals + +import boto3 +import boto.rds +import boto.vpc +from boto.exception import BotoServerError +import sure # noqa + +from moto import mock_ec2_deprecated, mock_rds_deprecated, mock_rds +from tests.helpers import disable_on_py3 + + +@mock_rds_deprecated +def test_create_database(): + conn = boto.rds.connect_to_region("us-west-2") + + database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', + security_groups=["my_sg"]) + + database.status.should.equal('available') + database.id.should.equal("db-master-1") + database.allocated_storage.should.equal(10) + 
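+    # the mocked instance echoes back the parameters it was created with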
database.instance_class.should.equal("db.m1.small") + database.master_username.should.equal("root") + database.endpoint.should.equal( + ('db-master-1.aaaaaaaaaa.us-west-2.rds.amazonaws.com', 3306)) + database.security_groups[0].name.should.equal('my_sg') + + +@mock_rds_deprecated +def test_get_databases(): + conn = boto.rds.connect_to_region("us-west-2") + + list(conn.get_all_dbinstances()).should.have.length_of(0) + + conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + conn.create_dbinstance("db-master-2", 10, 'db.m1.small', 'root', 'hunter2') + + list(conn.get_all_dbinstances()).should.have.length_of(2) + + databases = conn.get_all_dbinstances("db-master-1") + list(databases).should.have.length_of(1) + + databases[0].id.should.equal("db-master-1") + + +@mock_rds +def test_get_databases_paginated(): + conn = boto3.client('rds', region_name="us-west-2") + + for i in range(51): + conn.create_db_instance(AllocatedStorage=5, + Port=5432, + DBInstanceIdentifier='rds%d' % i, + DBInstanceClass='db.t1.micro', + Engine='postgres') + + resp = conn.describe_db_instances() + resp["DBInstances"].should.have.length_of(50) + resp["Marker"].should.equal(resp["DBInstances"][-1]['DBInstanceIdentifier']) + + resp2 = conn.describe_db_instances(Marker=resp["Marker"]) + resp2["DBInstances"].should.have.length_of(1) + + +@mock_rds_deprecated +def test_describe_non_existant_database(): + conn = boto.rds.connect_to_region("us-west-2") + conn.get_all_dbinstances.when.called_with( + "not-a-db").should.throw(BotoServerError) + + +@mock_rds_deprecated +def test_delete_database(): + conn = boto.rds.connect_to_region("us-west-2") + list(conn.get_all_dbinstances()).should.have.length_of(0) + + conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + list(conn.get_all_dbinstances()).should.have.length_of(1) + + conn.delete_dbinstance("db-master-1") + list(conn.get_all_dbinstances()).should.have.length_of(0) + + +@mock_rds_deprecated +def test_delete_non_existant_database(): + conn = boto.rds.connect_to_region("us-west-2") + conn.delete_dbinstance.when.called_with( + "not-a-db").should.throw(BotoServerError) + + +@mock_rds_deprecated +def test_create_database_security_group(): + conn = boto.rds.connect_to_region("us-west-2") + + security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group') + security_group.name.should.equal('db_sg') + security_group.description.should.equal("DB Security Group") + list(security_group.ip_ranges).should.equal([]) + + +@mock_rds_deprecated +def test_get_security_groups(): + conn = boto.rds.connect_to_region("us-west-2") + + list(conn.get_all_dbsecurity_groups()).should.have.length_of(0) + + conn.create_dbsecurity_group('db_sg1', 'DB Security Group') + conn.create_dbsecurity_group('db_sg2', 'DB Security Group') + + list(conn.get_all_dbsecurity_groups()).should.have.length_of(2) + + databases = conn.get_all_dbsecurity_groups("db_sg1") + list(databases).should.have.length_of(1) + + databases[0].name.should.equal("db_sg1") + + +@mock_rds_deprecated +def test_get_non_existant_security_group(): + conn = boto.rds.connect_to_region("us-west-2") + conn.get_all_dbsecurity_groups.when.called_with( + "not-a-sg").should.throw(BotoServerError) + + +@mock_rds_deprecated +def test_delete_database_security_group(): + conn = boto.rds.connect_to_region("us-west-2") + conn.create_dbsecurity_group('db_sg', 'DB Security Group') + + list(conn.get_all_dbsecurity_groups()).should.have.length_of(1) + + conn.delete_dbsecurity_group("db_sg") + 
list(conn.get_all_dbsecurity_groups()).should.have.length_of(0) + + +@mock_rds_deprecated +def test_delete_non_existant_security_group(): + conn = boto.rds.connect_to_region("us-west-2") + conn.delete_dbsecurity_group.when.called_with( + "not-a-db").should.throw(BotoServerError) + + +@disable_on_py3() +@mock_rds_deprecated +def test_security_group_authorize(): + conn = boto.rds.connect_to_region("us-west-2") + security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group') + list(security_group.ip_ranges).should.equal([]) + + security_group.authorize(cidr_ip='10.3.2.45/32') + security_group = conn.get_all_dbsecurity_groups()[0] + list(security_group.ip_ranges).should.have.length_of(1) + security_group.ip_ranges[0].cidr_ip.should.equal('10.3.2.45/32') + + +@mock_rds_deprecated +def test_add_security_group_to_database(): + conn = boto.rds.connect_to_region("us-west-2") + + database = conn.create_dbinstance( + "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + security_group = conn.create_dbsecurity_group('db_sg', 'DB Security Group') + database.modify(security_groups=[security_group]) + + database = conn.get_all_dbinstances()[0] + list(database.security_groups).should.have.length_of(1) + + database.security_groups[0].name.should.equal("db_sg") + + +@mock_ec2_deprecated +@mock_rds_deprecated +def test_add_database_subnet_group(): + vpc_conn = boto.vpc.connect_to_region("us-west-2") + vpc = vpc_conn.create_vpc("10.0.0.0/16") + subnet1 = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") + subnet2 = vpc_conn.create_subnet(vpc.id, "10.2.0.0/24") + + subnet_ids = [subnet1.id, subnet2.id] + conn = boto.rds.connect_to_region("us-west-2") + subnet_group = conn.create_db_subnet_group( + "db_subnet", "my db subnet", subnet_ids) + subnet_group.name.should.equal('db_subnet') + subnet_group.description.should.equal("my db subnet") + list(subnet_group.subnet_ids).should.equal(subnet_ids) + + +@mock_ec2_deprecated +@mock_rds_deprecated +def test_describe_database_subnet_group(): + vpc_conn = boto.vpc.connect_to_region("us-west-2") + vpc = vpc_conn.create_vpc("10.0.0.0/16") + subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") + + conn = boto.rds.connect_to_region("us-west-2") + conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) + conn.create_db_subnet_group("db_subnet2", "my db subnet", [subnet.id]) + + list(conn.get_all_db_subnet_groups()).should.have.length_of(2) + list(conn.get_all_db_subnet_groups("db_subnet1")).should.have.length_of(1) + + conn.get_all_db_subnet_groups.when.called_with( + "not-a-subnet").should.throw(BotoServerError) + + +@mock_ec2_deprecated +@mock_rds_deprecated +def test_delete_database_subnet_group(): + vpc_conn = boto.vpc.connect_to_region("us-west-2") + vpc = vpc_conn.create_vpc("10.0.0.0/16") + subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") + + conn = boto.rds.connect_to_region("us-west-2") + conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) + list(conn.get_all_db_subnet_groups()).should.have.length_of(1) + + conn.delete_db_subnet_group("db_subnet1") + list(conn.get_all_db_subnet_groups()).should.have.length_of(0) + + conn.delete_db_subnet_group.when.called_with( + "db_subnet1").should.throw(BotoServerError) + + +@mock_ec2_deprecated +@mock_rds_deprecated +def test_create_database_in_subnet_group(): + vpc_conn = boto.vpc.connect_to_region("us-west-2") + vpc = vpc_conn.create_vpc("10.0.0.0/16") + subnet = vpc_conn.create_subnet(vpc.id, "10.1.0.0/24") + + conn = boto.rds.connect_to_region("us-west-2") + 
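+    # the subnet group has to exist before a DB instance can reference it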
conn.create_db_subnet_group("db_subnet1", "my db subnet", [subnet.id]) + + database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', + 'root', 'hunter2', db_subnet_group_name="db_subnet1") + + database = conn.get_all_dbinstances("db-master-1")[0] + database.subnet_group.name.should.equal("db_subnet1") + + +@mock_rds_deprecated +def test_create_database_replica(): + conn = boto.rds.connect_to_region("us-west-2") + + primary = conn.create_dbinstance( + "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + + replica = conn.create_dbinstance_read_replica( + "replica", "db-master-1", "db.m1.small") + replica.id.should.equal("replica") + replica.instance_class.should.equal("db.m1.small") + status_info = replica.status_infos[0] + status_info.normal.should.equal(True) + status_info.status_type.should.equal('read replication') + status_info.status.should.equal('replicating') + + primary = conn.get_all_dbinstances("db-master-1")[0] + primary.read_replica_dbinstance_identifiers[0].should.equal("replica") + + conn.delete_dbinstance("replica") + + primary = conn.get_all_dbinstances("db-master-1")[0] + list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0) + + +@mock_rds_deprecated +def test_create_cross_region_database_replica(): + west_1_conn = boto.rds.connect_to_region("us-west-1") + west_2_conn = boto.rds.connect_to_region("us-west-2") + + primary = west_1_conn.create_dbinstance( + "db-master-1", 10, 'db.m1.small', 'root', 'hunter2') + + primary_arn = "arn:aws:rds:us-west-1:1234567890:db:db-master-1" + replica = west_2_conn.create_dbinstance_read_replica( + "replica", + primary_arn, + "db.m1.small", + ) + + primary = west_1_conn.get_all_dbinstances("db-master-1")[0] + primary.read_replica_dbinstance_identifiers[0].should.equal("replica") + + replica = west_2_conn.get_all_dbinstances("replica")[0] + replica.instance_class.should.equal("db.m1.small") + + west_2_conn.delete_dbinstance("replica") + + primary = west_1_conn.get_all_dbinstances("db-master-1")[0] + list(primary.read_replica_dbinstance_identifiers).should.have.length_of(0) + + +@mock_rds_deprecated +def test_connecting_to_us_east_1(): + # boto does not use us-east-1 in the URL for RDS, + # and that broke moto in the past: + # https://github.com/boto/boto/blob/e271ff09364ea18d9d8b6f4d63d6b0ac6cbc9b75/boto/endpoints.json#L285 + conn = boto.rds.connect_to_region("us-east-1") + + database = conn.create_dbinstance("db-master-1", 10, 'db.m1.small', 'root', 'hunter2', + security_groups=["my_sg"]) + + database.status.should.equal('available') + database.id.should.equal("db-master-1") + database.allocated_storage.should.equal(10) + database.instance_class.should.equal("db.m1.small") + database.master_username.should.equal("root") + database.endpoint.should.equal( + ('db-master-1.aaaaaaaaaa.us-east-1.rds.amazonaws.com', 3306)) + database.security_groups[0].name.should.equal('my_sg') + + +@mock_rds_deprecated +def test_create_database_with_iops(): + conn = boto.rds.connect_to_region("us-west-2") + + database = conn.create_dbinstance( + "db-master-1", 10, 'db.m1.small', 'root', 'hunter2', iops=6000) + + database.status.should.equal('available') + database.iops.should.equal(6000) + # boto>2.36.0 may change the following property name to `storage_type` + database.StorageType.should.equal('io1') diff --git a/tests/test_rds/test_server.py b/tests/test_rds/test_server.py index 224704a0b..814620331 100644 --- a/tests/test_rds/test_server.py +++ b/tests/test_rds/test_server.py @@ -1,20 +1,20 @@ -from __future__ import 
unicode_literals
-
-import sure  # noqa
-
-import moto.server as server
-from moto import mock_rds
-
-'''
-Test the different server responses
-'''
-
-
-@mock_rds
-def test_list_databases():
-    backend = server.create_backend_app("rds")
-    test_client = backend.test_client()
-
-    res = test_client.get('/?Action=DescribeDBInstances')
-
-    res.data.decode("utf-8").should.contain("<DescribeDBInstancesResult>")
+from __future__ import unicode_literals
+
+import sure  # noqa
+
+import moto.server as server
+from moto import mock_rds
+
+'''
+Test the different server responses
+'''
+
+
+@mock_rds
+def test_list_databases():
+    backend = server.create_backend_app("rds")
+    test_client = backend.test_client()
+
+    res = test_client.get('/?Action=DescribeDBInstances')
+
+    res.data.decode("utf-8").should.contain("<DescribeDBInstancesResult>")
diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py
index cf9805444..311cd7fd7 100644
--- a/tests/test_rds2/test_rds2.py
+++ b/tests/test_rds2/test_rds2.py
@@ -1,1472 +1,1472 @@
-from __future__ import unicode_literals
-
-from botocore.exceptions import ClientError, ParamValidationError
-import boto3
-import sure  # noqa
-from moto import mock_ec2, mock_kms, mock_rds2
-
-
-@mock_rds2
-def test_create_database():
-    conn = boto3.client('rds', region_name='us-west-2')
-    database = conn.create_db_instance(DBInstanceIdentifier='db-master-1',
-                                       AllocatedStorage=10,
-                                       Engine='postgres',
-                                       DBName='staging-postgres',
-                                       DBInstanceClass='db.m1.small',
-                                       LicenseModel='license-included',
-                                       MasterUsername='root',
-                                       MasterUserPassword='hunter2',
-                                       Port=1234,
-                                       DBSecurityGroups=["my_sg"])
-    db_instance = database['DBInstance']
-    db_instance['AllocatedStorage'].should.equal(10)
-    db_instance['DBInstanceClass'].should.equal("db.m1.small")
-    db_instance['LicenseModel'].should.equal("license-included")
-    db_instance['MasterUsername'].should.equal("root")
-    db_instance['DBSecurityGroups'][0][
-        'DBSecurityGroupName'].should.equal('my_sg')
-    db_instance['DBInstanceArn'].should.equal(
-        'arn:aws:rds:us-west-2:1234567890:db:db-master-1')
-    db_instance['DBInstanceStatus'].should.equal('available')
-    db_instance['DBName'].should.equal('staging-postgres')
-    db_instance['DBInstanceIdentifier'].should.equal("db-master-1")
-    db_instance['IAMDatabaseAuthenticationEnabled'].should.equal(False)
-    db_instance['DbiResourceId'].should.contain("db-")
-    db_instance['CopyTagsToSnapshot'].should.equal(False)
-
-
-@mock_rds2
-def test_stop_database():
-    conn = boto3.client('rds', region_name='us-west-2')
-    database = conn.create_db_instance(DBInstanceIdentifier='db-master-1',
-                                       AllocatedStorage=10,
-                                       Engine='postgres',
-                                       DBName='staging-postgres',
-                                       DBInstanceClass='db.m1.small',
-                                       LicenseModel='license-included',
-                                       MasterUsername='root',
-                                       MasterUserPassword='hunter2',
-                                       Port=1234,
-                                       DBSecurityGroups=["my_sg"])
-    mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0]
-    mydb['DBInstanceStatus'].should.equal('available')
-    # stopping the database should shut it down
-    response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier'])
-    response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
-    response['DBInstance']['DBInstanceStatus'].should.equal('stopped')
-    # the rds client should error when trying to stop an already stopped database
-    conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError)
-    # stopping a stopped database with a snapshot should error, and no snapshot should exist for that call
-    conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap').should.throw(ClientError)
-    response = conn.describe_db_snapshots()
-    response['DBSnapshots'].should.equal([])
-
-
-@mock_rds2
-def test_start_database():
-    conn = boto3.client('rds', region_name='us-west-2')
-    database = conn.create_db_instance(DBInstanceIdentifier='db-master-1',
-                                       AllocatedStorage=10,
-                                       Engine='postgres',
-                                       DBName='staging-postgres',
-                                       DBInstanceClass='db.m1.small',
-                                       LicenseModel='license-included',
-                                       MasterUsername='root',
-                                       MasterUserPassword='hunter2',
-                                       Port=1234,
-                                       DBSecurityGroups=["my_sg"])
-    mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0]
-    mydb['DBInstanceStatus'].should.equal('available')
-    # starting an already started database should error
-    conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError)
-    # stop, then start - the instance should go from stopped to available; create a snapshot and check it exists
-    response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap')
-    response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
-    response['DBInstance']['DBInstanceStatus'].should.equal('stopped')
-    response = conn.describe_db_snapshots()
-    response['DBSnapshots'][0]['DBSnapshotIdentifier'].should.equal('rocky4570-rds-snap')
-    response = conn.start_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier'])
-    response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
-    response['DBInstance']['DBInstanceStatus'].should.equal('available')
-    # starting the database should not remove the snapshot
-    response = conn.describe_db_snapshots()
-    response['DBSnapshots'][0]['DBSnapshotIdentifier'].should.equal('rocky4570-rds-snap')
-    # stopping the database with a snapshot identifier that already exists should throw an error
-    conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap').should.throw(ClientError)
-    # stopping the database without requesting a snapshot should succeed
-    response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier'])
-    response['ResponseMetadata']['HTTPStatusCode'].should.equal(200)
-    response['DBInstance']['DBInstanceStatus'].should.equal('stopped')
-
-
-@mock_rds2
-def test_fail_to_stop_multi_az():
-    conn = boto3.client('rds', region_name='us-west-2')
-    database = conn.create_db_instance(DBInstanceIdentifier='db-master-1',
-                                       AllocatedStorage=10,
-                                       Engine='postgres',
-                                       DBName='staging-postgres',
-                                       DBInstanceClass='db.m1.small',
-                                       LicenseModel='license-included',
-                                       MasterUsername='root',
-                                       MasterUserPassword='hunter2',
-                                       Port=1234,
-                                       DBSecurityGroups=["my_sg"],
-                                       MultiAZ=True)
-
-    mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0]
-    mydb['DBInstanceStatus'].should.equal('available')
-    # multi-AZ databases aren't allowed to be shut down at this time.
-    conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError)
-    # multi-AZ databases aren't allowed to be started up at this time.
- conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) - - -@mock_rds2 -def test_fail_to_stop_readreplica(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - LicenseModel='license-included', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - - replica = conn.create_db_instance_read_replica(DBInstanceIdentifier="db-replica-1", - SourceDBInstanceIdentifier="db-master-1", - DBInstanceClass="db.m1.small") - - mydb = conn.describe_db_instances(DBInstanceIdentifier=replica['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] - mydb['DBInstanceStatus'].should.equal('available') - # read-replicas are not allowed to be stopped at this time. - conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) - # read-replicas are not allowed to be started at this time. - conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) - - -@mock_rds2 -def test_get_databases(): - conn = boto3.client('rds', region_name='us-west-2') - - instances = conn.describe_db_instances() - list(instances['DBInstances']).should.have.length_of(0) - - conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg']) - conn.create_db_instance(DBInstanceIdentifier='db-master-2', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg']) - instances = conn.describe_db_instances() - list(instances['DBInstances']).should.have.length_of(2) - - instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") - list(instances['DBInstances']).should.have.length_of(1) - instances['DBInstances'][0][ - 'DBInstanceIdentifier'].should.equal("db-master-1") - instances['DBInstances'][0]['DBInstanceArn'].should.equal( - 'arn:aws:rds:us-west-2:1234567890:db:db-master-1') - - -@mock_rds2 -def test_get_databases_paginated(): - conn = boto3.client('rds', region_name="us-west-2") - - for i in range(51): - conn.create_db_instance(AllocatedStorage=5, - Port=5432, - DBInstanceIdentifier='rds%d' % i, - DBInstanceClass='db.t1.micro', - Engine='postgres') - - resp = conn.describe_db_instances() - resp["DBInstances"].should.have.length_of(50) - resp["Marker"].should.equal(resp["DBInstances"][-1]['DBInstanceIdentifier']) - - resp2 = conn.describe_db_instances(Marker=resp["Marker"]) - resp2["DBInstances"].should.have.length_of(1) - - resp3 = conn.describe_db_instances(MaxRecords=100) - resp3["DBInstances"].should.have.length_of(51) - -@mock_rds2 -def test_describe_non_existant_database(): - conn = boto3.client('rds', region_name='us-west-2') - conn.describe_db_instances.when.called_with( - DBInstanceIdentifier="not-a-db").should.throw(ClientError) - - -@mock_rds2 -def test_modify_db_instance(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - 
Port=1234, - DBSecurityGroups=['my_sg']) - instances = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') - instances['DBInstances'][0]['AllocatedStorage'].should.equal(10) - conn.modify_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=20, - ApplyImmediately=True) - instances = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') - instances['DBInstances'][0]['AllocatedStorage'].should.equal(20) - - -@mock_rds2 -def test_rename_db_instance(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg']) - instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") - list(instances['DBInstances']).should.have.length_of(1) - conn.describe_db_instances.when.called_with(DBInstanceIdentifier="db-master-2").should.throw(ClientError) - conn.modify_db_instance(DBInstanceIdentifier='db-master-1', - NewDBInstanceIdentifier='db-master-2', - ApplyImmediately=True) - conn.describe_db_instances.when.called_with(DBInstanceIdentifier="db-master-1").should.throw(ClientError) - instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-2") - list(instances['DBInstances']).should.have.length_of(1) - - -@mock_rds2 -def test_modify_non_existant_database(): - conn = boto3.client('rds', region_name='us-west-2') - conn.modify_db_instance.when.called_with(DBInstanceIdentifier='not-a-db', - AllocatedStorage=20, - ApplyImmediately=True).should.throw(ClientError) - - -@mock_rds2 -def test_reboot_db_instance(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg']) - database = conn.reboot_db_instance(DBInstanceIdentifier='db-master-1') - database['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1") - - -@mock_rds2 -def test_reboot_non_existant_database(): - conn = boto3.client('rds', region_name='us-west-2') - conn.reboot_db_instance.when.called_with( - DBInstanceIdentifier="not-a-db").should.throw(ClientError) - - -@mock_rds2 -def test_delete_database(): - conn = boto3.client('rds', region_name='us-west-2') - instances = conn.describe_db_instances() - list(instances['DBInstances']).should.have.length_of(0) - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg']) - instances = conn.describe_db_instances() - list(instances['DBInstances']).should.have.length_of(1) - - conn.delete_db_instance(DBInstanceIdentifier="db-primary-1", - FinalDBSnapshotIdentifier='primary-1-snapshot') - - instances = conn.describe_db_instances() - list(instances['DBInstances']).should.have.length_of(0) - - # Saved the snapshot - snapshots = conn.describe_db_snapshots(DBInstanceIdentifier="db-primary-1").get('DBSnapshots') - snapshots[0].get('Engine').should.equal('postgres') - - -@mock_rds2 -def test_delete_non_existant_database(): - conn = boto3.client('rds2', region_name="us-west-2") - conn.delete_db_instance.when.called_with( - DBInstanceIdentifier="not-a-db").should.throw(ClientError) - - -@mock_rds2 -def 
test_create_db_snapshots(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_snapshot.when.called_with( - DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) - - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - - snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='g-1').get('DBSnapshot') - - snapshot.get('Engine').should.equal('postgres') - snapshot.get('DBInstanceIdentifier').should.equal('db-primary-1') - snapshot.get('DBSnapshotIdentifier').should.equal('g-1') - result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn']) - result['TagList'].should.equal([]) - - -@mock_rds2 -def test_create_db_snapshots_copy_tags(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_snapshot.when.called_with( - DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) - - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"], - CopyTagsToSnapshot=True, - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 'bar1', - }, - ]) - - snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='g-1').get('DBSnapshot') - - snapshot.get('Engine').should.equal('postgres') - snapshot.get('DBInstanceIdentifier').should.equal('db-primary-1') - snapshot.get('DBSnapshotIdentifier').should.equal('g-1') - result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn']) - result['TagList'].should.equal([{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_rds2 -def test_describe_db_snapshots(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - - created = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-1').get('DBSnapshot') - - created.get('Engine').should.equal('postgres') - - by_database_id = conn.describe_db_snapshots(DBInstanceIdentifier='db-primary-1').get('DBSnapshots') - by_snapshot_id = conn.describe_db_snapshots(DBSnapshotIdentifier='snapshot-1').get('DBSnapshots') - by_snapshot_id.should.equal(by_database_id) - - snapshot = by_snapshot_id[0] - snapshot.should.equal(created) - snapshot.get('Engine').should.equal('postgres') - - conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-2') - snapshots = conn.describe_db_snapshots(DBInstanceIdentifier='db-primary-1').get('DBSnapshots') - snapshots.should.have.length_of(2) - - -@mock_rds2 -def test_delete_db_snapshot(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - 
Port=1234, - DBSecurityGroups=["my_sg"]) - conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-1') - - conn.describe_db_snapshots(DBSnapshotIdentifier='snapshot-1').get('DBSnapshots')[0] - conn.delete_db_snapshot(DBSnapshotIdentifier='snapshot-1') - conn.describe_db_snapshots.when.called_with( - DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) - - -@mock_rds2 -def test_create_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - option_group = conn.create_option_group(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group') - option_group['OptionGroup']['OptionGroupName'].should.equal('test') - option_group['OptionGroup']['EngineName'].should.equal('mysql') - option_group['OptionGroup'][ - 'OptionGroupDescription'].should.equal('test option group') - option_group['OptionGroup']['MajorEngineVersion'].should.equal('5.6') - - -@mock_rds2 -def test_create_option_group_bad_engine_name(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group.when.called_with(OptionGroupName='test', - EngineName='invalid_engine', - MajorEngineVersion='5.6', - OptionGroupDescription='test invalid engine').should.throw(ClientError) - - -@mock_rds2 -def test_create_option_group_bad_engine_major_version(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group.when.called_with(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='6.6.6', - OptionGroupDescription='test invalid engine version').should.throw(ClientError) - - -@mock_rds2 -def test_create_option_group_empty_description(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group.when.called_with(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='').should.throw(ClientError) - - -@mock_rds2 -def test_create_option_group_duplicate(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group') - conn.create_option_group.when.called_with(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group').should.throw(ClientError) - - -@mock_rds2 -def test_describe_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group') - option_groups = conn.describe_option_groups(OptionGroupName='test') - option_groups['OptionGroupsList'][0][ - 'OptionGroupName'].should.equal('test') - - -@mock_rds2 -def test_describe_non_existant_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.describe_option_groups.when.called_with( - OptionGroupName="not-a-option-group").should.throw(ClientError) - - -@mock_rds2 -def test_delete_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group') - option_groups = conn.describe_option_groups(OptionGroupName='test') - option_groups['OptionGroupsList'][0][ - 'OptionGroupName'].should.equal('test') - conn.delete_option_group(OptionGroupName='test') - conn.describe_option_groups.when.called_with( - OptionGroupName='test').should.throw(ClientError) - - 
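The option-group tests above all follow a create/describe/delete cycle; a condensed sketch of that cycle under the same moto/boto3 assumptions (sketch-og is an illustrative name):

import boto3
from moto import mock_rds2


@mock_rds2
def sketch_option_group_lifecycle():
    conn = boto3.client('rds', region_name='us-west-2')
    conn.create_option_group(OptionGroupName='sketch-og',
                             EngineName='mysql',
                             MajorEngineVersion='5.6',
                             OptionGroupDescription='sketch option group')
    groups = conn.describe_option_groups(OptionGroupName='sketch-og')['OptionGroupsList']
    assert [og['OptionGroupName'] for og in groups] == ['sketch-og']
    # Once deleted, describe_option_groups raises ClientError for this name.
    conn.delete_option_group(OptionGroupName='sketch-og')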
-@mock_rds2 -def test_delete_non_existant_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.delete_option_group.when.called_with( - OptionGroupName='non-existant').should.throw(ClientError) - - -@mock_rds2 -def test_describe_option_group_options(): - conn = boto3.client('rds', region_name='us-west-2') - option_group_options = conn.describe_option_group_options( - EngineName='sqlserver-ee') - len(option_group_options['OptionGroupOptions']).should.equal(4) - option_group_options = conn.describe_option_group_options( - EngineName='sqlserver-ee', MajorEngineVersion='11.00') - len(option_group_options['OptionGroupOptions']).should.equal(2) - option_group_options = conn.describe_option_group_options( - EngineName='mysql', MajorEngineVersion='5.6') - len(option_group_options['OptionGroupOptions']).should.equal(1) - conn.describe_option_group_options.when.called_with( - EngineName='non-existent').should.throw(ClientError) - conn.describe_option_group_options.when.called_with( - EngineName='mysql', MajorEngineVersion='non-existent').should.throw(ClientError) - - -@mock_rds2 -def test_modify_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', EngineName='mysql', - MajorEngineVersion='5.6', OptionGroupDescription='test option group') - # TODO: create option and validate before deleting. - # if Someone can tell me how the hell to use this function - # to add options to an option_group, I can finish coding this. - result = conn.modify_option_group(OptionGroupName='test', OptionsToInclude=[ - ], OptionsToRemove=['MEMCACHED'], ApplyImmediately=True) - result['OptionGroup']['EngineName'].should.equal('mysql') - result['OptionGroup']['Options'].should.equal([]) - result['OptionGroup']['OptionGroupName'].should.equal('test') - - -@mock_rds2 -def test_modify_option_group_no_options(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', EngineName='mysql', - MajorEngineVersion='5.6', OptionGroupDescription='test option group') - conn.modify_option_group.when.called_with( - OptionGroupName='test').should.throw(ClientError) - - -@mock_rds2 -def test_modify_non_existant_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.modify_option_group.when.called_with(OptionGroupName='non-existant', OptionsToInclude=[( - 'OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings')]).should.throw(ParamValidationError) - - -@mock_rds2 -def test_delete_non_existant_database(): - conn = boto3.client('rds', region_name='us-west-2') - conn.delete_db_instance.when.called_with( - DBInstanceIdentifier="not-a-db").should.throw(ClientError) - - -@mock_rds2 -def test_list_tags_invalid_arn(): - conn = boto3.client('rds', region_name='us-west-2') - conn.list_tags_for_resource.when.called_with( - ResourceName='arn:aws:rds:bad-arn').should.throw(ClientError) - - -@mock_rds2 -def test_list_tags_db(): - conn = boto3.client('rds', region_name='us-west-2') - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:db:foo') - result['TagList'].should.equal([]) - test_instance = conn.create_db_instance( - DBInstanceIdentifier='db-with-tags', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg'], - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 'bar1', 
- }, - ]) - result = conn.list_tags_for_resource( - ResourceName=test_instance['DBInstance']['DBInstanceArn']) - result['TagList'].should.equal([{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_rds2 -def test_add_tags_db(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-without-tags', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg'], - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 'bar1', - }, - ]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags') - list(result['TagList']).should.have.length_of(2) - conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags', - Tags=[ - { - 'Key': 'foo', - 'Value': 'fish', - }, - { - 'Key': 'foo2', - 'Value': 'bar2', - }, - ]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags') - list(result['TagList']).should.have.length_of(3) - - -@mock_rds2 -def test_remove_tags_db(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-with-tags', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=['my_sg'], - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 'bar1', - }, - ]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') - list(result['TagList']).should.have.length_of(2) - conn.remove_tags_from_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags', TagKeys=['foo']) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags') - len(result['TagList']).should.equal(1) - - -@mock_rds2 -def test_list_tags_snapshot(): - conn = boto3.client('rds', region_name='us-west-2') - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:foo') - result['TagList'].should.equal([]) - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-with-tags', - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 'bar1', - }, - ]) - result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshot']['DBSnapshotArn']) - result['TagList'].should.equal([{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_rds2 -def test_add_tags_snapshot(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-without-tags', - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 
'bar1', - }, - ]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags') - list(result['TagList']).should.have.length_of(2) - conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags', - Tags=[ - { - 'Key': 'foo', - 'Value': 'fish', - }, - { - 'Key': 'foo2', - 'Value': 'bar2', - }, - ]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags') - list(result['TagList']).should.have.length_of(3) - - -@mock_rds2 -def test_remove_tags_snapshot(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_instance(DBInstanceIdentifier='db-primary-1', - AllocatedStorage=10, - Engine='postgres', - DBName='staging-postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', - DBSnapshotIdentifier='snapshot-with-tags', - Tags=[ - { - 'Key': 'foo', - 'Value': 'bar', - }, - { - 'Key': 'foo1', - 'Value': 'bar1', - }, - ]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags') - list(result['TagList']).should.have.length_of(2) - conn.remove_tags_from_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags', TagKeys=['foo']) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags') - len(result['TagList']).should.equal(1) - - -@mock_rds2 -def test_add_tags_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group') - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') - list(result['TagList']).should.have.length_of(0) - conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', - Tags=[ - { - 'Key': 'foo', - 'Value': 'fish', - }, - { - 'Key': 'foo2', - 'Value': 'bar2', - }]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') - list(result['TagList']).should.have.length_of(2) - - -@mock_rds2 -def test_remove_tags_option_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_option_group(OptionGroupName='test', - EngineName='mysql', - MajorEngineVersion='5.6', - OptionGroupDescription='test option group') - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') - conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', - Tags=[ - { - 'Key': 'foo', - 'Value': 'fish', - }, - { - 'Key': 'foo2', - 'Value': 'bar2', - }]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') - list(result['TagList']).should.have.length_of(2) - conn.remove_tags_from_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', - TagKeys=['foo']) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') - list(result['TagList']).should.have.length_of(1) - - -@mock_rds2 -def test_create_database_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - - result = conn.create_db_security_group( - DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') - 
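All of the tagging tests drive the same three ARN-based calls; a minimal sketch, reusing the fixed account id 1234567890 that moto's ARNs use in the assertions above (sketch-og and the team tag are illustrative):

import boto3
from moto import mock_rds2


@mock_rds2
def sketch_tagging_via_arn():
    conn = boto3.client('rds', region_name='us-west-2')
    conn.create_option_group(OptionGroupName='sketch-og',
                             EngineName='mysql',
                             MajorEngineVersion='5.6',
                             OptionGroupDescription='sketch option group')
    arn = 'arn:aws:rds:us-west-2:1234567890:og:sketch-og'
    conn.add_tags_to_resource(ResourceName=arn,
                              Tags=[{'Key': 'team', 'Value': 'data'}])
    tags = conn.list_tags_for_resource(ResourceName=arn)['TagList']
    assert tags == [{'Key': 'team', 'Value': 'data'}]
    # Removing by key leaves an empty TagList behind.
    conn.remove_tags_from_resource(ResourceName=arn, TagKeys=['team'])
    assert conn.list_tags_for_resource(ResourceName=arn)['TagList'] == []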
result['DBSecurityGroup']['DBSecurityGroupName'].should.equal("db_sg") - result['DBSecurityGroup'][ - 'DBSecurityGroupDescription'].should.equal("DB Security Group") - result['DBSecurityGroup']['IPRanges'].should.equal([]) - - -@mock_rds2 -def test_get_security_groups(): - conn = boto3.client('rds', region_name='us-west-2') - - result = conn.describe_db_security_groups() - result['DBSecurityGroups'].should.have.length_of(0) - - conn.create_db_security_group( - DBSecurityGroupName='db_sg1', DBSecurityGroupDescription='DB Security Group') - conn.create_db_security_group( - DBSecurityGroupName='db_sg2', DBSecurityGroupDescription='DB Security Group') - - result = conn.describe_db_security_groups() - result['DBSecurityGroups'].should.have.length_of(2) - - result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg1") - result['DBSecurityGroups'].should.have.length_of(1) - result['DBSecurityGroups'][0]['DBSecurityGroupName'].should.equal("db_sg1") - - -@mock_rds2 -def test_get_non_existant_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.describe_db_security_groups.when.called_with( - DBSecurityGroupName="not-a-sg").should.throw(ClientError) - - -@mock_rds2 -def test_delete_database_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_security_group( - DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') - - result = conn.describe_db_security_groups() - result['DBSecurityGroups'].should.have.length_of(1) - - conn.delete_db_security_group(DBSecurityGroupName="db_sg") - result = conn.describe_db_security_groups() - result['DBSecurityGroups'].should.have.length_of(0) - - -@mock_rds2 -def test_delete_non_existant_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.delete_db_security_group.when.called_with( - DBSecurityGroupName="not-a-db").should.throw(ClientError) - - -@mock_rds2 -def test_security_group_authorize(): - conn = boto3.client('rds', region_name='us-west-2') - security_group = conn.create_db_security_group(DBSecurityGroupName='db_sg', - DBSecurityGroupDescription='DB Security Group') - security_group['DBSecurityGroup']['IPRanges'].should.equal([]) - - conn.authorize_db_security_group_ingress(DBSecurityGroupName='db_sg', - CIDRIP='10.3.2.45/32') - - result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg") - result['DBSecurityGroups'][0]['IPRanges'].should.have.length_of(1) - result['DBSecurityGroups'][0]['IPRanges'].should.equal( - [{'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'}]) - - conn.authorize_db_security_group_ingress(DBSecurityGroupName='db_sg', - CIDRIP='10.3.2.46/32') - result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg") - result['DBSecurityGroups'][0]['IPRanges'].should.have.length_of(2) - result['DBSecurityGroups'][0]['IPRanges'].should.equal([ - {'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'}, - {'Status': 'authorized', 'CIDRIP': '10.3.2.46/32'}, - ]) - - -@mock_rds2 -def test_add_security_group_to_database(): - conn = boto3.client('rds', region_name='us-west-2') - - conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - DBInstanceClass='postgres', - Engine='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234) - - result = conn.describe_db_instances() - result['DBInstances'][0]['DBSecurityGroups'].should.equal([]) - conn.create_db_security_group(DBSecurityGroupName='db_sg', - DBSecurityGroupDescription='DB Security Group') - 
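test_security_group_authorize (above) pairs create_db_security_group with authorize_db_security_group_ingress; the same flow in a compact sketch under the stated moto/boto3 assumptions:

import boto3
from moto import mock_rds2


@mock_rds2
def sketch_authorize_ingress():
    conn = boto3.client('rds', region_name='us-west-2')
    conn.create_db_security_group(DBSecurityGroupName='sketch-sg',
                                  DBSecurityGroupDescription='sketch security group')
    conn.authorize_db_security_group_ingress(DBSecurityGroupName='sketch-sg',
                                             CIDRIP='10.3.2.45/32')
    sg = conn.describe_db_security_groups(DBSecurityGroupName='sketch-sg')['DBSecurityGroups'][0]
    # Each authorized CIDR shows up as an entry with status 'authorized'.
    assert sg['IPRanges'] == [{'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'}]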
conn.modify_db_instance(DBInstanceIdentifier='db-master-1', - DBSecurityGroups=['db_sg']) - result = conn.describe_db_instances() - result['DBInstances'][0]['DBSecurityGroups'][0][ - 'DBSecurityGroupName'].should.equal('db_sg') - - -@mock_rds2 -def test_list_tags_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", - DBSecurityGroupDescription='DB Security Group', - Tags=[{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}])['DBSecurityGroup']['DBSecurityGroupName'] - resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( - security_group) - result = conn.list_tags_for_resource(ResourceName=resource) - result['TagList'].should.equal([{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_rds2 -def test_add_tags_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", - DBSecurityGroupDescription='DB Security Group')['DBSecurityGroup']['DBSecurityGroupName'] - - resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( - security_group) - conn.add_tags_to_resource(ResourceName=resource, - Tags=[{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - result = conn.list_tags_for_resource(ResourceName=resource) - result['TagList'].should.equal([{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_rds2 -def test_remove_tags_security_group(): - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", - DBSecurityGroupDescription='DB Security Group', - Tags=[{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}])['DBSecurityGroup']['DBSecurityGroupName'] - - resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( - security_group) - conn.remove_tags_from_resource(ResourceName=resource, TagKeys=['foo']) - - result = conn.list_tags_for_resource(ResourceName=resource) - result['TagList'].should.equal([{'Value': 'bar1', 'Key': 'foo1'}]) - - -@mock_ec2 -@mock_rds2 -def test_create_database_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet1 = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - subnet2 = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/26')['Subnet'] - - subnet_ids = [subnet1['SubnetId'], subnet2['SubnetId']] - conn = boto3.client('rds', region_name='us-west-2') - result = conn.create_db_subnet_group(DBSubnetGroupName='db_subnet', - DBSubnetGroupDescription='my db subnet', - SubnetIds=subnet_ids) - result['DBSubnetGroup']['DBSubnetGroupName'].should.equal("db_subnet") - result['DBSubnetGroup'][ - 'DBSubnetGroupDescription'].should.equal("my db subnet") - subnets = result['DBSubnetGroup']['Subnets'] - subnet_group_ids = [subnets[0]['SubnetIdentifier'], - subnets[1]['SubnetIdentifier']] - list(subnet_group_ids).should.equal(subnet_ids) - - -@mock_ec2 -@mock_rds2 -def test_create_database_in_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = 
vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_subnet_group(DBSubnetGroupName='db_subnet1', - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']]) - conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSubnetGroupName='db_subnet1') - result = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') - result['DBInstances'][0]['DBSubnetGroup'][ - 'DBSubnetGroupName'].should.equal('db_subnet1') - - -@mock_ec2 -@mock_rds2 -def test_describe_database_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']]) - conn.create_db_subnet_group(DBSubnetGroupName='db_subnet2', - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']]) - - resp = conn.describe_db_subnet_groups() - resp['DBSubnetGroups'].should.have.length_of(2) - - subnets = resp['DBSubnetGroups'][0]['Subnets'] - subnets.should.have.length_of(1) - - list(conn.describe_db_subnet_groups(DBSubnetGroupName="db_subnet1") - ['DBSubnetGroups']).should.have.length_of(1) - - conn.describe_db_subnet_groups.when.called_with( - DBSubnetGroupName="not-a-subnet").should.throw(ClientError) - - -@mock_ec2 -@mock_rds2 -def test_delete_database_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']]) - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(1) - - conn.delete_db_subnet_group(DBSubnetGroupName="db_subnet1") - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - conn.delete_db_subnet_group.when.called_with( - DBSubnetGroupName="db_subnet1").should.throw(ClientError) - - -@mock_ec2 -@mock_rds2 -def test_list_tags_database_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']], - Tags=[{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}])['DBSubnetGroup']['DBSubnetGroupName'] - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet)) - result['TagList'].should.equal([{'Value': 'bar', - 
'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_ec2 -@mock_rds2 -def test_add_tags_database_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']], - Tags=[])['DBSubnetGroup']['DBSubnetGroupName'] - resource = 'arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet) - - conn.add_tags_to_resource(ResourceName=resource, - Tags=[{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - result = conn.list_tags_for_resource(ResourceName=resource) - result['TagList'].should.equal([{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}]) - - -@mock_ec2 -@mock_rds2 -def test_remove_tags_database_subnet_group(): - vpc_conn = boto3.client('ec2', 'us-west-2') - vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] - subnet = vpc_conn.create_subnet( - VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] - - conn = boto3.client('rds', region_name='us-west-2') - result = conn.describe_db_subnet_groups() - result['DBSubnetGroups'].should.have.length_of(0) - - subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", - DBSubnetGroupDescription='my db subnet', - SubnetIds=[subnet['SubnetId']], - Tags=[{'Value': 'bar', - 'Key': 'foo'}, - {'Value': 'bar1', - 'Key': 'foo1'}])['DBSubnetGroup']['DBSubnetGroupName'] - resource = 'arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet) - - conn.remove_tags_from_resource(ResourceName=resource, TagKeys=['foo']) - - result = conn.list_tags_for_resource(ResourceName=resource) - result['TagList'].should.equal([{'Value': 'bar1', 'Key': 'foo1'}]) - - -@mock_rds2 -def test_create_database_replica(): - conn = boto3.client('rds', region_name='us-west-2') - - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"]) - - replica = conn.create_db_instance_read_replica(DBInstanceIdentifier="db-replica-1", - SourceDBInstanceIdentifier="db-master-1", - DBInstanceClass="db.m1.small") - replica['DBInstance'][ - 'ReadReplicaSourceDBInstanceIdentifier'].should.equal('db-master-1') - replica['DBInstance']['DBInstanceClass'].should.equal('db.m1.small') - replica['DBInstance']['DBInstanceIdentifier'].should.equal('db-replica-1') - - master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") - master['DBInstances'][0]['ReadReplicaDBInstanceIdentifiers'].should.equal([ - 'db-replica-1']) - - conn.delete_db_instance( - DBInstanceIdentifier="db-replica-1", SkipFinalSnapshot=True) - - master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") - master['DBInstances'][0][ - 'ReadReplicaDBInstanceIdentifiers'].should.equal([]) - - -@mock_rds2 -@mock_kms -def test_create_database_with_encrypted_storage(): - kms_conn = boto3.client('kms', region_name='us-west-2') - key = kms_conn.create_key(Policy='my RDS encryption policy', - Description='RDS encryption key', - KeyUsage='ENCRYPT_DECRYPT') - - conn = boto3.client('rds', region_name='us-west-2') - database = 
conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234, - DBSecurityGroups=["my_sg"], - StorageEncrypted=True, - KmsKeyId=key['KeyMetadata']['KeyId']) - - database['DBInstance']['StorageEncrypted'].should.equal(True) - database['DBInstance']['KmsKeyId'].should.equal( - key['KeyMetadata']['KeyId']) - - -@mock_rds2 -def test_create_db_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - - db_parameter_group['DBParameterGroup'][ - 'DBParameterGroupName'].should.equal('test') - db_parameter_group['DBParameterGroup'][ - 'DBParameterGroupFamily'].should.equal('mysql5.6') - db_parameter_group['DBParameterGroup'][ - 'Description'].should.equal('test parameter group') - - -@mock_rds2 -def test_create_db_instance_with_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='mysql', - DBInstanceClass='db.m1.small', - DBParameterGroupName='test', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234) - - len(database['DBInstance']['DBParameterGroups']).should.equal(1) - database['DBInstance']['DBParameterGroups'][0][ - 'DBParameterGroupName'].should.equal('test') - database['DBInstance']['DBParameterGroups'][0][ - 'ParameterApplyStatus'].should.equal('in-sync') - - -@mock_rds2 -def test_create_database_with_default_port(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='postgres', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - DBSecurityGroups=["my_sg"]) - database['DBInstance']['Endpoint']['Port'].should.equal(5432) - - -@mock_rds2 -def test_modify_db_instance_with_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', - AllocatedStorage=10, - Engine='mysql', - DBInstanceClass='db.m1.small', - MasterUsername='root', - MasterUserPassword='hunter2', - Port=1234) - - len(database['DBInstance']['DBParameterGroups']).should.equal(1) - database['DBInstance']['DBParameterGroups'][0][ - 'DBParameterGroupName'].should.equal('default.mysql5.6') - database['DBInstance']['DBParameterGroups'][0][ - 'ParameterApplyStatus'].should.equal('in-sync') - - db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - conn.modify_db_instance(DBInstanceIdentifier='db-master-1', - DBParameterGroupName='test', - ApplyImmediately=True) - - database = conn.describe_db_instances( - DBInstanceIdentifier='db-master-1')['DBInstances'][0] - len(database['DBParameterGroups']).should.equal(1) - database['DBParameterGroups'][0][ - 'DBParameterGroupName'].should.equal('test') - database['DBParameterGroups'][0][ - 'ParameterApplyStatus'].should.equal('in-sync') - - -@mock_rds2 -def test_create_db_parameter_group_empty_description(): - conn = boto3.client('rds', 
region_name='us-west-2') - conn.create_db_parameter_group.when.called_with(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='').should.throw(ClientError) - - -@mock_rds2 -def test_create_db_parameter_group_duplicate(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - conn.create_db_parameter_group.when.called_with(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group').should.throw(ClientError) - - -@mock_rds2 -def test_describe_db_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - db_parameter_groups = conn.describe_db_parameter_groups( - DBParameterGroupName='test') - db_parameter_groups['DBParameterGroups'][0][ - 'DBParameterGroupName'].should.equal('test') - - -@mock_rds2 -def test_describe_non_existant_db_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - db_parameter_groups = conn.describe_db_parameter_groups( - DBParameterGroupName='test') - len(db_parameter_groups['DBParameterGroups']).should.equal(0) - - -@mock_rds2 -def test_delete_db_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - db_parameter_groups = conn.describe_db_parameter_groups( - DBParameterGroupName='test') - db_parameter_groups['DBParameterGroups'][0][ - 'DBParameterGroupName'].should.equal('test') - conn.delete_db_parameter_group(DBParameterGroupName='test') - db_parameter_groups = conn.describe_db_parameter_groups( - DBParameterGroupName='test') - len(db_parameter_groups['DBParameterGroups']).should.equal(0) - - -@mock_rds2 -def test_modify_db_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group') - - modify_result = conn.modify_db_parameter_group(DBParameterGroupName='test', - Parameters=[{ - 'ParameterName': 'foo', - 'ParameterValue': 'foo_val', - 'Description': 'test param', - 'ApplyMethod': 'immediate' - }] - ) - - modify_result['DBParameterGroupName'].should.equal('test') - - db_parameters = conn.describe_db_parameters(DBParameterGroupName='test') - db_parameters['Parameters'][0]['ParameterName'].should.equal('foo') - db_parameters['Parameters'][0]['ParameterValue'].should.equal('foo_val') - db_parameters['Parameters'][0]['Description'].should.equal('test param') - db_parameters['Parameters'][0]['ApplyMethod'].should.equal('immediate') - - -@mock_rds2 -def test_delete_non_existant_db_parameter_group(): - conn = boto3.client('rds', region_name='us-west-2') - conn.delete_db_parameter_group.when.called_with( - DBParameterGroupName='non-existant').should.throw(ClientError) - - -@mock_rds2 -def test_create_parameter_group_with_tags(): - conn = boto3.client('rds', region_name='us-west-2') - conn.create_db_parameter_group(DBParameterGroupName='test', - DBParameterGroupFamily='mysql5.6', - Description='test parameter group', - Tags=[{ - 'Key': 'foo', - 'Value': 'bar', - }]) - result = conn.list_tags_for_resource( - ResourceName='arn:aws:rds:us-west-2:1234567890:pg:test') - 
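The parameter-group tests above combine create_db_parameter_group, modify_db_parameter_group and describe_db_parameters; a minimal sketch of that round trip under the same assumptions (sketch-pg and the foo parameter are illustrative):

import boto3
from moto import mock_rds2


@mock_rds2
def sketch_parameter_group_round_trip():
    conn = boto3.client('rds', region_name='us-west-2')
    conn.create_db_parameter_group(DBParameterGroupName='sketch-pg',
                                   DBParameterGroupFamily='mysql5.6',
                                   Description='sketch parameter group')
    conn.modify_db_parameter_group(DBParameterGroupName='sketch-pg',
                                   Parameters=[{'ParameterName': 'foo',
                                                'ParameterValue': 'foo_val',
                                                'Description': 'sketch param',
                                                'ApplyMethod': 'immediate'}])
    params = conn.describe_db_parameters(DBParameterGroupName='sketch-pg')['Parameters']
    assert params[0]['ParameterName'] == 'foo'
    assert params[0]['ParameterValue'] == 'foo_val'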
result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}]) +from __future__ import unicode_literals + +from botocore.exceptions import ClientError, ParamValidationError +import boto3 +import sure # noqa +from moto import mock_ec2, mock_kms, mock_rds2 + + +@mock_rds2 +def test_create_database(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + LicenseModel='license-included', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + db_instance = database['DBInstance'] + db_instance['AllocatedStorage'].should.equal(10) + db_instance['DBInstanceClass'].should.equal("db.m1.small") + db_instance['LicenseModel'].should.equal("license-included") + db_instance['MasterUsername'].should.equal("root") + db_instance['DBSecurityGroups'][0][ + 'DBSecurityGroupName'].should.equal('my_sg') + db_instance['DBInstanceArn'].should.equal( + 'arn:aws:rds:us-west-2:1234567890:db:db-master-1') + db_instance['DBInstanceStatus'].should.equal('available') + db_instance['DBName'].should.equal('staging-postgres') + db_instance['DBInstanceIdentifier'].should.equal("db-master-1") + db_instance['IAMDatabaseAuthenticationEnabled'].should.equal(False) + db_instance['DbiResourceId'].should.contain("db-") + db_instance['CopyTagsToSnapshot'].should.equal(False) + + +@mock_rds2 +def test_stop_database(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + LicenseModel='license-included', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] + mydb['DBInstanceStatus'].should.equal('available') + # test stopping database should shutdown + response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response['DBInstance']['DBInstanceStatus'].should.equal('stopped') + # test rdsclient error when trying to stop an already stopped database + conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + # test stopping a stopped database with snapshot should error and no snapshot should exist for that call + conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap').should.throw(ClientError) + response = conn.describe_db_snapshots() + response['DBSnapshots'].should.equal([]) + + +@mock_rds2 +def test_start_database(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + LicenseModel='license-included', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] + mydb['DBInstanceStatus'].should.equal('available') + # test starting an already started database should error + 
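The re-added test_start_database below walks a full stop/start cycle; the same lifecycle as a self-contained sketch under the stated moto/boto3 assumptions (sketch-* names are illustrative):

import boto3
from moto import mock_rds2


@mock_rds2
def sketch_stop_start_cycle():
    conn = boto3.client('rds', region_name='us-west-2')
    conn.create_db_instance(DBInstanceIdentifier='sketch-db',
                            AllocatedStorage=10,
                            Engine='postgres',
                            DBInstanceClass='db.m1.small',
                            MasterUsername='root',
                            MasterUserPassword='hunter2')
    conn.stop_db_instance(DBInstanceIdentifier='sketch-db',
                          DBSnapshotIdentifier='sketch-snap')
    resp = conn.start_db_instance(DBInstanceIdentifier='sketch-db')
    assert resp['DBInstance']['DBInstanceStatus'] == 'available'
    # Starting the instance leaves the stop-time snapshot in place.
    snaps = conn.describe_db_snapshots()['DBSnapshots']
    assert snaps[0]['DBSnapshotIdentifier'] == 'sketch-snap'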
conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + # stop and test start - should go from stopped to available, create snapshot and check snapshot + response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap') + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response['DBInstance']['DBInstanceStatus'].should.equal('stopped') + response = conn.describe_db_snapshots() + response['DBSnapshots'][0]['DBSnapshotIdentifier'].should.equal('rocky4570-rds-snap') + response = conn.start_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response['DBInstance']['DBInstanceStatus'].should.equal('available') + # starting database should not remove snapshot + response = conn.describe_db_snapshots() + response['DBSnapshots'][0]['DBSnapshotIdentifier'].should.equal('rocky4570-rds-snap') + # test stopping database, create snapshot with existing snapshot already created should throw error + conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap').should.throw(ClientError) + # test stopping database not invoking snapshot should succeed. + response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response['DBInstance']['DBInstanceStatus'].should.equal('stopped') + + +@mock_rds2 +def test_fail_to_stop_multi_az(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + LicenseModel='license-included', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"], + MultiAZ=True) + + mydb = conn.describe_db_instances(DBInstanceIdentifier=database['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] + mydb['DBInstanceStatus'].should.equal('available') + # multi-az databases arent allowed to be shutdown at this time. + conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + # multi-az databases arent allowed to be started up at this time. + conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + + +@mock_rds2 +def test_fail_to_stop_readreplica(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + LicenseModel='license-included', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + + replica = conn.create_db_instance_read_replica(DBInstanceIdentifier="db-replica-1", + SourceDBInstanceIdentifier="db-master-1", + DBInstanceClass="db.m1.small") + + mydb = conn.describe_db_instances(DBInstanceIdentifier=replica['DBInstance']['DBInstanceIdentifier'])['DBInstances'][0] + mydb['DBInstanceStatus'].should.equal('available') + # read-replicas are not allowed to be stopped at this time. + conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + # read-replicas are not allowed to be started at this time. 
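test_fail_to_stop_readreplica (above) and test_create_database_replica (earlier in this patch) rely on the master tracking its replicas; a minimal sketch of that bookkeeping under the same assumptions:

import boto3
from moto import mock_rds2


@mock_rds2
def sketch_read_replica_bookkeeping():
    conn = boto3.client('rds', region_name='us-west-2')
    conn.create_db_instance(DBInstanceIdentifier='sketch-master',
                            AllocatedStorage=10,
                            Engine='postgres',
                            DBInstanceClass='db.m1.small',
                            MasterUsername='root',
                            MasterUserPassword='hunter2')
    conn.create_db_instance_read_replica(DBInstanceIdentifier='sketch-replica',
                                         SourceDBInstanceIdentifier='sketch-master',
                                         DBInstanceClass='db.m1.small')
    master = conn.describe_db_instances(DBInstanceIdentifier='sketch-master')['DBInstances'][0]
    assert master['ReadReplicaDBInstanceIdentifiers'] == ['sketch-replica']
    # Deleting the replica empties the master's replica list again.
    conn.delete_db_instance(DBInstanceIdentifier='sketch-replica',
                            SkipFinalSnapshot=True)
    master = conn.describe_db_instances(DBInstanceIdentifier='sketch-master')['DBInstances'][0]
    assert master['ReadReplicaDBInstanceIdentifiers'] == []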
+ conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) + + +@mock_rds2 +def test_get_databases(): + conn = boto3.client('rds', region_name='us-west-2') + + instances = conn.describe_db_instances() + list(instances['DBInstances']).should.have.length_of(0) + + conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + DBInstanceClass='postgres', + Engine='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=['my_sg']) + conn.create_db_instance(DBInstanceIdentifier='db-master-2', + AllocatedStorage=10, + DBInstanceClass='postgres', + Engine='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=['my_sg']) + instances = conn.describe_db_instances() + list(instances['DBInstances']).should.have.length_of(2) + + instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") + list(instances['DBInstances']).should.have.length_of(1) + instances['DBInstances'][0][ + 'DBInstanceIdentifier'].should.equal("db-master-1") + instances['DBInstances'][0]['DBInstanceArn'].should.equal( + 'arn:aws:rds:us-west-2:1234567890:db:db-master-1') + + +@mock_rds2 +def test_get_databases_paginated(): + conn = boto3.client('rds', region_name="us-west-2") + + for i in range(51): + conn.create_db_instance(AllocatedStorage=5, + Port=5432, + DBInstanceIdentifier='rds%d' % i, + DBInstanceClass='db.t1.micro', + Engine='postgres') + + resp = conn.describe_db_instances() + resp["DBInstances"].should.have.length_of(50) + resp["Marker"].should.equal(resp["DBInstances"][-1]['DBInstanceIdentifier']) + + resp2 = conn.describe_db_instances(Marker=resp["Marker"]) + resp2["DBInstances"].should.have.length_of(1) + + resp3 = conn.describe_db_instances(MaxRecords=100) + resp3["DBInstances"].should.have.length_of(51) + +@mock_rds2 +def test_describe_non_existant_database(): + conn = boto3.client('rds', region_name='us-west-2') + conn.describe_db_instances.when.called_with( + DBInstanceIdentifier="not-a-db").should.throw(ClientError) + + +@mock_rds2 +def test_modify_db_instance(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + DBInstanceClass='postgres', + Engine='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=['my_sg']) + instances = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') + instances['DBInstances'][0]['AllocatedStorage'].should.equal(10) + conn.modify_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=20, + ApplyImmediately=True) + instances = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') + instances['DBInstances'][0]['AllocatedStorage'].should.equal(20) + + +@mock_rds2 +def test_rename_db_instance(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + DBInstanceClass='postgres', + Engine='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=['my_sg']) + instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") + list(instances['DBInstances']).should.have.length_of(1) + conn.describe_db_instances.when.called_with(DBInstanceIdentifier="db-master-2").should.throw(ClientError) + conn.modify_db_instance(DBInstanceIdentifier='db-master-1', + 
NewDBInstanceIdentifier='db-master-2', + ApplyImmediately=True) + conn.describe_db_instances.when.called_with(DBInstanceIdentifier="db-master-1").should.throw(ClientError) + instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-2") + list(instances['DBInstances']).should.have.length_of(1) + + +@mock_rds2 +def test_modify_non_existant_database(): + conn = boto3.client('rds', region_name='us-west-2') + conn.modify_db_instance.when.called_with(DBInstanceIdentifier='not-a-db', + AllocatedStorage=20, + ApplyImmediately=True).should.throw(ClientError) + + +@mock_rds2 +def test_reboot_db_instance(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + DBInstanceClass='postgres', + Engine='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=['my_sg']) + database = conn.reboot_db_instance(DBInstanceIdentifier='db-master-1') + database['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1") + + +@mock_rds2 +def test_reboot_non_existant_database(): + conn = boto3.client('rds', region_name='us-west-2') + conn.reboot_db_instance.when.called_with( + DBInstanceIdentifier="not-a-db").should.throw(ClientError) + + +@mock_rds2 +def test_delete_database(): + conn = boto3.client('rds', region_name='us-west-2') + instances = conn.describe_db_instances() + list(instances['DBInstances']).should.have.length_of(0) + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=['my_sg']) + instances = conn.describe_db_instances() + list(instances['DBInstances']).should.have.length_of(1) + + conn.delete_db_instance(DBInstanceIdentifier="db-primary-1", + FinalDBSnapshotIdentifier='primary-1-snapshot') + + instances = conn.describe_db_instances() + list(instances['DBInstances']).should.have.length_of(0) + + # Saved the snapshot + snapshots = conn.describe_db_snapshots(DBInstanceIdentifier="db-primary-1").get('DBSnapshots') + snapshots[0].get('Engine').should.equal('postgres') + + +@mock_rds2 +def test_delete_non_existant_database(): + conn = boto3.client('rds2', region_name="us-west-2") + conn.delete_db_instance.when.called_with( + DBInstanceIdentifier="not-a-db").should.throw(ClientError) + + +@mock_rds2 +def test_create_db_snapshots(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_snapshot.when.called_with( + DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) + + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + + snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='g-1').get('DBSnapshot') + + snapshot.get('Engine').should.equal('postgres') + snapshot.get('DBInstanceIdentifier').should.equal('db-primary-1') + snapshot.get('DBSnapshotIdentifier').should.equal('g-1') + result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn']) + result['TagList'].should.equal([]) + + +@mock_rds2 +def test_create_db_snapshots_copy_tags(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_snapshot.when.called_with( + 
DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) + + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"], + CopyTagsToSnapshot=True, + Tags=[ + { + 'Key': 'foo', + 'Value': 'bar', + }, + { + 'Key': 'foo1', + 'Value': 'bar1', + }, + ]) + + snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='g-1').get('DBSnapshot') + + snapshot.get('Engine').should.equal('postgres') + snapshot.get('DBInstanceIdentifier').should.equal('db-primary-1') + snapshot.get('DBSnapshotIdentifier').should.equal('g-1') + result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshotArn']) + result['TagList'].should.equal([{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + + +@mock_rds2 +def test_describe_db_snapshots(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + + created = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-1').get('DBSnapshot') + + created.get('Engine').should.equal('postgres') + + by_database_id = conn.describe_db_snapshots(DBInstanceIdentifier='db-primary-1').get('DBSnapshots') + by_snapshot_id = conn.describe_db_snapshots(DBSnapshotIdentifier='snapshot-1').get('DBSnapshots') + by_snapshot_id.should.equal(by_database_id) + + snapshot = by_snapshot_id[0] + snapshot.should.equal(created) + snapshot.get('Engine').should.equal('postgres') + + conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-2') + snapshots = conn.describe_db_snapshots(DBInstanceIdentifier='db-primary-1').get('DBSnapshots') + snapshots.should.have.length_of(2) + + +@mock_rds2 +def test_delete_db_snapshot(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_instance(DBInstanceIdentifier='db-primary-1', + AllocatedStorage=10, + Engine='postgres', + DBName='staging-postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1', + DBSnapshotIdentifier='snapshot-1') + + conn.describe_db_snapshots(DBSnapshotIdentifier='snapshot-1').get('DBSnapshots')[0] + conn.delete_db_snapshot(DBSnapshotIdentifier='snapshot-1') + conn.describe_db_snapshots.when.called_with( + DBSnapshotIdentifier='snapshot-1').should.throw(ClientError) + + +@mock_rds2 +def test_create_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + option_group = conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + option_group['OptionGroup']['OptionGroupName'].should.equal('test') + option_group['OptionGroup']['EngineName'].should.equal('mysql') + option_group['OptionGroup'][ + 'OptionGroupDescription'].should.equal('test option group') + option_group['OptionGroup']['MajorEngineVersion'].should.equal('5.6') + + +@mock_rds2 +def test_create_option_group_bad_engine_name(): + conn = boto3.client('rds', 
region_name='us-west-2') + conn.create_option_group.when.called_with(OptionGroupName='test', + EngineName='invalid_engine', + MajorEngineVersion='5.6', + OptionGroupDescription='test invalid engine').should.throw(ClientError) + + +@mock_rds2 +def test_create_option_group_bad_engine_major_version(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group.when.called_with(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='6.6.6', + OptionGroupDescription='test invalid engine version').should.throw(ClientError) + + +@mock_rds2 +def test_create_option_group_empty_description(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group.when.called_with(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='').should.throw(ClientError) + + +@mock_rds2 +def test_create_option_group_duplicate(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + conn.create_option_group.when.called_with(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group').should.throw(ClientError) + + +@mock_rds2 +def test_describe_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + option_groups = conn.describe_option_groups(OptionGroupName='test') + option_groups['OptionGroupsList'][0][ + 'OptionGroupName'].should.equal('test') + + +@mock_rds2 +def test_describe_non_existant_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.describe_option_groups.when.called_with( + OptionGroupName="not-a-option-group").should.throw(ClientError) + + +@mock_rds2 +def test_delete_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + option_groups = conn.describe_option_groups(OptionGroupName='test') + option_groups['OptionGroupsList'][0][ + 'OptionGroupName'].should.equal('test') + conn.delete_option_group(OptionGroupName='test') + conn.describe_option_groups.when.called_with( + OptionGroupName='test').should.throw(ClientError) + + +@mock_rds2 +def test_delete_non_existant_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.delete_option_group.when.called_with( + OptionGroupName='non-existant').should.throw(ClientError) + + +@mock_rds2 +def test_describe_option_group_options(): + conn = boto3.client('rds', region_name='us-west-2') + option_group_options = conn.describe_option_group_options( + EngineName='sqlserver-ee') + len(option_group_options['OptionGroupOptions']).should.equal(4) + option_group_options = conn.describe_option_group_options( + EngineName='sqlserver-ee', MajorEngineVersion='11.00') + len(option_group_options['OptionGroupOptions']).should.equal(2) + option_group_options = conn.describe_option_group_options( + EngineName='mysql', MajorEngineVersion='5.6') + len(option_group_options['OptionGroupOptions']).should.equal(1) + conn.describe_option_group_options.when.called_with( + EngineName='non-existent').should.throw(ClientError) + conn.describe_option_group_options.when.called_with( + EngineName='mysql', 
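+        # a known engine with an unknown major version should also be rejected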
+        MajorEngineVersion='non-existent').should.throw(ClientError)
+
+
+@mock_rds2
+def test_modify_option_group():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.create_option_group(OptionGroupName='test', EngineName='mysql',
+                             MajorEngineVersion='5.6', OptionGroupDescription='test option group')
+    # TODO: create an option and validate it before removing it. Once
+    # modify_option_group supports adding options to an option group,
+    # the removal path below can be exercised properly.
+    result = conn.modify_option_group(OptionGroupName='test', OptionsToInclude=[],
+                                      OptionsToRemove=['MEMCACHED'], ApplyImmediately=True)
+    result['OptionGroup']['EngineName'].should.equal('mysql')
+    result['OptionGroup']['Options'].should.equal([])
+    result['OptionGroup']['OptionGroupName'].should.equal('test')
+
+
+@mock_rds2
+def test_modify_option_group_no_options():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.create_option_group(OptionGroupName='test', EngineName='mysql',
+                             MajorEngineVersion='5.6', OptionGroupDescription='test option group')
+    conn.modify_option_group.when.called_with(
+        OptionGroupName='test').should.throw(ClientError)
+
+
+@mock_rds2
+def test_modify_non_existant_option_group():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.modify_option_group.when.called_with(OptionGroupName='non-existant', OptionsToInclude=[(
+        'OptionName', 'Port', 'DBSecurityGroupMemberships', 'VpcSecurityGroupMemberships', 'OptionSettings')]).should.throw(ParamValidationError)
+
+
+@mock_rds2
+def test_list_tags_invalid_arn():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.list_tags_for_resource.when.called_with(
+        ResourceName='arn:aws:rds:bad-arn').should.throw(ClientError)
+
+
+@mock_rds2
+def test_list_tags_db():
+    conn = boto3.client('rds', region_name='us-west-2')
+    result = conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:db:foo')
+    result['TagList'].should.equal([])
+    test_instance = conn.create_db_instance(
+        DBInstanceIdentifier='db-with-tags',
+        AllocatedStorage=10,
+        DBInstanceClass='db.m1.small',
+        Engine='postgres',
+        MasterUsername='root',
+        MasterUserPassword='hunter2',
+        Port=1234,
+        DBSecurityGroups=['my_sg'],
+        Tags=[
+            {
+                'Key': 'foo',
+                'Value': 'bar',
+            },
+            {
+                'Key': 'foo1',
+                'Value': 'bar1',
+            },
+        ])
+    result = conn.list_tags_for_resource(
+        ResourceName=test_instance['DBInstance']['DBInstanceArn'])
+    result['TagList'].should.equal([{'Value': 'bar',
+                                     'Key': 'foo'},
+                                    {'Value': 'bar1',
+                                     'Key': 'foo1'}])
+
+
+@mock_rds2
+def test_add_tags_db():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.create_db_instance(DBInstanceIdentifier='db-without-tags',
+                            AllocatedStorage=10,
+                            DBInstanceClass='db.m1.small',
+                            Engine='postgres',
+                            MasterUsername='root',
+                            MasterUserPassword='hunter2',
+                            Port=1234,
+                            DBSecurityGroups=['my_sg'],
+                            Tags=[
+                                {
+                                    'Key': 'foo',
+                                    'Value': 'bar',
+                                },
+                                {
+                                    'Key': 'foo1',
+                                    'Value': 'bar1',
+                                },
+                            ])
+    result = conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags')
+    list(result['TagList']).should.have.length_of(2)
+    conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags',
+                              Tags=[
+                                  {
+                                      'Key': 'foo',
+                                      'Value': 'fish',
+                                  },
+                                  {
+                                      'Key': 'foo2',
+                                      'Value': 'bar2',
+                                  },
+                              ])
+    result = conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-without-tags')
+    list(result['TagList']).should.have.length_of(3)
+
+
+@mock_rds2
+def test_remove_tags_db():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.create_db_instance(DBInstanceIdentifier='db-with-tags',
+                            AllocatedStorage=10,
+                            DBInstanceClass='db.m1.small',
+                            Engine='postgres',
+                            MasterUsername='root',
+                            MasterUserPassword='hunter2',
+                            Port=1234,
+                            DBSecurityGroups=['my_sg'],
+                            Tags=[
+                                {
+                                    'Key': 'foo',
+                                    'Value': 'bar',
+                                },
+                                {
+                                    'Key': 'foo1',
+                                    'Value': 'bar1',
+                                },
+                            ])
+    result = conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags')
+    list(result['TagList']).should.have.length_of(2)
+    conn.remove_tags_from_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags', TagKeys=['foo'])
+    result = conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:db:db-with-tags')
+    len(result['TagList']).should.equal(1)
+
+
+@mock_rds2
+def test_list_tags_snapshot():
+    conn = boto3.client('rds', region_name='us-west-2')
+    result = conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:foo')
+    result['TagList'].should.equal([])
+    conn.create_db_instance(DBInstanceIdentifier='db-primary-1',
+                            AllocatedStorage=10,
+                            Engine='postgres',
+                            DBName='staging-postgres',
+                            DBInstanceClass='db.m1.small',
+                            MasterUsername='root',
+                            MasterUserPassword='hunter2',
+                            Port=1234,
+                            DBSecurityGroups=["my_sg"])
+    snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1',
+                                       DBSnapshotIdentifier='snapshot-with-tags',
+                                       Tags=[
+                                           {
+                                               'Key': 'foo',
+                                               'Value': 'bar',
+                                           },
+                                           {
+                                               'Key': 'foo1',
+                                               'Value': 'bar1',
+                                           },
+                                       ])
+    result = conn.list_tags_for_resource(ResourceName=snapshot['DBSnapshot']['DBSnapshotArn'])
+    result['TagList'].should.equal([{'Value': 'bar',
+                                     'Key': 'foo'},
+                                    {'Value': 'bar1',
+                                     'Key': 'foo1'}])
+
+
+@mock_rds2
+def test_add_tags_snapshot():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.create_db_instance(DBInstanceIdentifier='db-primary-1',
+                            AllocatedStorage=10,
+                            Engine='postgres',
+                            DBName='staging-postgres',
+                            DBInstanceClass='db.m1.small',
+                            MasterUsername='root',
+                            MasterUserPassword='hunter2',
+                            Port=1234,
+                            DBSecurityGroups=["my_sg"])
+    snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1',
+                                       DBSnapshotIdentifier='snapshot-without-tags',
+                                       Tags=[
+                                           {
+                                               'Key': 'foo',
+                                               'Value': 'bar',
+                                           },
+                                           {
+                                               'Key': 'foo1',
+                                               'Value': 'bar1',
+                                           },
+                                       ])
+    result = conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags')
+    list(result['TagList']).should.have.length_of(2)
+    conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags',
+                              Tags=[
+                                  {
+                                      'Key': 'foo',
+                                      'Value': 'fish',
+                                  },
+                                  {
+                                      'Key': 'foo2',
+                                      'Value': 'bar2',
+                                  },
+                              ])
+    result = conn.list_tags_for_resource(
+        ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-without-tags')
+    list(result['TagList']).should.have.length_of(3)
+
+
+@mock_rds2
+def test_remove_tags_snapshot():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.create_db_instance(DBInstanceIdentifier='db-primary-1',
+                            AllocatedStorage=10,
+                            Engine='postgres',
+                            DBName='staging-postgres',
+                            DBInstanceClass='db.m1.small',
+                            MasterUsername='root',
+                            MasterUserPassword='hunter2',
+                            Port=1234,
+                            DBSecurityGroups=["my_sg"])
+    snapshot = conn.create_db_snapshot(DBInstanceIdentifier='db-primary-1',
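+                                       # tags attached here at creation are
+                                       # removed below via remove_tags_from_resource
+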
DBSnapshotIdentifier='snapshot-with-tags', + Tags=[ + { + 'Key': 'foo', + 'Value': 'bar', + }, + { + 'Key': 'foo1', + 'Value': 'bar1', + }, + ]) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags') + list(result['TagList']).should.have.length_of(2) + conn.remove_tags_from_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags', TagKeys=['foo']) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:snapshot:snapshot-with-tags') + len(result['TagList']).should.equal(1) + + +@mock_rds2 +def test_add_tags_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + list(result['TagList']).should.have.length_of(0) + conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', + Tags=[ + { + 'Key': 'foo', + 'Value': 'fish', + }, + { + 'Key': 'foo2', + 'Value': 'bar2', + }]) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + list(result['TagList']).should.have.length_of(2) + + +@mock_rds2 +def test_remove_tags_option_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_option_group(OptionGroupName='test', + EngineName='mysql', + MajorEngineVersion='5.6', + OptionGroupDescription='test option group') + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + conn.add_tags_to_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', + Tags=[ + { + 'Key': 'foo', + 'Value': 'fish', + }, + { + 'Key': 'foo2', + 'Value': 'bar2', + }]) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + list(result['TagList']).should.have.length_of(2) + conn.remove_tags_from_resource(ResourceName='arn:aws:rds:us-west-2:1234567890:og:test', + TagKeys=['foo']) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:og:test') + list(result['TagList']).should.have.length_of(1) + + +@mock_rds2 +def test_create_database_security_group(): + conn = boto3.client('rds', region_name='us-west-2') + + result = conn.create_db_security_group( + DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group') + result['DBSecurityGroup']['DBSecurityGroupName'].should.equal("db_sg") + result['DBSecurityGroup'][ + 'DBSecurityGroupDescription'].should.equal("DB Security Group") + result['DBSecurityGroup']['IPRanges'].should.equal([]) + + +@mock_rds2 +def test_get_security_groups(): + conn = boto3.client('rds', region_name='us-west-2') + + result = conn.describe_db_security_groups() + result['DBSecurityGroups'].should.have.length_of(0) + + conn.create_db_security_group( + DBSecurityGroupName='db_sg1', DBSecurityGroupDescription='DB Security Group') + conn.create_db_security_group( + DBSecurityGroupName='db_sg2', DBSecurityGroupDescription='DB Security Group') + + result = conn.describe_db_security_groups() + result['DBSecurityGroups'].should.have.length_of(2) + + result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg1") + result['DBSecurityGroups'].should.have.length_of(1) + result['DBSecurityGroups'][0]['DBSecurityGroupName'].should.equal("db_sg1") + + +@mock_rds2 +def test_get_non_existant_security_group(): 
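+    # describing a security group that was never created should raise ClientError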
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.describe_db_security_groups.when.called_with(
+        DBSecurityGroupName="not-a-sg").should.throw(ClientError)
+
+
+@mock_rds2
+def test_delete_database_security_group():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.create_db_security_group(
+        DBSecurityGroupName='db_sg', DBSecurityGroupDescription='DB Security Group')
+
+    result = conn.describe_db_security_groups()
+    result['DBSecurityGroups'].should.have.length_of(1)
+
+    conn.delete_db_security_group(DBSecurityGroupName="db_sg")
+    result = conn.describe_db_security_groups()
+    result['DBSecurityGroups'].should.have.length_of(0)
+
+
+@mock_rds2
+def test_delete_non_existant_security_group():
+    conn = boto3.client('rds', region_name='us-west-2')
+    conn.delete_db_security_group.when.called_with(
+        DBSecurityGroupName="not-a-db").should.throw(ClientError)
+
+
+@mock_rds2
+def test_security_group_authorize():
+    conn = boto3.client('rds', region_name='us-west-2')
+    security_group = conn.create_db_security_group(DBSecurityGroupName='db_sg',
+                                                   DBSecurityGroupDescription='DB Security Group')
+    security_group['DBSecurityGroup']['IPRanges'].should.equal([])
+
+    conn.authorize_db_security_group_ingress(DBSecurityGroupName='db_sg',
+                                             CIDRIP='10.3.2.45/32')
+
+    result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg")
+    result['DBSecurityGroups'][0]['IPRanges'].should.have.length_of(1)
+    result['DBSecurityGroups'][0]['IPRanges'].should.equal(
+        [{'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'}])
+
+    conn.authorize_db_security_group_ingress(DBSecurityGroupName='db_sg',
+                                             CIDRIP='10.3.2.46/32')
+    result = conn.describe_db_security_groups(DBSecurityGroupName="db_sg")
+    result['DBSecurityGroups'][0]['IPRanges'].should.have.length_of(2)
+    result['DBSecurityGroups'][0]['IPRanges'].should.equal([
+        {'Status': 'authorized', 'CIDRIP': '10.3.2.45/32'},
+        {'Status': 'authorized', 'CIDRIP': '10.3.2.46/32'},
+    ])
+
+
+@mock_rds2
+def test_add_security_group_to_database():
+    conn = boto3.client('rds', region_name='us-west-2')
+
+    conn.create_db_instance(DBInstanceIdentifier='db-master-1',
+                            AllocatedStorage=10,
+                            DBInstanceClass='db.m1.small',
+                            Engine='postgres',
+                            MasterUsername='root',
+                            MasterUserPassword='hunter2',
+                            Port=1234)
+
+    result = conn.describe_db_instances()
+    result['DBInstances'][0]['DBSecurityGroups'].should.equal([])
+    conn.create_db_security_group(DBSecurityGroupName='db_sg',
+                                  DBSecurityGroupDescription='DB Security Group')
+    conn.modify_db_instance(DBInstanceIdentifier='db-master-1',
+                            DBSecurityGroups=['db_sg'])
+    result = conn.describe_db_instances()
+    result['DBInstances'][0]['DBSecurityGroups'][0][
+        'DBSecurityGroupName'].should.equal('db_sg')
+
+
+@mock_rds2
+def test_list_tags_security_group():
+    conn = boto3.client('rds', region_name='us-west-2')
+    result = conn.describe_db_subnet_groups()
+    result['DBSubnetGroups'].should.have.length_of(0)
+
+    security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg",
+                                                   DBSecurityGroupDescription='DB Security Group',
+                                                   Tags=[{'Value': 'bar',
+                                                          'Key': 'foo'},
+                                                         {'Value': 'bar1',
+                                                          'Key': 'foo1'}])['DBSecurityGroup']['DBSecurityGroupName']
+    resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format(
+        security_group)
+    result = conn.list_tags_for_resource(ResourceName=resource)
+    result['TagList'].should.equal([{'Value': 'bar',
+                                     'Key': 'foo'},
+                                    {'Value': 'bar1',
+                                     'Key': 'foo1'}])
+
+
+@mock_rds2
+def test_add_tags_security_group():
+    conn = boto3.client('rds', region_name='us-west-2')
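+    # sanity check: the mocked account starts with no DB subnet groups
+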
result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", + DBSecurityGroupDescription='DB Security Group')['DBSecurityGroup']['DBSecurityGroupName'] + + resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( + security_group) + conn.add_tags_to_resource(ResourceName=resource, + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + + result = conn.list_tags_for_resource(ResourceName=resource) + result['TagList'].should.equal([{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + + +@mock_rds2 +def test_remove_tags_security_group(): + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + security_group = conn.create_db_security_group(DBSecurityGroupName="db_sg", + DBSecurityGroupDescription='DB Security Group', + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}])['DBSecurityGroup']['DBSecurityGroupName'] + + resource = 'arn:aws:rds:us-west-2:1234567890:secgrp:{0}'.format( + security_group) + conn.remove_tags_from_resource(ResourceName=resource, TagKeys=['foo']) + + result = conn.list_tags_for_resource(ResourceName=resource) + result['TagList'].should.equal([{'Value': 'bar1', 'Key': 'foo1'}]) + + +@mock_ec2 +@mock_rds2 +def test_create_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet1 = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + subnet2 = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/26')['Subnet'] + + subnet_ids = [subnet1['SubnetId'], subnet2['SubnetId']] + conn = boto3.client('rds', region_name='us-west-2') + result = conn.create_db_subnet_group(DBSubnetGroupName='db_subnet', + DBSubnetGroupDescription='my db subnet', + SubnetIds=subnet_ids) + result['DBSubnetGroup']['DBSubnetGroupName'].should.equal("db_subnet") + result['DBSubnetGroup'][ + 'DBSubnetGroupDescription'].should.equal("my db subnet") + subnets = result['DBSubnetGroup']['Subnets'] + subnet_group_ids = [subnets[0]['SubnetIdentifier'], + subnets[1]['SubnetIdentifier']] + list(subnet_group_ids).should.equal(subnet_ids) + + +@mock_ec2 +@mock_rds2 +def test_create_database_in_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_subnet_group(DBSubnetGroupName='db_subnet1', + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']]) + conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSubnetGroupName='db_subnet1') + result = conn.describe_db_instances(DBInstanceIdentifier='db-master-1') + result['DBInstances'][0]['DBSubnetGroup'][ + 'DBSubnetGroupName'].should.equal('db_subnet1') + + +@mock_ec2 +@mock_rds2 +def test_describe_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + + conn = boto3.client('rds', 
region_name='us-west-2') + conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']]) + conn.create_db_subnet_group(DBSubnetGroupName='db_subnet2', + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']]) + + resp = conn.describe_db_subnet_groups() + resp['DBSubnetGroups'].should.have.length_of(2) + + subnets = resp['DBSubnetGroups'][0]['Subnets'] + subnets.should.have.length_of(1) + + list(conn.describe_db_subnet_groups(DBSubnetGroupName="db_subnet1") + ['DBSubnetGroups']).should.have.length_of(1) + + conn.describe_db_subnet_groups.when.called_with( + DBSubnetGroupName="not-a-subnet").should.throw(ClientError) + + +@mock_ec2 +@mock_rds2 +def test_delete_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']]) + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(1) + + conn.delete_db_subnet_group(DBSubnetGroupName="db_subnet1") + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + conn.delete_db_subnet_group.when.called_with( + DBSubnetGroupName="db_subnet1").should.throw(ClientError) + + +@mock_ec2 +@mock_rds2 +def test_list_tags_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']], + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}])['DBSubnetGroup']['DBSubnetGroupName'] + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet)) + result['TagList'].should.equal([{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + + +@mock_ec2 +@mock_rds2 +def test_add_tags_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']], + Tags=[])['DBSubnetGroup']['DBSubnetGroupName'] + resource = 'arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet) + + conn.add_tags_to_resource(ResourceName=resource, + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}]) + + result = conn.list_tags_for_resource(ResourceName=resource) + result['TagList'].should.equal([{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 
'foo1'}]) + + +@mock_ec2 +@mock_rds2 +def test_remove_tags_database_subnet_group(): + vpc_conn = boto3.client('ec2', 'us-west-2') + vpc = vpc_conn.create_vpc(CidrBlock='10.0.0.0/16')['Vpc'] + subnet = vpc_conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.1.0.0/24')['Subnet'] + + conn = boto3.client('rds', region_name='us-west-2') + result = conn.describe_db_subnet_groups() + result['DBSubnetGroups'].should.have.length_of(0) + + subnet = conn.create_db_subnet_group(DBSubnetGroupName="db_subnet1", + DBSubnetGroupDescription='my db subnet', + SubnetIds=[subnet['SubnetId']], + Tags=[{'Value': 'bar', + 'Key': 'foo'}, + {'Value': 'bar1', + 'Key': 'foo1'}])['DBSubnetGroup']['DBSubnetGroupName'] + resource = 'arn:aws:rds:us-west-2:1234567890:subgrp:{0}'.format(subnet) + + conn.remove_tags_from_resource(ResourceName=resource, TagKeys=['foo']) + + result = conn.list_tags_for_resource(ResourceName=resource) + result['TagList'].should.equal([{'Value': 'bar1', 'Key': 'foo1'}]) + + +@mock_rds2 +def test_create_database_replica(): + conn = boto3.client('rds', region_name='us-west-2') + + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"]) + + replica = conn.create_db_instance_read_replica(DBInstanceIdentifier="db-replica-1", + SourceDBInstanceIdentifier="db-master-1", + DBInstanceClass="db.m1.small") + replica['DBInstance'][ + 'ReadReplicaSourceDBInstanceIdentifier'].should.equal('db-master-1') + replica['DBInstance']['DBInstanceClass'].should.equal('db.m1.small') + replica['DBInstance']['DBInstanceIdentifier'].should.equal('db-replica-1') + + master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") + master['DBInstances'][0]['ReadReplicaDBInstanceIdentifiers'].should.equal([ + 'db-replica-1']) + + conn.delete_db_instance( + DBInstanceIdentifier="db-replica-1", SkipFinalSnapshot=True) + + master = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") + master['DBInstances'][0][ + 'ReadReplicaDBInstanceIdentifiers'].should.equal([]) + + +@mock_rds2 +@mock_kms +def test_create_database_with_encrypted_storage(): + kms_conn = boto3.client('kms', region_name='us-west-2') + key = kms_conn.create_key(Policy='my RDS encryption policy', + Description='RDS encryption key', + KeyUsage='ENCRYPT_DECRYPT') + + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=["my_sg"], + StorageEncrypted=True, + KmsKeyId=key['KeyMetadata']['KeyId']) + + database['DBInstance']['StorageEncrypted'].should.equal(True) + database['DBInstance']['KmsKeyId'].should.equal( + key['KeyMetadata']['KeyId']) + + +@mock_rds2 +def test_create_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + + db_parameter_group['DBParameterGroup'][ + 'DBParameterGroupName'].should.equal('test') + db_parameter_group['DBParameterGroup'][ + 'DBParameterGroupFamily'].should.equal('mysql5.6') + db_parameter_group['DBParameterGroup'][ + 'Description'].should.equal('test parameter group') + + +@mock_rds2 +def 
test_create_db_instance_with_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='mysql', + DBInstanceClass='db.m1.small', + DBParameterGroupName='test', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234) + + len(database['DBInstance']['DBParameterGroups']).should.equal(1) + database['DBInstance']['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('test') + database['DBInstance']['DBParameterGroups'][0][ + 'ParameterApplyStatus'].should.equal('in-sync') + + +@mock_rds2 +def test_create_database_with_default_port(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='postgres', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + DBSecurityGroups=["my_sg"]) + database['DBInstance']['Endpoint']['Port'].should.equal(5432) + + +@mock_rds2 +def test_modify_db_instance_with_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + Engine='mysql', + DBInstanceClass='db.m1.small', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234) + + len(database['DBInstance']['DBParameterGroups']).should.equal(1) + database['DBInstance']['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('default.mysql5.6') + database['DBInstance']['DBParameterGroups'][0][ + 'ParameterApplyStatus'].should.equal('in-sync') + + db_parameter_group = conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + conn.modify_db_instance(DBInstanceIdentifier='db-master-1', + DBParameterGroupName='test', + ApplyImmediately=True) + + database = conn.describe_db_instances( + DBInstanceIdentifier='db-master-1')['DBInstances'][0] + len(database['DBParameterGroups']).should.equal(1) + database['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('test') + database['DBParameterGroups'][0][ + 'ParameterApplyStatus'].should.equal('in-sync') + + +@mock_rds2 +def test_create_db_parameter_group_empty_description(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group.when.called_with(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='').should.throw(ClientError) + + +@mock_rds2 +def test_create_db_parameter_group_duplicate(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + conn.create_db_parameter_group.when.called_with(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group').should.throw(ClientError) + + +@mock_rds2 +def test_describe_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + db_parameter_groups = conn.describe_db_parameter_groups( + DBParameterGroupName='test') + db_parameter_groups['DBParameterGroups'][0][ + 
'DBParameterGroupName'].should.equal('test') + + +@mock_rds2 +def test_describe_non_existant_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + db_parameter_groups = conn.describe_db_parameter_groups( + DBParameterGroupName='test') + len(db_parameter_groups['DBParameterGroups']).should.equal(0) + + +@mock_rds2 +def test_delete_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + db_parameter_groups = conn.describe_db_parameter_groups( + DBParameterGroupName='test') + db_parameter_groups['DBParameterGroups'][0][ + 'DBParameterGroupName'].should.equal('test') + conn.delete_db_parameter_group(DBParameterGroupName='test') + db_parameter_groups = conn.describe_db_parameter_groups( + DBParameterGroupName='test') + len(db_parameter_groups['DBParameterGroups']).should.equal(0) + + +@mock_rds2 +def test_modify_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group') + + modify_result = conn.modify_db_parameter_group(DBParameterGroupName='test', + Parameters=[{ + 'ParameterName': 'foo', + 'ParameterValue': 'foo_val', + 'Description': 'test param', + 'ApplyMethod': 'immediate' + }] + ) + + modify_result['DBParameterGroupName'].should.equal('test') + + db_parameters = conn.describe_db_parameters(DBParameterGroupName='test') + db_parameters['Parameters'][0]['ParameterName'].should.equal('foo') + db_parameters['Parameters'][0]['ParameterValue'].should.equal('foo_val') + db_parameters['Parameters'][0]['Description'].should.equal('test param') + db_parameters['Parameters'][0]['ApplyMethod'].should.equal('immediate') + + +@mock_rds2 +def test_delete_non_existant_db_parameter_group(): + conn = boto3.client('rds', region_name='us-west-2') + conn.delete_db_parameter_group.when.called_with( + DBParameterGroupName='non-existant').should.throw(ClientError) + + +@mock_rds2 +def test_create_parameter_group_with_tags(): + conn = boto3.client('rds', region_name='us-west-2') + conn.create_db_parameter_group(DBParameterGroupName='test', + DBParameterGroupFamily='mysql5.6', + Description='test parameter group', + Tags=[{ + 'Key': 'foo', + 'Value': 'bar', + }]) + result = conn.list_tags_for_resource( + ResourceName='arn:aws:rds:us-west-2:1234567890:pg:test') + result['TagList'].should.equal([{'Value': 'bar', 'Key': 'foo'}]) diff --git a/tests/test_rds2/test_server.py b/tests/test_rds2/test_server.py index f9489e054..8ae44fb58 100644 --- a/tests/test_rds2/test_server.py +++ b/tests/test_rds2/test_server.py @@ -1,20 +1,20 @@ -from __future__ import unicode_literals - -import sure # noqa - -import moto.server as server -from moto import mock_rds2 - -''' -Test the different server responses -''' - - -#@mock_rds2 -# def test_list_databases(): -# backend = server.create_backend_app("rds2") -# test_client = backend.test_client() -# -# res = test_client.get('/?Action=DescribeDBInstances') -# -# res.data.decode("utf-8").should.contain("") +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server +from moto import mock_rds2 + +''' +Test the different server responses +''' + + +#@mock_rds2 +# def test_list_databases(): +# backend = server.create_backend_app("rds2") +# test_client = backend.test_client() +# +# res = 
test_client.get('/?Action=DescribeDBInstances') +# +# res.data.decode("utf-8").should.contain("") diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index 9208c92dd..f0e227a5d 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -1,1242 +1,1242 @@ -from __future__ import unicode_literals - -import datetime - -import boto -import boto3 -from boto.redshift.exceptions import ( - ClusterNotFound, - ClusterParameterGroupNotFound, - ClusterSecurityGroupNotFound, - ClusterSubnetGroupNotFound, - InvalidSubnet, -) -from botocore.exceptions import ( - ClientError -) -import sure # noqa - -from moto import mock_ec2 -from moto import mock_ec2_deprecated -from moto import mock_redshift -from moto import mock_redshift_deprecated - - -@mock_redshift -def test_create_cluster_boto3(): - client = boto3.client('redshift', region_name='us-east-1') - response = client.create_cluster( - DBName='test', - ClusterIdentifier='test', - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='user', - MasterUserPassword='password', - ) - response['Cluster']['NodeType'].should.equal('ds2.xlarge') - create_time = response['Cluster']['ClusterCreateTime'] - create_time.should.be.lower_than(datetime.datetime.now(create_time.tzinfo)) - - -@mock_redshift -def test_create_snapshot_copy_grant(): - client = boto3.client('redshift', region_name='us-east-1') - grants = client.create_snapshot_copy_grant( - SnapshotCopyGrantName='test-us-east-1', - KmsKeyId='fake', - ) - grants['SnapshotCopyGrant']['SnapshotCopyGrantName'].should.equal('test-us-east-1') - grants['SnapshotCopyGrant']['KmsKeyId'].should.equal('fake') - - client.delete_snapshot_copy_grant( - SnapshotCopyGrantName='test-us-east-1', - ) - - client.describe_snapshot_copy_grants.when.called_with( - SnapshotCopyGrantName='test-us-east-1', - ).should.throw(Exception) - - -@mock_redshift -def test_create_many_snapshot_copy_grants(): - client = boto3.client('redshift', region_name='us-east-1') - - for i in range(10): - client.create_snapshot_copy_grant( - SnapshotCopyGrantName='test-us-east-1-{0}'.format(i), - KmsKeyId='fake', - ) - response = client.describe_snapshot_copy_grants() - len(response['SnapshotCopyGrants']).should.equal(10) - - -@mock_redshift -def test_no_snapshot_copy_grants(): - client = boto3.client('redshift', region_name='us-east-1') - response = client.describe_snapshot_copy_grants() - len(response['SnapshotCopyGrants']).should.equal(0) - - -@mock_redshift_deprecated -def test_create_cluster(): - conn = boto.redshift.connect_to_region("us-east-1") - cluster_identifier = 'my_cluster' - - cluster_response = conn.create_cluster( - cluster_identifier, - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - db_name="my_db", - cluster_type="multi-node", - availability_zone="us-east-1d", - preferred_maintenance_window="Mon:03:00-Mon:11:00", - automated_snapshot_retention_period=10, - port=1234, - cluster_version="1.0", - allow_version_upgrade=True, - number_of_nodes=3, - ) - cluster_response['CreateClusterResponse']['CreateClusterResult'][ - 'Cluster']['ClusterStatus'].should.equal('creating') - - cluster_response = conn.describe_clusters(cluster_identifier) - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - - cluster['ClusterIdentifier'].should.equal(cluster_identifier) - cluster['NodeType'].should.equal("dw.hs1.xlarge") - cluster['MasterUsername'].should.equal("username") - 
cluster['DBName'].should.equal("my_db") - cluster['ClusterSecurityGroups'][0][ - 'ClusterSecurityGroupName'].should.equal("Default") - cluster['VpcSecurityGroups'].should.equal([]) - cluster['ClusterSubnetGroupName'].should.equal(None) - cluster['AvailabilityZone'].should.equal("us-east-1d") - cluster['PreferredMaintenanceWindow'].should.equal("Mon:03:00-Mon:11:00") - cluster['ClusterParameterGroups'][0][ - 'ParameterGroupName'].should.equal("default.redshift-1.0") - cluster['AutomatedSnapshotRetentionPeriod'].should.equal(10) - cluster['Port'].should.equal(1234) - cluster['ClusterVersion'].should.equal("1.0") - cluster['AllowVersionUpgrade'].should.equal(True) - cluster['NumberOfNodes'].should.equal(3) - - -@mock_redshift_deprecated -def test_create_single_node_cluster(): - conn = boto.redshift.connect_to_region("us-east-1") - cluster_identifier = 'my_cluster' - - conn.create_cluster( - cluster_identifier, - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - db_name="my_db", - cluster_type="single-node", - ) - - cluster_response = conn.describe_clusters(cluster_identifier) - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - - cluster['ClusterIdentifier'].should.equal(cluster_identifier) - cluster['NodeType'].should.equal("dw.hs1.xlarge") - cluster['MasterUsername'].should.equal("username") - cluster['DBName'].should.equal("my_db") - cluster['NumberOfNodes'].should.equal(1) - - -@mock_redshift_deprecated -def test_default_cluster_attributes(): - conn = boto.redshift.connect_to_region("us-east-1") - cluster_identifier = 'my_cluster' - - conn.create_cluster( - cluster_identifier, - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - ) - - cluster_response = conn.describe_clusters(cluster_identifier) - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - - cluster['DBName'].should.equal("dev") - cluster['ClusterSubnetGroupName'].should.equal(None) - assert "us-east-" in cluster['AvailabilityZone'] - cluster['PreferredMaintenanceWindow'].should.equal("Mon:03:00-Mon:03:30") - cluster['ClusterParameterGroups'][0][ - 'ParameterGroupName'].should.equal("default.redshift-1.0") - cluster['AutomatedSnapshotRetentionPeriod'].should.equal(1) - cluster['Port'].should.equal(5439) - cluster['ClusterVersion'].should.equal("1.0") - cluster['AllowVersionUpgrade'].should.equal(True) - cluster['NumberOfNodes'].should.equal(1) - - -@mock_redshift_deprecated -@mock_ec2_deprecated -def test_create_cluster_in_subnet_group(): - vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") - redshift_conn = boto.connect_redshift() - redshift_conn.create_cluster_subnet_group( - "my_subnet_group", - "This is my subnet group", - subnet_ids=[subnet.id], - ) - - redshift_conn.create_cluster( - "my_cluster", - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - cluster_subnet_group_name='my_subnet_group', - ) - - cluster_response = redshift_conn.describe_clusters("my_cluster") - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group') - - -@mock_redshift -@mock_ec2 -def test_create_cluster_in_subnet_group_boto3(): - ec2 = boto3.resource('ec2', region_name='us-east-1') - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = 
ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24') - client = boto3.client('redshift', region_name='us-east-1') - client.create_cluster_subnet_group( - ClusterSubnetGroupName='my_subnet_group', - Description='This is my subnet group', - SubnetIds=[subnet.id] - ) - - client.create_cluster( - ClusterIdentifier="my_cluster", - NodeType="dw.hs1.xlarge", - MasterUsername="username", - MasterUserPassword="password", - ClusterSubnetGroupName='my_subnet_group', - ) - - cluster_response = client.describe_clusters(ClusterIdentifier="my_cluster") - cluster = cluster_response['Clusters'][0] - cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group') - - -@mock_redshift_deprecated -def test_create_cluster_with_security_group(): - conn = boto.redshift.connect_to_region("us-east-1") - conn.create_cluster_security_group( - "security_group1", - "This is my security group", - ) - conn.create_cluster_security_group( - "security_group2", - "This is my security group", - ) - - cluster_identifier = 'my_cluster' - conn.create_cluster( - cluster_identifier, - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - cluster_security_groups=["security_group1", "security_group2"] - ) - - cluster_response = conn.describe_clusters(cluster_identifier) - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - group_names = [group['ClusterSecurityGroupName'] - for group in cluster['ClusterSecurityGroups']] - set(group_names).should.equal(set(["security_group1", "security_group2"])) - - -@mock_redshift -def test_create_cluster_with_security_group_boto3(): - client = boto3.client('redshift', region_name='us-east-1') - client.create_cluster_security_group( - ClusterSecurityGroupName="security_group1", - Description="This is my security group", - ) - client.create_cluster_security_group( - ClusterSecurityGroupName="security_group2", - Description="This is my security group", - ) - - cluster_identifier = 'my_cluster' - client.create_cluster( - ClusterIdentifier=cluster_identifier, - NodeType="dw.hs1.xlarge", - MasterUsername="username", - MasterUserPassword="password", - ClusterSecurityGroups=["security_group1", "security_group2"] - ) - response = client.describe_clusters(ClusterIdentifier=cluster_identifier) - cluster = response['Clusters'][0] - group_names = [group['ClusterSecurityGroupName'] - for group in cluster['ClusterSecurityGroups']] - set(group_names).should.equal({"security_group1", "security_group2"}) - - -@mock_redshift_deprecated -@mock_ec2_deprecated -def test_create_cluster_with_vpc_security_groups(): - vpc_conn = boto.connect_vpc() - ec2_conn = boto.connect_ec2() - redshift_conn = boto.connect_redshift() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - security_group = ec2_conn.create_security_group( - "vpc_security_group", "a group", vpc_id=vpc.id) - - redshift_conn.create_cluster( - "my_cluster", - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - vpc_security_group_ids=[security_group.id], - ) - - cluster_response = redshift_conn.describe_clusters("my_cluster") - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - group_ids = [group['VpcSecurityGroupId'] - for group in cluster['VpcSecurityGroups']] - list(group_ids).should.equal([security_group.id]) - - -@mock_redshift -@mock_ec2 -def test_create_cluster_with_vpc_security_groups_boto3(): - ec2 = boto3.resource('ec2', region_name='us-east-1') - vpc = 
ec2.create_vpc(CidrBlock='10.0.0.0/16') - client = boto3.client('redshift', region_name='us-east-1') - cluster_id = 'my_cluster' - security_group = ec2.create_security_group( - Description="vpc_security_group", - GroupName="a group", - VpcId=vpc.id) - client.create_cluster( - ClusterIdentifier=cluster_id, - NodeType="dw.hs1.xlarge", - MasterUsername="username", - MasterUserPassword="password", - VpcSecurityGroupIds=[security_group.id], - ) - response = client.describe_clusters(ClusterIdentifier=cluster_id) - cluster = response['Clusters'][0] - group_ids = [group['VpcSecurityGroupId'] - for group in cluster['VpcSecurityGroups']] - list(group_ids).should.equal([security_group.id]) - - -@mock_redshift -def test_create_cluster_with_iam_roles(): - iam_roles_arn = ['arn:aws:iam:::role/my-iam-role',] - client = boto3.client('redshift', region_name='us-east-1') - cluster_id = 'my_cluster' - client.create_cluster( - ClusterIdentifier=cluster_id, - NodeType="dw.hs1.xlarge", - MasterUsername="username", - MasterUserPassword="password", - IamRoles=iam_roles_arn - ) - response = client.describe_clusters(ClusterIdentifier=cluster_id) - cluster = response['Clusters'][0] - iam_roles = [role['IamRoleArn'] for role in cluster['IamRoles']] - iam_roles_arn.should.equal(iam_roles) - - -@mock_redshift_deprecated -def test_create_cluster_with_parameter_group(): - conn = boto.connect_redshift() - conn.create_cluster_parameter_group( - "my_parameter_group", - "redshift-1.0", - "This is my parameter group", - ) - - conn.create_cluster( - "my_cluster", - node_type="dw.hs1.xlarge", - master_username="username", - master_user_password="password", - cluster_parameter_group_name='my_parameter_group', - ) - - cluster_response = conn.describe_clusters("my_cluster") - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - cluster['ClusterParameterGroups'][0][ - 'ParameterGroupName'].should.equal("my_parameter_group") - - -@mock_redshift_deprecated -def test_describe_non_existent_cluster(): - conn = boto.redshift.connect_to_region("us-east-1") - conn.describe_clusters.when.called_with( - "not-a-cluster").should.throw(ClusterNotFound) - - -@mock_redshift_deprecated -def test_delete_cluster(): - conn = boto.connect_redshift() - cluster_identifier = 'my_cluster' - - conn.create_cluster( - cluster_identifier, - node_type='single-node', - master_username="username", - master_user_password="password", - ) - - clusters = conn.describe_clusters()['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'] - list(clusters).should.have.length_of(1) - - conn.delete_cluster(cluster_identifier) - - clusters = conn.describe_clusters()['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'] - list(clusters).should.have.length_of(0) - - # Delete invalid id - conn.delete_cluster.when.called_with( - "not-a-cluster").should.throw(ClusterNotFound) - - -@mock_redshift_deprecated -def test_modify_cluster(): - conn = boto.connect_redshift() - cluster_identifier = 'my_cluster' - conn.create_cluster_security_group( - "security_group", - "This is my security group", - ) - conn.create_cluster_parameter_group( - "my_parameter_group", - "redshift-1.0", - "This is my parameter group", - ) - - conn.create_cluster( - cluster_identifier, - node_type='single-node', - master_username="username", - master_user_password="password", - ) - - conn.modify_cluster( - cluster_identifier, - cluster_type="multi-node", - node_type="dw.hs1.xlarge", - cluster_security_groups="security_group", - 
master_user_password="new_password", - cluster_parameter_group_name="my_parameter_group", - automated_snapshot_retention_period=7, - preferred_maintenance_window="Tue:03:00-Tue:11:00", - allow_version_upgrade=False, - new_cluster_identifier="new_identifier", - ) - - cluster_response = conn.describe_clusters("new_identifier") - cluster = cluster_response['DescribeClustersResponse'][ - 'DescribeClustersResult']['Clusters'][0] - - cluster['ClusterIdentifier'].should.equal("new_identifier") - cluster['NodeType'].should.equal("dw.hs1.xlarge") - cluster['ClusterSecurityGroups'][0][ - 'ClusterSecurityGroupName'].should.equal("security_group") - cluster['PreferredMaintenanceWindow'].should.equal("Tue:03:00-Tue:11:00") - cluster['ClusterParameterGroups'][0][ - 'ParameterGroupName'].should.equal("my_parameter_group") - cluster['AutomatedSnapshotRetentionPeriod'].should.equal(7) - cluster['AllowVersionUpgrade'].should.equal(False) - # This one should remain unmodified. - cluster['NumberOfNodes'].should.equal(1) - - -@mock_redshift_deprecated -@mock_ec2_deprecated -def test_create_cluster_subnet_group(): - vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet1 = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") - subnet2 = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") - - redshift_conn = boto.connect_redshift() - - redshift_conn.create_cluster_subnet_group( - "my_subnet", - "This is my subnet group", - subnet_ids=[subnet1.id, subnet2.id], - ) - - subnets_response = redshift_conn.describe_cluster_subnet_groups( - "my_subnet") - my_subnet = subnets_response['DescribeClusterSubnetGroupsResponse'][ - 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'][0] - - my_subnet['ClusterSubnetGroupName'].should.equal("my_subnet") - my_subnet['Description'].should.equal("This is my subnet group") - subnet_ids = [subnet['SubnetIdentifier'] - for subnet in my_subnet['Subnets']] - set(subnet_ids).should.equal(set([subnet1.id, subnet2.id])) - - -@mock_redshift_deprecated -@mock_ec2_deprecated -def test_create_invalid_cluster_subnet_group(): - redshift_conn = boto.connect_redshift() - redshift_conn.create_cluster_subnet_group.when.called_with( - "my_subnet", - "This is my subnet group", - subnet_ids=["subnet-1234"], - ).should.throw(InvalidSubnet) - - -@mock_redshift_deprecated -def test_describe_non_existent_subnet_group(): - conn = boto.redshift.connect_to_region("us-east-1") - conn.describe_cluster_subnet_groups.when.called_with( - "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) - - -@mock_redshift_deprecated -@mock_ec2_deprecated -def test_delete_cluster_subnet_group(): - vpc_conn = boto.connect_vpc() - vpc = vpc_conn.create_vpc("10.0.0.0/16") - subnet = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") - redshift_conn = boto.connect_redshift() - - redshift_conn.create_cluster_subnet_group( - "my_subnet", - "This is my subnet group", - subnet_ids=[subnet.id], - ) - - subnets_response = redshift_conn.describe_cluster_subnet_groups() - subnets = subnets_response['DescribeClusterSubnetGroupsResponse'][ - 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] - subnets.should.have.length_of(1) - - redshift_conn.delete_cluster_subnet_group("my_subnet") - - subnets_response = redshift_conn.describe_cluster_subnet_groups() - subnets = subnets_response['DescribeClusterSubnetGroupsResponse'][ - 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] - subnets.should.have.length_of(0) - - # Delete invalid id - redshift_conn.delete_cluster_subnet_group.when.called_with( - 
"not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) - - -@mock_redshift_deprecated -def test_create_cluster_security_group(): - conn = boto.connect_redshift() - conn.create_cluster_security_group( - "my_security_group", - "This is my security group", - ) - - groups_response = conn.describe_cluster_security_groups( - "my_security_group") - my_group = groups_response['DescribeClusterSecurityGroupsResponse'][ - 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'][0] - - my_group['ClusterSecurityGroupName'].should.equal("my_security_group") - my_group['Description'].should.equal("This is my security group") - list(my_group['IPRanges']).should.equal([]) - - -@mock_redshift_deprecated -def test_describe_non_existent_security_group(): - conn = boto.redshift.connect_to_region("us-east-1") - conn.describe_cluster_security_groups.when.called_with( - "not-a-security-group").should.throw(ClusterSecurityGroupNotFound) - - -@mock_redshift_deprecated -def test_delete_cluster_security_group(): - conn = boto.connect_redshift() - conn.create_cluster_security_group( - "my_security_group", - "This is my security group", - ) - - groups_response = conn.describe_cluster_security_groups() - groups = groups_response['DescribeClusterSecurityGroupsResponse'][ - 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'] - groups.should.have.length_of(2) # The default group already exists - - conn.delete_cluster_security_group("my_security_group") - - groups_response = conn.describe_cluster_security_groups() - groups = groups_response['DescribeClusterSecurityGroupsResponse'][ - 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'] - groups.should.have.length_of(1) - - # Delete invalid id - conn.delete_cluster_security_group.when.called_with( - "not-a-security-group").should.throw(ClusterSecurityGroupNotFound) - - -@mock_redshift_deprecated -def test_create_cluster_parameter_group(): - conn = boto.connect_redshift() - conn.create_cluster_parameter_group( - "my_parameter_group", - "redshift-1.0", - "This is my parameter group", - ) - - groups_response = conn.describe_cluster_parameter_groups( - "my_parameter_group") - my_group = groups_response['DescribeClusterParameterGroupsResponse'][ - 'DescribeClusterParameterGroupsResult']['ParameterGroups'][0] - - my_group['ParameterGroupName'].should.equal("my_parameter_group") - my_group['ParameterGroupFamily'].should.equal("redshift-1.0") - my_group['Description'].should.equal("This is my parameter group") - - -@mock_redshift_deprecated -def test_describe_non_existent_parameter_group(): - conn = boto.redshift.connect_to_region("us-east-1") - conn.describe_cluster_parameter_groups.when.called_with( - "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) - - -@mock_redshift_deprecated -def test_delete_cluster_parameter_group(): - conn = boto.connect_redshift() - conn.create_cluster_parameter_group( - "my_parameter_group", - "redshift-1.0", - "This is my parameter group", - ) - - groups_response = conn.describe_cluster_parameter_groups() - groups = groups_response['DescribeClusterParameterGroupsResponse'][ - 'DescribeClusterParameterGroupsResult']['ParameterGroups'] - groups.should.have.length_of(2) # The default group already exists - - conn.delete_cluster_parameter_group("my_parameter_group") - - groups_response = conn.describe_cluster_parameter_groups() - groups = groups_response['DescribeClusterParameterGroupsResponse'][ - 'DescribeClusterParameterGroupsResult']['ParameterGroups'] - groups.should.have.length_of(1) - - # 
Delete invalid id - conn.delete_cluster_parameter_group.when.called_with( - "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound) - - - -@mock_redshift -def test_create_cluster_snapshot_of_non_existent_cluster(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'non-existent-cluster-id' - client.create_cluster_snapshot.when.called_with( - SnapshotIdentifier='snapshot-id', - ClusterIdentifier=cluster_identifier, - ).should.throw(ClientError, 'Cluster {} not found.'.format(cluster_identifier)) - - -@mock_redshift -def test_create_cluster_snapshot(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'my_cluster' - snapshot_identifier = 'my_snapshot' - - cluster_response = client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - cluster_response['Cluster']['NodeType'].should.equal('ds2.xlarge') - - snapshot_response = client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier, - Tags=[{'Key': 'test-tag-key', - 'Value': 'test-tag-value'}] - ) - snapshot = snapshot_response['Snapshot'] - snapshot['SnapshotIdentifier'].should.equal(snapshot_identifier) - snapshot['ClusterIdentifier'].should.equal(cluster_identifier) - snapshot['NumberOfNodes'].should.equal(1) - snapshot['NodeType'].should.equal('ds2.xlarge') - snapshot['MasterUsername'].should.equal('username') - - -@mock_redshift -def test_describe_cluster_snapshots(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'my_cluster' - snapshot_identifier = 'my_snapshot' - - client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - - client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier, - ) - - resp_clust = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier) - resp_snap = client.describe_cluster_snapshots(SnapshotIdentifier=snapshot_identifier) - resp_clust['Snapshots'][0].should.equal(resp_snap['Snapshots'][0]) - snapshot = resp_snap['Snapshots'][0] - snapshot['SnapshotIdentifier'].should.equal(snapshot_identifier) - snapshot['ClusterIdentifier'].should.equal(cluster_identifier) - snapshot['NumberOfNodes'].should.equal(1) - snapshot['NodeType'].should.equal('ds2.xlarge') - snapshot['MasterUsername'].should.equal('username') - - -@mock_redshift -def test_describe_cluster_snapshots_not_found_error(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'my_cluster' - snapshot_identifier = 'my_snapshot' - - client.describe_cluster_snapshots.when.called_with( - ClusterIdentifier=cluster_identifier, - ).should.throw(ClientError, 'Cluster {} not found.'.format(cluster_identifier)) - - client.describe_cluster_snapshots.when.called_with( - SnapshotIdentifier=snapshot_identifier - ).should.throw(ClientError, 'Snapshot {} not found.'.format(snapshot_identifier)) - - -@mock_redshift -def test_delete_cluster_snapshot(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'my_cluster' - snapshot_identifier = 'my_snapshot' - - client.create_cluster( - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - 
MasterUsername='username', - MasterUserPassword='password', - ) - client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier - ) - - snapshots = client.describe_cluster_snapshots()['Snapshots'] - list(snapshots).should.have.length_of(1) - - client.delete_cluster_snapshot(SnapshotIdentifier=snapshot_identifier)[ - 'Snapshot']['Status'].should.equal('deleted') - - snapshots = client.describe_cluster_snapshots()['Snapshots'] - list(snapshots).should.have.length_of(0) - - # Delete invalid id - client.delete_cluster_snapshot.when.called_with( - SnapshotIdentifier="not-a-snapshot").should.throw(ClientError) - - -@mock_redshift -def test_cluster_snapshot_already_exists(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'my_cluster' - snapshot_identifier = 'my_snapshot' - - client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - - client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier - ) - - client.create_cluster_snapshot.when.called_with( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier - ).should.throw(ClientError) - - -@mock_redshift -def test_create_cluster_from_snapshot(): - client = boto3.client('redshift', region_name='us-east-1') - original_cluster_identifier = 'original-cluster' - original_snapshot_identifier = 'original-snapshot' - new_cluster_identifier = 'new-cluster' - - client.create_cluster( - ClusterIdentifier=original_cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - client.create_cluster_snapshot( - SnapshotIdentifier=original_snapshot_identifier, - ClusterIdentifier=original_cluster_identifier - ) - response = client.restore_from_cluster_snapshot( - ClusterIdentifier=new_cluster_identifier, - SnapshotIdentifier=original_snapshot_identifier, - Port=1234 - ) - response['Cluster']['ClusterStatus'].should.equal('creating') - - response = client.describe_clusters( - ClusterIdentifier=new_cluster_identifier - ) - new_cluster = response['Clusters'][0] - new_cluster['NodeType'].should.equal('ds2.xlarge') - new_cluster['MasterUsername'].should.equal('username') - new_cluster['Endpoint']['Port'].should.equal(1234) - - -@mock_redshift -def test_create_cluster_from_snapshot_with_waiter(): - client = boto3.client('redshift', region_name='us-east-1') - original_cluster_identifier = 'original-cluster' - original_snapshot_identifier = 'original-snapshot' - new_cluster_identifier = 'new-cluster' - - client.create_cluster( - ClusterIdentifier=original_cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - client.create_cluster_snapshot( - SnapshotIdentifier=original_snapshot_identifier, - ClusterIdentifier=original_cluster_identifier - ) - response = client.restore_from_cluster_snapshot( - ClusterIdentifier=new_cluster_identifier, - SnapshotIdentifier=original_snapshot_identifier, - Port=1234 - ) - response['Cluster']['ClusterStatus'].should.equal('creating') - - client.get_waiter('cluster_restored').wait( - ClusterIdentifier=new_cluster_identifier, - WaiterConfig={ - 'Delay': 1, - 'MaxAttempts': 2, - } - ) - - response = client.describe_clusters( - ClusterIdentifier=new_cluster_identifier - ) - 
new_cluster = response['Clusters'][0] - new_cluster['NodeType'].should.equal('ds2.xlarge') - new_cluster['MasterUsername'].should.equal('username') - new_cluster['Endpoint']['Port'].should.equal(1234) - - -@mock_redshift -def test_create_cluster_from_non_existent_snapshot(): - client = boto3.client('redshift', region_name='us-east-1') - client.restore_from_cluster_snapshot.when.called_with( - ClusterIdentifier='cluster-id', - SnapshotIdentifier='non-existent-snapshot', - ).should.throw(ClientError, 'Snapshot non-existent-snapshot not found.') - - -@mock_redshift -def test_create_cluster_status_update(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'test-cluster' - - response = client.create_cluster( - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - response['Cluster']['ClusterStatus'].should.equal('creating') - - response = client.describe_clusters( - ClusterIdentifier=cluster_identifier - ) - response['Clusters'][0]['ClusterStatus'].should.equal('available') - - -@mock_redshift -def test_describe_tags_with_resource_type(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'my_cluster' - cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ - 'cluster:{}'.format(cluster_identifier) - snapshot_identifier = 'my_snapshot' - snapshot_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ - 'snapshot:{}/{}'.format(cluster_identifier, - snapshot_identifier) - tag_key = 'test-tag-key' - tag_value = 'test-tag-value' - - client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - Tags=[{'Key': tag_key, - 'Value': tag_value}] - ) - tags_response = client.describe_tags(ResourceType='cluster') - tagged_resources = tags_response['TaggedResources'] - list(tagged_resources).should.have.length_of(1) - tagged_resources[0]['ResourceType'].should.equal('cluster') - tagged_resources[0]['ResourceName'].should.equal(cluster_arn) - tag = tagged_resources[0]['Tag'] - tag['Key'].should.equal(tag_key) - tag['Value'].should.equal(tag_value) - - client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier, - Tags=[{'Key': tag_key, - 'Value': tag_value}] - ) - tags_response = client.describe_tags(ResourceType='snapshot') - tagged_resources = tags_response['TaggedResources'] - list(tagged_resources).should.have.length_of(1) - tagged_resources[0]['ResourceType'].should.equal('snapshot') - tagged_resources[0]['ResourceName'].should.equal(snapshot_arn) - tag = tagged_resources[0]['Tag'] - tag['Key'].should.equal(tag_key) - tag['Value'].should.equal(tag_value) - - -@mock_redshift -def test_describe_tags_cannot_specify_resource_type_and_resource_name(): - client = boto3.client('redshift', region_name='us-east-1') - resource_name = 'arn:aws:redshift:us-east-1:123456789012:cluster:cluster-id' - resource_type = 'cluster' - client.describe_tags.when.called_with( - ResourceName=resource_name, - ResourceType=resource_type - ).should.throw(ClientError, 'using either an ARN or a resource type') - - -@mock_redshift -def test_describe_tags_with_resource_name(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'cluster-id' - cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ - 'cluster:{}'.format(cluster_identifier) - 
snapshot_identifier = 'snapshot-id' - snapshot_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ - 'snapshot:{}/{}'.format(cluster_identifier, - snapshot_identifier) - tag_key = 'test-tag-key' - tag_value = 'test-tag-value' - - client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - Tags=[{'Key': tag_key, - 'Value': tag_value}] - ) - tags_response = client.describe_tags(ResourceName=cluster_arn) - tagged_resources = tags_response['TaggedResources'] - list(tagged_resources).should.have.length_of(1) - tagged_resources[0]['ResourceType'].should.equal('cluster') - tagged_resources[0]['ResourceName'].should.equal(cluster_arn) - tag = tagged_resources[0]['Tag'] - tag['Key'].should.equal(tag_key) - tag['Value'].should.equal(tag_value) - - client.create_cluster_snapshot( - SnapshotIdentifier=snapshot_identifier, - ClusterIdentifier=cluster_identifier, - Tags=[{'Key': tag_key, - 'Value': tag_value}] - ) - tags_response = client.describe_tags(ResourceName=snapshot_arn) - tagged_resources = tags_response['TaggedResources'] - list(tagged_resources).should.have.length_of(1) - tagged_resources[0]['ResourceType'].should.equal('snapshot') - tagged_resources[0]['ResourceName'].should.equal(snapshot_arn) - tag = tagged_resources[0]['Tag'] - tag['Key'].should.equal(tag_key) - tag['Value'].should.equal(tag_value) - - -@mock_redshift -def test_create_tags(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'cluster-id' - cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ - 'cluster:{}'.format(cluster_identifier) - tag_key = 'test-tag-key' - tag_value = 'test-tag-value' - num_tags = 5 - tags = [] - for i in range(0, num_tags): - tag = {'Key': '{}-{}'.format(tag_key, i), - 'Value': '{}-{}'.format(tag_value, i)} - tags.append(tag) - - client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - ) - client.create_tags( - ResourceName=cluster_arn, - Tags=tags - ) - response = client.describe_clusters(ClusterIdentifier=cluster_identifier) - cluster = response['Clusters'][0] - list(cluster['Tags']).should.have.length_of(num_tags) - response = client.describe_tags(ResourceName=cluster_arn) - list(response['TaggedResources']).should.have.length_of(num_tags) - - -@mock_redshift -def test_delete_tags(): - client = boto3.client('redshift', region_name='us-east-1') - cluster_identifier = 'cluster-id' - cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ - 'cluster:{}'.format(cluster_identifier) - tag_key = 'test-tag-key' - tag_value = 'test-tag-value' - tags = [] - for i in range(1, 2): - tag = {'Key': '{}-{}'.format(tag_key, i), - 'Value': '{}-{}'.format(tag_value, i)} - tags.append(tag) - - client.create_cluster( - DBName='test-db', - ClusterIdentifier=cluster_identifier, - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='username', - MasterUserPassword='password', - Tags=tags - ) - client.delete_tags( - ResourceName=cluster_arn, - TagKeys=[tag['Key'] for tag in tags - if tag['Key'] != '{}-1'.format(tag_key)] - ) - response = client.describe_clusters(ClusterIdentifier=cluster_identifier) - cluster = response['Clusters'][0] - list(cluster['Tags']).should.have.length_of(1) - response = client.describe_tags(ResourceName=cluster_arn) - 
list(response['TaggedResources']).should.have.length_of(1) - - -@mock_ec2 -@mock_redshift -def test_describe_tags_all_resource_types(): - ec2 = boto3.resource('ec2', region_name='us-east-1') - vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24') - client = boto3.client('redshift', region_name='us-east-1') - response = client.describe_tags() - list(response['TaggedResources']).should.have.length_of(0) - client.create_cluster_subnet_group( - ClusterSubnetGroupName='my_subnet_group', - Description='This is my subnet group', - SubnetIds=[subnet.id], - Tags=[{'Key': 'tag_key', - 'Value': 'tag_value'}] - ) - client.create_cluster_security_group( - ClusterSecurityGroupName="security_group1", - Description="This is my security group", - Tags=[{'Key': 'tag_key', - 'Value': 'tag_value'}] - ) - client.create_cluster( - DBName='test', - ClusterIdentifier='my_cluster', - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='user', - MasterUserPassword='password', - Tags=[{'Key': 'tag_key', - 'Value': 'tag_value'}] - ) - client.create_cluster_snapshot( - SnapshotIdentifier='my_snapshot', - ClusterIdentifier='my_cluster', - Tags=[{'Key': 'tag_key', - 'Value': 'tag_value'}] - ) - client.create_cluster_parameter_group( - ParameterGroupName="my_parameter_group", - ParameterGroupFamily="redshift-1.0", - Description="This is my parameter group", - Tags=[{'Key': 'tag_key', - 'Value': 'tag_value'}] - ) - response = client.describe_tags() - expected_types = ['cluster', 'parametergroup', 'securitygroup', 'snapshot', 'subnetgroup'] - tagged_resources = response['TaggedResources'] - returned_types = [resource['ResourceType'] for resource in tagged_resources] - list(tagged_resources).should.have.length_of(len(expected_types)) - set(returned_types).should.equal(set(expected_types)) - - -@mock_redshift -def test_tagged_resource_not_found_error(): - client = boto3.client('redshift', region_name='us-east-1') - - cluster_arn = 'arn:aws:redshift:us-east-1::cluster:fake' - client.describe_tags.when.called_with( - ResourceName=cluster_arn - ).should.throw(ClientError, 'cluster (fake) not found.') - - snapshot_arn = 'arn:aws:redshift:us-east-1::snapshot:cluster-id/snap-id' - client.delete_tags.when.called_with( - ResourceName=snapshot_arn, - TagKeys=['test'] - ).should.throw(ClientError, 'snapshot (snap-id) not found.') - - client.describe_tags.when.called_with( - ResourceType='cluster' - ).should.throw(ClientError, "resource of type 'cluster' not found.") - - client.describe_tags.when.called_with( - ResourceName='bad:arn' - ).should.throw(ClientError, "Tagging is not supported for this type of resource") - - -@mock_redshift -def test_enable_snapshot_copy(): - client = boto3.client('redshift', region_name='us-east-1') - client.create_cluster( - ClusterIdentifier='test', - ClusterType='single-node', - DBName='test', - Encrypted=True, - MasterUsername='user', - MasterUserPassword='password', - NodeType='ds2.xlarge', - ) - client.enable_snapshot_copy( - ClusterIdentifier='test', - DestinationRegion='us-west-2', - RetentionPeriod=3, - SnapshotCopyGrantName='copy-us-east-1-to-us-west-2' - ) - response = client.describe_clusters(ClusterIdentifier='test') - cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] - cluster_snapshot_copy_status['RetentionPeriod'].should.equal(3) - cluster_snapshot_copy_status['DestinationRegion'].should.equal('us-west-2') - 
cluster_snapshot_copy_status['SnapshotCopyGrantName'].should.equal('copy-us-east-1-to-us-west-2') - - -@mock_redshift -def test_enable_snapshot_copy_unencrypted(): - client = boto3.client('redshift', region_name='us-east-1') - client.create_cluster( - ClusterIdentifier='test', - ClusterType='single-node', - DBName='test', - MasterUsername='user', - MasterUserPassword='password', - NodeType='ds2.xlarge', - ) - client.enable_snapshot_copy( - ClusterIdentifier='test', - DestinationRegion='us-west-2', - ) - response = client.describe_clusters(ClusterIdentifier='test') - cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] - cluster_snapshot_copy_status['RetentionPeriod'].should.equal(7) - cluster_snapshot_copy_status['DestinationRegion'].should.equal('us-west-2') - - -@mock_redshift -def test_disable_snapshot_copy(): - client = boto3.client('redshift', region_name='us-east-1') - client.create_cluster( - DBName='test', - ClusterIdentifier='test', - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='user', - MasterUserPassword='password', - ) - client.enable_snapshot_copy( - ClusterIdentifier='test', - DestinationRegion='us-west-2', - RetentionPeriod=3, - SnapshotCopyGrantName='copy-us-east-1-to-us-west-2', - ) - client.disable_snapshot_copy( - ClusterIdentifier='test', - ) - response = client.describe_clusters(ClusterIdentifier='test') - response['Clusters'][0].shouldnt.contain('ClusterSnapshotCopyStatus') - - -@mock_redshift -def test_modify_snapshot_copy_retention_period(): - client = boto3.client('redshift', region_name='us-east-1') - client.create_cluster( - DBName='test', - ClusterIdentifier='test', - ClusterType='single-node', - NodeType='ds2.xlarge', - MasterUsername='user', - MasterUserPassword='password', - ) - client.enable_snapshot_copy( - ClusterIdentifier='test', - DestinationRegion='us-west-2', - RetentionPeriod=3, - SnapshotCopyGrantName='copy-us-east-1-to-us-west-2', - ) - client.modify_snapshot_copy_retention_period( - ClusterIdentifier='test', - RetentionPeriod=5, - ) - response = client.describe_clusters(ClusterIdentifier='test') - cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] - cluster_snapshot_copy_status['RetentionPeriod'].should.equal(5) +from __future__ import unicode_literals + +import datetime + +import boto +import boto3 +from boto.redshift.exceptions import ( + ClusterNotFound, + ClusterParameterGroupNotFound, + ClusterSecurityGroupNotFound, + ClusterSubnetGroupNotFound, + InvalidSubnet, +) +from botocore.exceptions import ( + ClientError +) +import sure # noqa + +from moto import mock_ec2 +from moto import mock_ec2_deprecated +from moto import mock_redshift +from moto import mock_redshift_deprecated + + +@mock_redshift +def test_create_cluster_boto3(): + client = boto3.client('redshift', region_name='us-east-1') + response = client.create_cluster( + DBName='test', + ClusterIdentifier='test', + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='user', + MasterUserPassword='password', + ) + response['Cluster']['NodeType'].should.equal('ds2.xlarge') + create_time = response['Cluster']['ClusterCreateTime'] + create_time.should.be.lower_than(datetime.datetime.now(create_time.tzinfo)) + + +@mock_redshift +def test_create_snapshot_copy_grant(): + client = boto3.client('redshift', region_name='us-east-1') + grants = client.create_snapshot_copy_grant( + SnapshotCopyGrantName='test-us-east-1', + KmsKeyId='fake', + ) + 
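# the mocked grant should echo back the name and KMS key supplied above
+    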
grants['SnapshotCopyGrant']['SnapshotCopyGrantName'].should.equal('test-us-east-1') + grants['SnapshotCopyGrant']['KmsKeyId'].should.equal('fake') + + client.delete_snapshot_copy_grant( + SnapshotCopyGrantName='test-us-east-1', + ) + + client.describe_snapshot_copy_grants.when.called_with( + SnapshotCopyGrantName='test-us-east-1', + ).should.throw(Exception) + + +@mock_redshift +def test_create_many_snapshot_copy_grants(): + client = boto3.client('redshift', region_name='us-east-1') + + for i in range(10): + client.create_snapshot_copy_grant( + SnapshotCopyGrantName='test-us-east-1-{0}'.format(i), + KmsKeyId='fake', + ) + response = client.describe_snapshot_copy_grants() + len(response['SnapshotCopyGrants']).should.equal(10) + + +@mock_redshift +def test_no_snapshot_copy_grants(): + client = boto3.client('redshift', region_name='us-east-1') + response = client.describe_snapshot_copy_grants() + len(response['SnapshotCopyGrants']).should.equal(0) + + +@mock_redshift_deprecated +def test_create_cluster(): + conn = boto.redshift.connect_to_region("us-east-1") + cluster_identifier = 'my_cluster' + + cluster_response = conn.create_cluster( + cluster_identifier, + node_type="dw.hs1.xlarge", + master_username="username", + master_user_password="password", + db_name="my_db", + cluster_type="multi-node", + availability_zone="us-east-1d", + preferred_maintenance_window="Mon:03:00-Mon:11:00", + automated_snapshot_retention_period=10, + port=1234, + cluster_version="1.0", + allow_version_upgrade=True, + number_of_nodes=3, + ) + cluster_response['CreateClusterResponse']['CreateClusterResult'][ + 'Cluster']['ClusterStatus'].should.equal('creating') + + cluster_response = conn.describe_clusters(cluster_identifier) + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + + cluster['ClusterIdentifier'].should.equal(cluster_identifier) + cluster['NodeType'].should.equal("dw.hs1.xlarge") + cluster['MasterUsername'].should.equal("username") + cluster['DBName'].should.equal("my_db") + cluster['ClusterSecurityGroups'][0][ + 'ClusterSecurityGroupName'].should.equal("Default") + cluster['VpcSecurityGroups'].should.equal([]) + cluster['ClusterSubnetGroupName'].should.equal(None) + cluster['AvailabilityZone'].should.equal("us-east-1d") + cluster['PreferredMaintenanceWindow'].should.equal("Mon:03:00-Mon:11:00") + cluster['ClusterParameterGroups'][0][ + 'ParameterGroupName'].should.equal("default.redshift-1.0") + cluster['AutomatedSnapshotRetentionPeriod'].should.equal(10) + cluster['Port'].should.equal(1234) + cluster['ClusterVersion'].should.equal("1.0") + cluster['AllowVersionUpgrade'].should.equal(True) + cluster['NumberOfNodes'].should.equal(3) + + +@mock_redshift_deprecated +def test_create_single_node_cluster(): + conn = boto.redshift.connect_to_region("us-east-1") + cluster_identifier = 'my_cluster' + + conn.create_cluster( + cluster_identifier, + node_type="dw.hs1.xlarge", + master_username="username", + master_user_password="password", + db_name="my_db", + cluster_type="single-node", + ) + + cluster_response = conn.describe_clusters(cluster_identifier) + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + + cluster['ClusterIdentifier'].should.equal(cluster_identifier) + cluster['NodeType'].should.equal("dw.hs1.xlarge") + cluster['MasterUsername'].should.equal("username") + cluster['DBName'].should.equal("my_db") + cluster['NumberOfNodes'].should.equal(1) + + +@mock_redshift_deprecated +def 
test_default_cluster_attributes(): + conn = boto.redshift.connect_to_region("us-east-1") + cluster_identifier = 'my_cluster' + + conn.create_cluster( + cluster_identifier, + node_type="dw.hs1.xlarge", + master_username="username", + master_user_password="password", + ) + + cluster_response = conn.describe_clusters(cluster_identifier) + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + + cluster['DBName'].should.equal("dev") + cluster['ClusterSubnetGroupName'].should.equal(None) + assert "us-east-" in cluster['AvailabilityZone'] + cluster['PreferredMaintenanceWindow'].should.equal("Mon:03:00-Mon:03:30") + cluster['ClusterParameterGroups'][0][ + 'ParameterGroupName'].should.equal("default.redshift-1.0") + cluster['AutomatedSnapshotRetentionPeriod'].should.equal(1) + cluster['Port'].should.equal(5439) + cluster['ClusterVersion'].should.equal("1.0") + cluster['AllowVersionUpgrade'].should.equal(True) + cluster['NumberOfNodes'].should.equal(1) + + +@mock_redshift_deprecated +@mock_ec2_deprecated +def test_create_cluster_in_subnet_group(): + vpc_conn = boto.connect_vpc() + vpc = vpc_conn.create_vpc("10.0.0.0/16") + subnet = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") + redshift_conn = boto.connect_redshift() + redshift_conn.create_cluster_subnet_group( + "my_subnet_group", + "This is my subnet group", + subnet_ids=[subnet.id], + ) + + redshift_conn.create_cluster( + "my_cluster", + node_type="dw.hs1.xlarge", + master_username="username", + master_user_password="password", + cluster_subnet_group_name='my_subnet_group', + ) + + cluster_response = redshift_conn.describe_clusters("my_cluster") + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group') + + +@mock_redshift +@mock_ec2 +def test_create_cluster_in_subnet_group_boto3(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24') + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster_subnet_group( + ClusterSubnetGroupName='my_subnet_group', + Description='This is my subnet group', + SubnetIds=[subnet.id] + ) + + client.create_cluster( + ClusterIdentifier="my_cluster", + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + ClusterSubnetGroupName='my_subnet_group', + ) + + cluster_response = client.describe_clusters(ClusterIdentifier="my_cluster") + cluster = cluster_response['Clusters'][0] + cluster['ClusterSubnetGroupName'].should.equal('my_subnet_group') + + +@mock_redshift_deprecated +def test_create_cluster_with_security_group(): + conn = boto.redshift.connect_to_region("us-east-1") + conn.create_cluster_security_group( + "security_group1", + "This is my security group", + ) + conn.create_cluster_security_group( + "security_group2", + "This is my security group", + ) + + cluster_identifier = 'my_cluster' + conn.create_cluster( + cluster_identifier, + node_type="dw.hs1.xlarge", + master_username="username", + master_user_password="password", + cluster_security_groups=["security_group1", "security_group2"] + ) + + cluster_response = conn.describe_clusters(cluster_identifier) + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + group_names = [group['ClusterSecurityGroupName'] + for group in cluster['ClusterSecurityGroups']] + 
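# both security groups supplied at creation should be attached to the cluster
+    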
set(group_names).should.equal(set(["security_group1", "security_group2"])) + + +@mock_redshift +def test_create_cluster_with_security_group_boto3(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster_security_group( + ClusterSecurityGroupName="security_group1", + Description="This is my security group", + ) + client.create_cluster_security_group( + ClusterSecurityGroupName="security_group2", + Description="This is my security group", + ) + + cluster_identifier = 'my_cluster' + client.create_cluster( + ClusterIdentifier=cluster_identifier, + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + ClusterSecurityGroups=["security_group1", "security_group2"] + ) + response = client.describe_clusters(ClusterIdentifier=cluster_identifier) + cluster = response['Clusters'][0] + group_names = [group['ClusterSecurityGroupName'] + for group in cluster['ClusterSecurityGroups']] + set(group_names).should.equal({"security_group1", "security_group2"}) + + +@mock_redshift_deprecated +@mock_ec2_deprecated +def test_create_cluster_with_vpc_security_groups(): + vpc_conn = boto.connect_vpc() + ec2_conn = boto.connect_ec2() + redshift_conn = boto.connect_redshift() + vpc = vpc_conn.create_vpc("10.0.0.0/16") + security_group = ec2_conn.create_security_group( + "vpc_security_group", "a group", vpc_id=vpc.id) + + redshift_conn.create_cluster( + "my_cluster", + node_type="dw.hs1.xlarge", + master_username="username", + master_user_password="password", + vpc_security_group_ids=[security_group.id], + ) + + cluster_response = redshift_conn.describe_clusters("my_cluster") + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + group_ids = [group['VpcSecurityGroupId'] + for group in cluster['VpcSecurityGroups']] + list(group_ids).should.equal([security_group.id]) + + +@mock_redshift +@mock_ec2 +def test_create_cluster_with_vpc_security_groups_boto3(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') + client = boto3.client('redshift', region_name='us-east-1') + cluster_id = 'my_cluster' + security_group = ec2.create_security_group( + Description="vpc_security_group", + GroupName="a group", + VpcId=vpc.id) + client.create_cluster( + ClusterIdentifier=cluster_id, + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + VpcSecurityGroupIds=[security_group.id], + ) + response = client.describe_clusters(ClusterIdentifier=cluster_id) + cluster = response['Clusters'][0] + group_ids = [group['VpcSecurityGroupId'] + for group in cluster['VpcSecurityGroups']] + list(group_ids).should.equal([security_group.id]) + + +@mock_redshift +def test_create_cluster_with_iam_roles(): + iam_roles_arn = ['arn:aws:iam:::role/my-iam-role',] + client = boto3.client('redshift', region_name='us-east-1') + cluster_id = 'my_cluster' + client.create_cluster( + ClusterIdentifier=cluster_id, + NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + IamRoles=iam_roles_arn + ) + response = client.describe_clusters(ClusterIdentifier=cluster_id) + cluster = response['Clusters'][0] + iam_roles = [role['IamRoleArn'] for role in cluster['IamRoles']] + iam_roles_arn.should.equal(iam_roles) + + +@mock_redshift_deprecated +def test_create_cluster_with_parameter_group(): + conn = boto.connect_redshift() + conn.create_cluster_parameter_group( + "my_parameter_group", + "redshift-1.0", + "This is my parameter group", + ) + + 
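# pass the parameter group at creation time; describe_clusters should report it
+    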
conn.create_cluster( + "my_cluster", + node_type="dw.hs1.xlarge", + master_username="username", + master_user_password="password", + cluster_parameter_group_name='my_parameter_group', + ) + + cluster_response = conn.describe_clusters("my_cluster") + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + cluster['ClusterParameterGroups'][0][ + 'ParameterGroupName'].should.equal("my_parameter_group") + + +@mock_redshift_deprecated +def test_describe_non_existent_cluster(): + conn = boto.redshift.connect_to_region("us-east-1") + conn.describe_clusters.when.called_with( + "not-a-cluster").should.throw(ClusterNotFound) + + +@mock_redshift_deprecated +def test_delete_cluster(): + conn = boto.connect_redshift() + cluster_identifier = 'my_cluster' + + conn.create_cluster( + cluster_identifier, + node_type='single-node', + master_username="username", + master_user_password="password", + ) + + clusters = conn.describe_clusters()['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'] + list(clusters).should.have.length_of(1) + + conn.delete_cluster(cluster_identifier) + + clusters = conn.describe_clusters()['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'] + list(clusters).should.have.length_of(0) + + # Delete invalid id + conn.delete_cluster.when.called_with( + "not-a-cluster").should.throw(ClusterNotFound) + + +@mock_redshift_deprecated +def test_modify_cluster(): + conn = boto.connect_redshift() + cluster_identifier = 'my_cluster' + conn.create_cluster_security_group( + "security_group", + "This is my security group", + ) + conn.create_cluster_parameter_group( + "my_parameter_group", + "redshift-1.0", + "This is my parameter group", + ) + + conn.create_cluster( + cluster_identifier, + node_type='single-node', + master_username="username", + master_user_password="password", + ) + + conn.modify_cluster( + cluster_identifier, + cluster_type="multi-node", + node_type="dw.hs1.xlarge", + cluster_security_groups="security_group", + master_user_password="new_password", + cluster_parameter_group_name="my_parameter_group", + automated_snapshot_retention_period=7, + preferred_maintenance_window="Tue:03:00-Tue:11:00", + allow_version_upgrade=False, + new_cluster_identifier="new_identifier", + ) + + cluster_response = conn.describe_clusters("new_identifier") + cluster = cluster_response['DescribeClustersResponse'][ + 'DescribeClustersResult']['Clusters'][0] + + cluster['ClusterIdentifier'].should.equal("new_identifier") + cluster['NodeType'].should.equal("dw.hs1.xlarge") + cluster['ClusterSecurityGroups'][0][ + 'ClusterSecurityGroupName'].should.equal("security_group") + cluster['PreferredMaintenanceWindow'].should.equal("Tue:03:00-Tue:11:00") + cluster['ClusterParameterGroups'][0][ + 'ParameterGroupName'].should.equal("my_parameter_group") + cluster['AutomatedSnapshotRetentionPeriod'].should.equal(7) + cluster['AllowVersionUpgrade'].should.equal(False) + # This one should remain unmodified. 
+ cluster['NumberOfNodes'].should.equal(1) + + +@mock_redshift_deprecated +@mock_ec2_deprecated +def test_create_cluster_subnet_group(): + vpc_conn = boto.connect_vpc() + vpc = vpc_conn.create_vpc("10.0.0.0/16") + subnet1 = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") + subnet2 = vpc_conn.create_subnet(vpc.id, "10.0.1.0/24") + + redshift_conn = boto.connect_redshift() + + redshift_conn.create_cluster_subnet_group( + "my_subnet", + "This is my subnet group", + subnet_ids=[subnet1.id, subnet2.id], + ) + + subnets_response = redshift_conn.describe_cluster_subnet_groups( + "my_subnet") + my_subnet = subnets_response['DescribeClusterSubnetGroupsResponse'][ + 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'][0] + + my_subnet['ClusterSubnetGroupName'].should.equal("my_subnet") + my_subnet['Description'].should.equal("This is my subnet group") + subnet_ids = [subnet['SubnetIdentifier'] + for subnet in my_subnet['Subnets']] + set(subnet_ids).should.equal(set([subnet1.id, subnet2.id])) + + +@mock_redshift_deprecated +@mock_ec2_deprecated +def test_create_invalid_cluster_subnet_group(): + redshift_conn = boto.connect_redshift() + redshift_conn.create_cluster_subnet_group.when.called_with( + "my_subnet", + "This is my subnet group", + subnet_ids=["subnet-1234"], + ).should.throw(InvalidSubnet) + + +@mock_redshift_deprecated +def test_describe_non_existent_subnet_group(): + conn = boto.redshift.connect_to_region("us-east-1") + conn.describe_cluster_subnet_groups.when.called_with( + "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) + + +@mock_redshift_deprecated +@mock_ec2_deprecated +def test_delete_cluster_subnet_group(): + vpc_conn = boto.connect_vpc() + vpc = vpc_conn.create_vpc("10.0.0.0/16") + subnet = vpc_conn.create_subnet(vpc.id, "10.0.0.0/24") + redshift_conn = boto.connect_redshift() + + redshift_conn.create_cluster_subnet_group( + "my_subnet", + "This is my subnet group", + subnet_ids=[subnet.id], + ) + + subnets_response = redshift_conn.describe_cluster_subnet_groups() + subnets = subnets_response['DescribeClusterSubnetGroupsResponse'][ + 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] + subnets.should.have.length_of(1) + + redshift_conn.delete_cluster_subnet_group("my_subnet") + + subnets_response = redshift_conn.describe_cluster_subnet_groups() + subnets = subnets_response['DescribeClusterSubnetGroupsResponse'][ + 'DescribeClusterSubnetGroupsResult']['ClusterSubnetGroups'] + subnets.should.have.length_of(0) + + # Delete invalid id + redshift_conn.delete_cluster_subnet_group.when.called_with( + "not-a-subnet-group").should.throw(ClusterSubnetGroupNotFound) + + +@mock_redshift_deprecated +def test_create_cluster_security_group(): + conn = boto.connect_redshift() + conn.create_cluster_security_group( + "my_security_group", + "This is my security group", + ) + + groups_response = conn.describe_cluster_security_groups( + "my_security_group") + my_group = groups_response['DescribeClusterSecurityGroupsResponse'][ + 'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups'][0] + + my_group['ClusterSecurityGroupName'].should.equal("my_security_group") + my_group['Description'].should.equal("This is my security group") + list(my_group['IPRanges']).should.equal([]) + + +@mock_redshift_deprecated +def test_describe_non_existent_security_group(): + conn = boto.redshift.connect_to_region("us-east-1") + conn.describe_cluster_security_groups.when.called_with( + "not-a-security-group").should.throw(ClusterSecurityGroupNotFound) + + +@mock_redshift_deprecated +def 
test_delete_cluster_security_group():
+    conn = boto.connect_redshift()
+    conn.create_cluster_security_group(
+        "my_security_group",
+        "This is my security group",
+    )
+
+    groups_response = conn.describe_cluster_security_groups()
+    groups = groups_response['DescribeClusterSecurityGroupsResponse'][
+        'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups']
+    groups.should.have.length_of(2)  # The default group already exists
+
+    conn.delete_cluster_security_group("my_security_group")
+
+    groups_response = conn.describe_cluster_security_groups()
+    groups = groups_response['DescribeClusterSecurityGroupsResponse'][
+        'DescribeClusterSecurityGroupsResult']['ClusterSecurityGroups']
+    groups.should.have.length_of(1)
+
+    # Delete invalid id
+    conn.delete_cluster_security_group.when.called_with(
+        "not-a-security-group").should.throw(ClusterSecurityGroupNotFound)
+
+
+@mock_redshift_deprecated
+def test_create_cluster_parameter_group():
+    conn = boto.connect_redshift()
+    conn.create_cluster_parameter_group(
+        "my_parameter_group",
+        "redshift-1.0",
+        "This is my parameter group",
+    )
+
+    groups_response = conn.describe_cluster_parameter_groups(
+        "my_parameter_group")
+    my_group = groups_response['DescribeClusterParameterGroupsResponse'][
+        'DescribeClusterParameterGroupsResult']['ParameterGroups'][0]
+
+    my_group['ParameterGroupName'].should.equal("my_parameter_group")
+    my_group['ParameterGroupFamily'].should.equal("redshift-1.0")
+    my_group['Description'].should.equal("This is my parameter group")
+
+
+@mock_redshift_deprecated
+def test_describe_non_existent_parameter_group():
+    conn = boto.redshift.connect_to_region("us-east-1")
+    conn.describe_cluster_parameter_groups.when.called_with(
+        "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound)
+
+
+@mock_redshift_deprecated
+def test_delete_cluster_parameter_group():
+    conn = boto.connect_redshift()
+    conn.create_cluster_parameter_group(
+        "my_parameter_group",
+        "redshift-1.0",
+        "This is my parameter group",
+    )
+
+    groups_response = conn.describe_cluster_parameter_groups()
+    groups = groups_response['DescribeClusterParameterGroupsResponse'][
+        'DescribeClusterParameterGroupsResult']['ParameterGroups']
+    groups.should.have.length_of(2)  # The default group already exists
+
+    conn.delete_cluster_parameter_group("my_parameter_group")
+
+    groups_response = conn.describe_cluster_parameter_groups()
+    groups = groups_response['DescribeClusterParameterGroupsResponse'][
+        'DescribeClusterParameterGroupsResult']['ParameterGroups']
+    groups.should.have.length_of(1)
+
+    # Delete invalid id
+    conn.delete_cluster_parameter_group.when.called_with(
+        "not-a-parameter-group").should.throw(ClusterParameterGroupNotFound)
+
+
+@mock_redshift
+def test_create_cluster_snapshot_of_non_existent_cluster():
+    client = boto3.client('redshift', region_name='us-east-1')
+    cluster_identifier = 'non-existent-cluster-id'
+    client.create_cluster_snapshot.when.called_with(
+        SnapshotIdentifier='snapshot-id',
+        ClusterIdentifier=cluster_identifier,
+    ).should.throw(ClientError, 'Cluster {} not found.'.format(cluster_identifier))
+
+
+@mock_redshift
+def test_create_cluster_snapshot():
+    client = boto3.client('redshift', region_name='us-east-1')
+    cluster_identifier = 'my_cluster'
+    snapshot_identifier = 'my_snapshot'
+
+    cluster_response = client.create_cluster(
+        DBName='test-db',
+        ClusterIdentifier=cluster_identifier,
+        ClusterType='single-node',
+        NodeType='ds2.xlarge',
+        MasterUsername='username',
+        MasterUserPassword='password',
+    )
+    
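# sanity-check the new cluster before taking a snapshot of it
+    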
cluster_response['Cluster']['NodeType'].should.equal('ds2.xlarge') + + snapshot_response = client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier, + Tags=[{'Key': 'test-tag-key', + 'Value': 'test-tag-value'}] + ) + snapshot = snapshot_response['Snapshot'] + snapshot['SnapshotIdentifier'].should.equal(snapshot_identifier) + snapshot['ClusterIdentifier'].should.equal(cluster_identifier) + snapshot['NumberOfNodes'].should.equal(1) + snapshot['NodeType'].should.equal('ds2.xlarge') + snapshot['MasterUsername'].should.equal('username') + + +@mock_redshift +def test_describe_cluster_snapshots(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + snapshot_identifier = 'my_snapshot' + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier, + ) + + resp_clust = client.describe_cluster_snapshots(ClusterIdentifier=cluster_identifier) + resp_snap = client.describe_cluster_snapshots(SnapshotIdentifier=snapshot_identifier) + resp_clust['Snapshots'][0].should.equal(resp_snap['Snapshots'][0]) + snapshot = resp_snap['Snapshots'][0] + snapshot['SnapshotIdentifier'].should.equal(snapshot_identifier) + snapshot['ClusterIdentifier'].should.equal(cluster_identifier) + snapshot['NumberOfNodes'].should.equal(1) + snapshot['NodeType'].should.equal('ds2.xlarge') + snapshot['MasterUsername'].should.equal('username') + + +@mock_redshift +def test_describe_cluster_snapshots_not_found_error(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + snapshot_identifier = 'my_snapshot' + + client.describe_cluster_snapshots.when.called_with( + ClusterIdentifier=cluster_identifier, + ).should.throw(ClientError, 'Cluster {} not found.'.format(cluster_identifier)) + + client.describe_cluster_snapshots.when.called_with( + SnapshotIdentifier=snapshot_identifier + ).should.throw(ClientError, 'Snapshot {} not found.'.format(snapshot_identifier)) + + +@mock_redshift +def test_delete_cluster_snapshot(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + snapshot_identifier = 'my_snapshot' + + client.create_cluster( + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier + ) + + snapshots = client.describe_cluster_snapshots()['Snapshots'] + list(snapshots).should.have.length_of(1) + + client.delete_cluster_snapshot(SnapshotIdentifier=snapshot_identifier)[ + 'Snapshot']['Status'].should.equal('deleted') + + snapshots = client.describe_cluster_snapshots()['Snapshots'] + list(snapshots).should.have.length_of(0) + + # Delete invalid id + client.delete_cluster_snapshot.when.called_with( + SnapshotIdentifier="not-a-snapshot").should.throw(ClientError) + + +@mock_redshift +def test_cluster_snapshot_already_exists(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + snapshot_identifier = 'my_snapshot' + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + 
NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier + ) + + client.create_cluster_snapshot.when.called_with( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier + ).should.throw(ClientError) + + +@mock_redshift +def test_create_cluster_from_snapshot(): + client = boto3.client('redshift', region_name='us-east-1') + original_cluster_identifier = 'original-cluster' + original_snapshot_identifier = 'original-snapshot' + new_cluster_identifier = 'new-cluster' + + client.create_cluster( + ClusterIdentifier=original_cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + client.create_cluster_snapshot( + SnapshotIdentifier=original_snapshot_identifier, + ClusterIdentifier=original_cluster_identifier + ) + response = client.restore_from_cluster_snapshot( + ClusterIdentifier=new_cluster_identifier, + SnapshotIdentifier=original_snapshot_identifier, + Port=1234 + ) + response['Cluster']['ClusterStatus'].should.equal('creating') + + response = client.describe_clusters( + ClusterIdentifier=new_cluster_identifier + ) + new_cluster = response['Clusters'][0] + new_cluster['NodeType'].should.equal('ds2.xlarge') + new_cluster['MasterUsername'].should.equal('username') + new_cluster['Endpoint']['Port'].should.equal(1234) + + +@mock_redshift +def test_create_cluster_from_snapshot_with_waiter(): + client = boto3.client('redshift', region_name='us-east-1') + original_cluster_identifier = 'original-cluster' + original_snapshot_identifier = 'original-snapshot' + new_cluster_identifier = 'new-cluster' + + client.create_cluster( + ClusterIdentifier=original_cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + client.create_cluster_snapshot( + SnapshotIdentifier=original_snapshot_identifier, + ClusterIdentifier=original_cluster_identifier + ) + response = client.restore_from_cluster_snapshot( + ClusterIdentifier=new_cluster_identifier, + SnapshotIdentifier=original_snapshot_identifier, + Port=1234 + ) + response['Cluster']['ClusterStatus'].should.equal('creating') + + client.get_waiter('cluster_restored').wait( + ClusterIdentifier=new_cluster_identifier, + WaiterConfig={ + 'Delay': 1, + 'MaxAttempts': 2, + } + ) + + response = client.describe_clusters( + ClusterIdentifier=new_cluster_identifier + ) + new_cluster = response['Clusters'][0] + new_cluster['NodeType'].should.equal('ds2.xlarge') + new_cluster['MasterUsername'].should.equal('username') + new_cluster['Endpoint']['Port'].should.equal(1234) + + +@mock_redshift +def test_create_cluster_from_non_existent_snapshot(): + client = boto3.client('redshift', region_name='us-east-1') + client.restore_from_cluster_snapshot.when.called_with( + ClusterIdentifier='cluster-id', + SnapshotIdentifier='non-existent-snapshot', + ).should.throw(ClientError, 'Snapshot non-existent-snapshot not found.') + + +@mock_redshift +def test_create_cluster_status_update(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'test-cluster' + + response = client.create_cluster( + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + ) + response['Cluster']['ClusterStatus'].should.equal('creating') + + 
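# moto transitions the mocked cluster from 'creating' to 'available' for subsequent describes
+    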
response = client.describe_clusters( + ClusterIdentifier=cluster_identifier + ) + response['Clusters'][0]['ClusterStatus'].should.equal('available') + + +@mock_redshift +def test_describe_tags_with_resource_type(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'my_cluster' + cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'cluster:{}'.format(cluster_identifier) + snapshot_identifier = 'my_snapshot' + snapshot_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'snapshot:{}/{}'.format(cluster_identifier, + snapshot_identifier) + tag_key = 'test-tag-key' + tag_value = 'test-tag-value' + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + Tags=[{'Key': tag_key, + 'Value': tag_value}] + ) + tags_response = client.describe_tags(ResourceType='cluster') + tagged_resources = tags_response['TaggedResources'] + list(tagged_resources).should.have.length_of(1) + tagged_resources[0]['ResourceType'].should.equal('cluster') + tagged_resources[0]['ResourceName'].should.equal(cluster_arn) + tag = tagged_resources[0]['Tag'] + tag['Key'].should.equal(tag_key) + tag['Value'].should.equal(tag_value) + + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + ClusterIdentifier=cluster_identifier, + Tags=[{'Key': tag_key, + 'Value': tag_value}] + ) + tags_response = client.describe_tags(ResourceType='snapshot') + tagged_resources = tags_response['TaggedResources'] + list(tagged_resources).should.have.length_of(1) + tagged_resources[0]['ResourceType'].should.equal('snapshot') + tagged_resources[0]['ResourceName'].should.equal(snapshot_arn) + tag = tagged_resources[0]['Tag'] + tag['Key'].should.equal(tag_key) + tag['Value'].should.equal(tag_value) + + +@mock_redshift +def test_describe_tags_cannot_specify_resource_type_and_resource_name(): + client = boto3.client('redshift', region_name='us-east-1') + resource_name = 'arn:aws:redshift:us-east-1:123456789012:cluster:cluster-id' + resource_type = 'cluster' + client.describe_tags.when.called_with( + ResourceName=resource_name, + ResourceType=resource_type + ).should.throw(ClientError, 'using either an ARN or a resource type') + + +@mock_redshift +def test_describe_tags_with_resource_name(): + client = boto3.client('redshift', region_name='us-east-1') + cluster_identifier = 'cluster-id' + cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'cluster:{}'.format(cluster_identifier) + snapshot_identifier = 'snapshot-id' + snapshot_arn = 'arn:aws:redshift:us-east-1:123456789012:' \ + 'snapshot:{}/{}'.format(cluster_identifier, + snapshot_identifier) + tag_key = 'test-tag-key' + tag_value = 'test-tag-value' + + client.create_cluster( + DBName='test-db', + ClusterIdentifier=cluster_identifier, + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='username', + MasterUserPassword='password', + Tags=[{'Key': tag_key, + 'Value': tag_value}] + ) + tags_response = client.describe_tags(ResourceName=cluster_arn) + tagged_resources = tags_response['TaggedResources'] + list(tagged_resources).should.have.length_of(1) + tagged_resources[0]['ResourceType'].should.equal('cluster') + tagged_resources[0]['ResourceName'].should.equal(cluster_arn) + tag = tagged_resources[0]['Tag'] + tag['Key'].should.equal(tag_key) + tag['Value'].should.equal(tag_value) + + client.create_cluster_snapshot( + SnapshotIdentifier=snapshot_identifier, + 
ClusterIdentifier=cluster_identifier,
+        Tags=[{'Key': tag_key,
+               'Value': tag_value}]
+    )
+    tags_response = client.describe_tags(ResourceName=snapshot_arn)
+    tagged_resources = tags_response['TaggedResources']
+    list(tagged_resources).should.have.length_of(1)
+    tagged_resources[0]['ResourceType'].should.equal('snapshot')
+    tagged_resources[0]['ResourceName'].should.equal(snapshot_arn)
+    tag = tagged_resources[0]['Tag']
+    tag['Key'].should.equal(tag_key)
+    tag['Value'].should.equal(tag_value)
+
+
+@mock_redshift
+def test_create_tags():
+    client = boto3.client('redshift', region_name='us-east-1')
+    cluster_identifier = 'cluster-id'
+    cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \
+                  'cluster:{}'.format(cluster_identifier)
+    tag_key = 'test-tag-key'
+    tag_value = 'test-tag-value'
+    num_tags = 5
+    tags = []
+    for i in range(0, num_tags):
+        tag = {'Key': '{}-{}'.format(tag_key, i),
+               'Value': '{}-{}'.format(tag_value, i)}
+        tags.append(tag)
+
+    client.create_cluster(
+        DBName='test-db',
+        ClusterIdentifier=cluster_identifier,
+        ClusterType='single-node',
+        NodeType='ds2.xlarge',
+        MasterUsername='username',
+        MasterUserPassword='password',
+    )
+    client.create_tags(
+        ResourceName=cluster_arn,
+        Tags=tags
+    )
+    response = client.describe_clusters(ClusterIdentifier=cluster_identifier)
+    cluster = response['Clusters'][0]
+    list(cluster['Tags']).should.have.length_of(num_tags)
+    response = client.describe_tags(ResourceName=cluster_arn)
+    list(response['TaggedResources']).should.have.length_of(num_tags)
+
+
+@mock_redshift
+def test_delete_tags():
+    client = boto3.client('redshift', region_name='us-east-1')
+    cluster_identifier = 'cluster-id'
+    cluster_arn = 'arn:aws:redshift:us-east-1:123456789012:' \
+                  'cluster:{}'.format(cluster_identifier)
+    tag_key = 'test-tag-key'
+    tag_value = 'test-tag-value'
+    tags = []
+    # Create two tags so delete_tags below actually removes one of them;
+    # range(1, 2) would yield a single tag and an empty TagKeys list.
+    for i in range(2):
+        tag = {'Key': '{}-{}'.format(tag_key, i),
+               'Value': '{}-{}'.format(tag_value, i)}
+        tags.append(tag)
+
+    client.create_cluster(
+        DBName='test-db',
+        ClusterIdentifier=cluster_identifier,
+        ClusterType='single-node',
+        NodeType='ds2.xlarge',
+        MasterUsername='username',
+        MasterUserPassword='password',
+        Tags=tags
+    )
+    client.delete_tags(
+        ResourceName=cluster_arn,
+        TagKeys=[tag['Key'] for tag in tags
+                 if tag['Key'] != '{}-1'.format(tag_key)]
+    )
+    response = client.describe_clusters(ClusterIdentifier=cluster_identifier)
+    cluster = response['Clusters'][0]
+    list(cluster['Tags']).should.have.length_of(1)
+    response = client.describe_tags(ResourceName=cluster_arn)
+    list(response['TaggedResources']).should.have.length_of(1)
+
+
+@mock_ec2
+@mock_redshift
+def test_describe_tags_all_resource_types():
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+    vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16')
+    subnet = ec2.create_subnet(VpcId=vpc.id, CidrBlock='10.0.0.0/24')
+    client = boto3.client('redshift', region_name='us-east-1')
+    response = client.describe_tags()
+    list(response['TaggedResources']).should.have.length_of(0)
+    client.create_cluster_subnet_group(
+        ClusterSubnetGroupName='my_subnet_group',
+        Description='This is my subnet group',
+        SubnetIds=[subnet.id],
+        Tags=[{'Key': 'tag_key',
+               'Value': 'tag_value'}]
+    )
+    client.create_cluster_security_group(
+        ClusterSecurityGroupName="security_group1",
+        Description="This is my security group",
+        Tags=[{'Key': 'tag_key',
+               'Value': 'tag_value'}]
+    )
+    client.create_cluster(
+        DBName='test',
+        ClusterIdentifier='my_cluster',
+        ClusterType='single-node',
+        NodeType='ds2.xlarge',
+        
MasterUsername='user', + MasterUserPassword='password', + Tags=[{'Key': 'tag_key', + 'Value': 'tag_value'}] + ) + client.create_cluster_snapshot( + SnapshotIdentifier='my_snapshot', + ClusterIdentifier='my_cluster', + Tags=[{'Key': 'tag_key', + 'Value': 'tag_value'}] + ) + client.create_cluster_parameter_group( + ParameterGroupName="my_parameter_group", + ParameterGroupFamily="redshift-1.0", + Description="This is my parameter group", + Tags=[{'Key': 'tag_key', + 'Value': 'tag_value'}] + ) + response = client.describe_tags() + expected_types = ['cluster', 'parametergroup', 'securitygroup', 'snapshot', 'subnetgroup'] + tagged_resources = response['TaggedResources'] + returned_types = [resource['ResourceType'] for resource in tagged_resources] + list(tagged_resources).should.have.length_of(len(expected_types)) + set(returned_types).should.equal(set(expected_types)) + + +@mock_redshift +def test_tagged_resource_not_found_error(): + client = boto3.client('redshift', region_name='us-east-1') + + cluster_arn = 'arn:aws:redshift:us-east-1::cluster:fake' + client.describe_tags.when.called_with( + ResourceName=cluster_arn + ).should.throw(ClientError, 'cluster (fake) not found.') + + snapshot_arn = 'arn:aws:redshift:us-east-1::snapshot:cluster-id/snap-id' + client.delete_tags.when.called_with( + ResourceName=snapshot_arn, + TagKeys=['test'] + ).should.throw(ClientError, 'snapshot (snap-id) not found.') + + client.describe_tags.when.called_with( + ResourceType='cluster' + ).should.throw(ClientError, "resource of type 'cluster' not found.") + + client.describe_tags.when.called_with( + ResourceName='bad:arn' + ).should.throw(ClientError, "Tagging is not supported for this type of resource") + + +@mock_redshift +def test_enable_snapshot_copy(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + ClusterIdentifier='test', + ClusterType='single-node', + DBName='test', + Encrypted=True, + MasterUsername='user', + MasterUserPassword='password', + NodeType='ds2.xlarge', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + RetentionPeriod=3, + SnapshotCopyGrantName='copy-us-east-1-to-us-west-2' + ) + response = client.describe_clusters(ClusterIdentifier='test') + cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] + cluster_snapshot_copy_status['RetentionPeriod'].should.equal(3) + cluster_snapshot_copy_status['DestinationRegion'].should.equal('us-west-2') + cluster_snapshot_copy_status['SnapshotCopyGrantName'].should.equal('copy-us-east-1-to-us-west-2') + + +@mock_redshift +def test_enable_snapshot_copy_unencrypted(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + ClusterIdentifier='test', + ClusterType='single-node', + DBName='test', + MasterUsername='user', + MasterUserPassword='password', + NodeType='ds2.xlarge', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + ) + response = client.describe_clusters(ClusterIdentifier='test') + cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] + cluster_snapshot_copy_status['RetentionPeriod'].should.equal(7) + cluster_snapshot_copy_status['DestinationRegion'].should.equal('us-west-2') + + +@mock_redshift +def test_disable_snapshot_copy(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + DBName='test', + ClusterIdentifier='test', + ClusterType='single-node', + NodeType='ds2.xlarge', + 
MasterUsername='user', + MasterUserPassword='password', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + RetentionPeriod=3, + SnapshotCopyGrantName='copy-us-east-1-to-us-west-2', + ) + client.disable_snapshot_copy( + ClusterIdentifier='test', + ) + response = client.describe_clusters(ClusterIdentifier='test') + response['Clusters'][0].shouldnt.contain('ClusterSnapshotCopyStatus') + + +@mock_redshift +def test_modify_snapshot_copy_retention_period(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + DBName='test', + ClusterIdentifier='test', + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='user', + MasterUserPassword='password', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + RetentionPeriod=3, + SnapshotCopyGrantName='copy-us-east-1-to-us-west-2', + ) + client.modify_snapshot_copy_retention_period( + ClusterIdentifier='test', + RetentionPeriod=5, + ) + response = client.describe_clusters(ClusterIdentifier='test') + cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] + cluster_snapshot_copy_status['RetentionPeriod'].should.equal(5) diff --git a/tests/test_redshift/test_server.py b/tests/test_redshift/test_server.py index c37e9cab7..47ccdc5f3 100644 --- a/tests/test_redshift/test_server.py +++ b/tests/test_redshift/test_server.py @@ -1,22 +1,22 @@ -from __future__ import unicode_literals - -import json -import sure # noqa - -import moto.server as server -from moto import mock_redshift - -''' -Test the different server responses -''' - - -@mock_redshift -def test_describe_clusters(): - backend = server.create_backend_app("redshift") - test_client = backend.test_client() - - res = test_client.get('/?Action=DescribeClusters') - - result = res.data.decode("utf-8") - result.should.contain("") +from __future__ import unicode_literals + +import json +import sure # noqa + +import moto.server as server +from moto import mock_redshift + +''' +Test the different server responses +''' + + +@mock_redshift +def test_describe_clusters(): + backend = server.create_backend_app("redshift") + test_client = backend.test_client() + + res = test_client.get('/?Action=DescribeClusters') + + result = res.data.decode("utf-8") + result.should.contain("") diff --git a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py index 759063329..3961d05bc 100644 --- a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py +++ b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py @@ -1,285 +1,285 @@ -from __future__ import unicode_literals - -import boto3 -import sure # noqa -from moto import mock_resourcegroupstaggingapi, mock_s3, mock_ec2, mock_elbv2 - - -@mock_s3 -@mock_resourcegroupstaggingapi -def test_get_resources_s3(): - # Tests pagination - s3_client = boto3.client('s3', region_name='eu-central-1') - - # Will end up having key1,key2,key3,key4 - response_keys = set() - - # Create 4 buckets - for i in range(1, 5): - i_str = str(i) - s3_client.create_bucket(Bucket='test_bucket' + i_str) - s3_client.put_bucket_tagging( - Bucket='test_bucket' + i_str, - Tagging={'TagSet': [{'Key': 'key' + i_str, 'Value': 'value' + i_str}]} - ) - response_keys.add('key' + i_str) - - rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') - resp = rtapi.get_resources(ResourcesPerPage=2) - for resource in 
resp['ResourceTagMappingList']: - response_keys.remove(resource['Tags'][0]['Key']) - - response_keys.should.have.length_of(2) - - resp = rtapi.get_resources( - ResourcesPerPage=2, - PaginationToken=resp['PaginationToken'] - ) - for resource in resp['ResourceTagMappingList']: - response_keys.remove(resource['Tags'][0]['Key']) - - response_keys.should.have.length_of(0) - - -@mock_ec2 -@mock_resourcegroupstaggingapi -def test_get_resources_ec2(): - client = boto3.client('ec2', region_name='eu-central-1') - - instances = client.run_instances( - ImageId='ami-123', - MinCount=1, - MaxCount=1, - InstanceType='t2.micro', - TagSpecifications=[ - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG1', - 'Value': 'MY_VALUE1', - }, - { - 'Key': 'MY_TAG2', - 'Value': 'MY_VALUE2', - }, - ], - }, - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG3', - 'Value': 'MY_VALUE3', - }, - ] - }, - ], - ) - instance_id = instances['Instances'][0]['InstanceId'] - image_id = client.create_image(Name='testami', InstanceId=instance_id)['ImageId'] - - client.create_tags( - Resources=[image_id], - Tags=[{'Key': 'ami', 'Value': 'test'}] - ) - - rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') - resp = rtapi.get_resources() - # Check we have 1 entry for Instance, 1 Entry for AMI - resp['ResourceTagMappingList'].should.have.length_of(2) - - # 1 Entry for AMI - resp = rtapi.get_resources(ResourceTypeFilters=['ec2:image']) - resp['ResourceTagMappingList'].should.have.length_of(1) - resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('image/') - - # As were iterating the same data, this rules out that the test above was a fluke - resp = rtapi.get_resources(ResourceTypeFilters=['ec2:instance']) - resp['ResourceTagMappingList'].should.have.length_of(1) - resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('instance/') - - # Basic test of tag filters - resp = rtapi.get_resources(TagFilters=[{'Key': 'MY_TAG1', 'Values': ['MY_VALUE1', 'some_other_value']}]) - resp['ResourceTagMappingList'].should.have.length_of(1) - resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('instance/') - - -@mock_ec2 -@mock_resourcegroupstaggingapi -def test_get_tag_keys_ec2(): - client = boto3.client('ec2', region_name='eu-central-1') - - client.run_instances( - ImageId='ami-123', - MinCount=1, - MaxCount=1, - InstanceType='t2.micro', - TagSpecifications=[ - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG1', - 'Value': 'MY_VALUE1', - }, - { - 'Key': 'MY_TAG2', - 'Value': 'MY_VALUE2', - }, - ], - }, - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG3', - 'Value': 'MY_VALUE3', - }, - ] - }, - ], - ) - - rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') - resp = rtapi.get_tag_keys() - - resp['TagKeys'].should.contain('MY_TAG1') - resp['TagKeys'].should.contain('MY_TAG2') - resp['TagKeys'].should.contain('MY_TAG3') - - # TODO test pagenation - - -@mock_ec2 -@mock_resourcegroupstaggingapi -def test_get_tag_values_ec2(): - client = boto3.client('ec2', region_name='eu-central-1') - - client.run_instances( - ImageId='ami-123', - MinCount=1, - MaxCount=1, - InstanceType='t2.micro', - TagSpecifications=[ - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG1', - 'Value': 'MY_VALUE1', - }, - { - 'Key': 'MY_TAG2', - 'Value': 'MY_VALUE2', - }, - ], - }, - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG3', - 'Value': 'MY_VALUE3', - }, - ] - }, - ], - ) - client.run_instances( - 
ImageId='ami-123', - MinCount=1, - MaxCount=1, - InstanceType='t2.micro', - TagSpecifications=[ - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG1', - 'Value': 'MY_VALUE4', - }, - { - 'Key': 'MY_TAG2', - 'Value': 'MY_VALUE5', - }, - ], - }, - { - 'ResourceType': 'instance', - 'Tags': [ - { - 'Key': 'MY_TAG3', - 'Value': 'MY_VALUE6', - }, - ] - }, - ], - ) - - rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') - resp = rtapi.get_tag_values(Key='MY_TAG1') - - resp['TagValues'].should.contain('MY_VALUE1') - resp['TagValues'].should.contain('MY_VALUE4') - -@mock_ec2 -@mock_elbv2 -@mock_resourcegroupstaggingapi -def test_get_resources_elbv2(): - conn = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - security_group = ec2.create_security_group( - GroupName='a-security-group', Description='First One') - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') - subnet1 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1a') - subnet2 = ec2.create_subnet( - VpcId=vpc.id, - CidrBlock='172.28.7.192/26', - AvailabilityZone='us-east-1b') - - conn.create_load_balancer( - Name='my-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - Tags=[ - { - 'Key': 'key_name', - 'Value': 'a_value' - }, - { - 'Key': 'key_2', - 'Value': 'val2' - } - ] - ) - - conn.create_load_balancer( - Name='my-other-lb', - Subnets=[subnet1.id, subnet2.id], - SecurityGroups=[security_group.id], - Scheme='internal', - ) - - rtapi = boto3.client('resourcegroupstaggingapi', region_name='us-east-1') - - resp = rtapi.get_resources(ResourceTypeFilters=['elasticloadbalancer:loadbalancer']) - - resp['ResourceTagMappingList'].should.have.length_of(2) - resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('loadbalancer/') - resp = rtapi.get_resources( - ResourceTypeFilters=['elasticloadbalancer:loadbalancer'], - TagFilters=[{ - 'Key': 'key_name' - }] - ) - - resp['ResourceTagMappingList'].should.have.length_of(1) - resp['ResourceTagMappingList'][0]['Tags'].should.contain({'Key': 'key_name', 'Value': 'a_value'}) - - # TODO test pagenation +from __future__ import unicode_literals + +import boto3 +import sure # noqa +from moto import mock_resourcegroupstaggingapi, mock_s3, mock_ec2, mock_elbv2 + + +@mock_s3 +@mock_resourcegroupstaggingapi +def test_get_resources_s3(): + # Tests pagination + s3_client = boto3.client('s3', region_name='eu-central-1') + + # Will end up having key1,key2,key3,key4 + response_keys = set() + + # Create 4 buckets + for i in range(1, 5): + i_str = str(i) + s3_client.create_bucket(Bucket='test_bucket' + i_str) + s3_client.put_bucket_tagging( + Bucket='test_bucket' + i_str, + Tagging={'TagSet': [{'Key': 'key' + i_str, 'Value': 'value' + i_str}]} + ) + response_keys.add('key' + i_str) + + rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') + resp = rtapi.get_resources(ResourcesPerPage=2) + for resource in resp['ResourceTagMappingList']: + response_keys.remove(resource['Tags'][0]['Key']) + + response_keys.should.have.length_of(2) + + resp = rtapi.get_resources( + ResourcesPerPage=2, + PaginationToken=resp['PaginationToken'] + ) + for resource in resp['ResourceTagMappingList']: + response_keys.remove(resource['Tags'][0]['Key']) + + response_keys.should.have.length_of(0) + + +@mock_ec2 +@mock_resourcegroupstaggingapi +def test_get_resources_ec2(): + client = boto3.client('ec2', 
region_name='eu-central-1')
+
+    instances = client.run_instances(
+        ImageId='ami-123',
+        MinCount=1,
+        MaxCount=1,
+        InstanceType='t2.micro',
+        TagSpecifications=[
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG1',
+                        'Value': 'MY_VALUE1',
+                    },
+                    {
+                        'Key': 'MY_TAG2',
+                        'Value': 'MY_VALUE2',
+                    },
+                ],
+            },
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG3',
+                        'Value': 'MY_VALUE3',
+                    },
+                ]
+            },
+        ],
+    )
+    instance_id = instances['Instances'][0]['InstanceId']
+    image_id = client.create_image(Name='testami', InstanceId=instance_id)['ImageId']
+
+    client.create_tags(
+        Resources=[image_id],
+        Tags=[{'Key': 'ami', 'Value': 'test'}]
+    )
+
+    rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1')
+    resp = rtapi.get_resources()
+    # Check we have 1 entry for the instance, 1 entry for the AMI
+    resp['ResourceTagMappingList'].should.have.length_of(2)
+
+    # 1 entry for the AMI
+    resp = rtapi.get_resources(ResourceTypeFilters=['ec2:image'])
+    resp['ResourceTagMappingList'].should.have.length_of(1)
+    resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('image/')
+
+    # As we're iterating the same data, this rules out that the test above was a fluke
+    resp = rtapi.get_resources(ResourceTypeFilters=['ec2:instance'])
+    resp['ResourceTagMappingList'].should.have.length_of(1)
+    resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('instance/')
+
+    # Basic test of tag filters
+    resp = rtapi.get_resources(TagFilters=[{'Key': 'MY_TAG1', 'Values': ['MY_VALUE1', 'some_other_value']}])
+    resp['ResourceTagMappingList'].should.have.length_of(1)
+    resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('instance/')
+
+
+@mock_ec2
+@mock_resourcegroupstaggingapi
+def test_get_tag_keys_ec2():
+    client = boto3.client('ec2', region_name='eu-central-1')
+
+    client.run_instances(
+        ImageId='ami-123',
+        MinCount=1,
+        MaxCount=1,
+        InstanceType='t2.micro',
+        TagSpecifications=[
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG1',
+                        'Value': 'MY_VALUE1',
+                    },
+                    {
+                        'Key': 'MY_TAG2',
+                        'Value': 'MY_VALUE2',
+                    },
+                ],
+            },
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG3',
+                        'Value': 'MY_VALUE3',
+                    },
+                ]
+            },
+        ],
+    )
+
+    rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1')
+    resp = rtapi.get_tag_keys()
+
+    resp['TagKeys'].should.contain('MY_TAG1')
+    resp['TagKeys'].should.contain('MY_TAG2')
+    resp['TagKeys'].should.contain('MY_TAG3')
+
+    # TODO test pagination
+
+
+@mock_ec2
+@mock_resourcegroupstaggingapi
+def test_get_tag_values_ec2():
+    client = boto3.client('ec2', region_name='eu-central-1')
+
+    client.run_instances(
+        ImageId='ami-123',
+        MinCount=1,
+        MaxCount=1,
+        InstanceType='t2.micro',
+        TagSpecifications=[
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG1',
+                        'Value': 'MY_VALUE1',
+                    },
+                    {
+                        'Key': 'MY_TAG2',
+                        'Value': 'MY_VALUE2',
+                    },
+                ],
+            },
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG3',
+                        'Value': 'MY_VALUE3',
+                    },
+                ]
+            },
+        ],
+    )
+    client.run_instances(
+        ImageId='ami-123',
+        MinCount=1,
+        MaxCount=1,
+        InstanceType='t2.micro',
+        TagSpecifications=[
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG1',
+                        'Value': 'MY_VALUE4',
+                    },
+                    {
+                        'Key': 'MY_TAG2',
+                        'Value': 'MY_VALUE5',
+                    },
+                ],
+            },
+            {
+                'ResourceType': 'instance',
+                'Tags': [
+                    {
+                        'Key': 'MY_TAG3',
+                        'Value': 'MY_VALUE6',
+                    },
+                ]
+            },
+        ],
+    )
+
+    rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1')
+    resp = 
rtapi.get_tag_values(Key='MY_TAG1')
+
+    resp['TagValues'].should.contain('MY_VALUE1')
+    resp['TagValues'].should.contain('MY_VALUE4')
+
+@mock_ec2
+@mock_elbv2
+@mock_resourcegroupstaggingapi
+def test_get_resources_elbv2():
+    conn = boto3.client('elbv2', region_name='us-east-1')
+    ec2 = boto3.resource('ec2', region_name='us-east-1')
+
+    security_group = ec2.create_security_group(
+        GroupName='a-security-group', Description='First One')
+    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default')
+    subnet1 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1a')
+    subnet2 = ec2.create_subnet(
+        VpcId=vpc.id,
+        CidrBlock='172.28.7.192/26',
+        AvailabilityZone='us-east-1b')
+
+    conn.create_load_balancer(
+        Name='my-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+        Tags=[
+            {
+                'Key': 'key_name',
+                'Value': 'a_value'
+            },
+            {
+                'Key': 'key_2',
+                'Value': 'val2'
+            }
+        ]
+    )
+
+    conn.create_load_balancer(
+        Name='my-other-lb',
+        Subnets=[subnet1.id, subnet2.id],
+        SecurityGroups=[security_group.id],
+        Scheme='internal',
+    )
+
+    rtapi = boto3.client('resourcegroupstaggingapi', region_name='us-east-1')
+
+    resp = rtapi.get_resources(ResourceTypeFilters=['elasticloadbalancer:loadbalancer'])
+
+    resp['ResourceTagMappingList'].should.have.length_of(2)
+    resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('loadbalancer/')
+    resp = rtapi.get_resources(
+        ResourceTypeFilters=['elasticloadbalancer:loadbalancer'],
+        TagFilters=[{
+            'Key': 'key_name'
+        }]
+    )
+
+    resp['ResourceTagMappingList'].should.have.length_of(1)
+    resp['ResourceTagMappingList'][0]['Tags'].should.contain({'Key': 'key_name', 'Value': 'a_value'})
+
+    # TODO test pagination
diff --git a/tests/test_resourcegroupstaggingapi/test_server.py b/tests/test_resourcegroupstaggingapi/test_server.py
index 311b1f03e..80a74b0b8 100644
--- a/tests/test_resourcegroupstaggingapi/test_server.py
+++ b/tests/test_resourcegroupstaggingapi/test_server.py
@@ -1,24 +1,24 @@
-from __future__ import unicode_literals
-
-import sure  # noqa
-
-import moto.server as server
-
-'''
-Test the different server responses
-'''
-
-
-def test_resourcegroupstaggingapi_list():
-    backend = server.create_backend_app("resourcegroupstaggingapi")
-    test_client = backend.test_client()
-    # do test
-
-    headers = {
-        'X-Amz-Target': 'ResourceGroupsTaggingAPI_20170126.GetResources',
-        'X-Amz-Date': '20171114T234623Z'
-    }
-    resp = test_client.post('/', headers=headers, data='{}')
-
-    assert resp.status_code == 200
-    assert b'ResourceTagMappingList' in resp.data
+from __future__ import unicode_literals
+
+import sure  # noqa
+
+import moto.server as server
+
+'''
+Test the different server responses
+'''
+
+
+def test_resourcegroupstaggingapi_list():
+    backend = server.create_backend_app("resourcegroupstaggingapi")
+    test_client = backend.test_client()
+    # do test
+
+    headers = {
+        'X-Amz-Target': 'ResourceGroupsTaggingAPI_20170126.GetResources',
+        'X-Amz-Date': '20171114T234623Z'
+    }
+    resp = test_client.post('/', headers=headers, data='{}')
+
+    assert resp.status_code == 200
+    assert b'ResourceTagMappingList' in resp.data
diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py
index 76217b9d9..1a76a5454 100644
--- a/tests/test_route53/test_route53.py
+++ b/tests/test_route53/test_route53.py
@@ -1,711 +1,711 @@
-from __future__ import unicode_literals
-
-import boto
-import boto3
-from boto.route53.healthcheck import HealthCheck
-from 
boto.route53.record import ResourceRecordSets - -import sure # noqa - -import uuid - -import botocore -from nose.tools import assert_raises - -from moto import mock_route53, mock_route53_deprecated - - -@mock_route53_deprecated -def test_hosted_zone(): - conn = boto.connect_route53('the_key', 'the_secret') - firstzone = conn.create_hosted_zone("testdns.aws.com") - zones = conn.get_all_hosted_zones() - len(zones["ListHostedZonesResponse"]["HostedZones"]).should.equal(1) - - conn.create_hosted_zone("testdns1.aws.com") - zones = conn.get_all_hosted_zones() - len(zones["ListHostedZonesResponse"]["HostedZones"]).should.equal(2) - - id1 = firstzone["CreateHostedZoneResponse"][ - "HostedZone"]["Id"].split("/")[-1] - zone = conn.get_hosted_zone(id1) - zone["GetHostedZoneResponse"]["HostedZone"][ - "Name"].should.equal("testdns.aws.com.") - - conn.delete_hosted_zone(id1) - zones = conn.get_all_hosted_zones() - len(zones["ListHostedZonesResponse"]["HostedZones"]).should.equal(1) - - conn.get_hosted_zone.when.called_with("abcd").should.throw( - boto.route53.exception.DNSServerError, "404 Not Found") - - -@mock_route53_deprecated -def test_rrset(): - conn = boto.connect_route53('the_key', 'the_secret') - - conn.get_all_rrsets.when.called_with("abcd", type="A").should.throw( - boto.route53.exception.DNSServerError, "404 Not Found") - - zone = conn.create_hosted_zone("testdns.aws.com") - zoneid = zone["CreateHostedZoneResponse"][ - "HostedZone"]["Id"].split("/")[-1] - - changes = ResourceRecordSets(conn, zoneid) - change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A") - change.add_value("1.2.3.4") - changes.commit() - - rrsets = conn.get_all_rrsets(zoneid, type="A") - rrsets.should.have.length_of(1) - rrsets[0].resource_records[0].should.equal('1.2.3.4') - - rrsets = conn.get_all_rrsets(zoneid, type="CNAME") - rrsets.should.have.length_of(0) - - changes = ResourceRecordSets(conn, zoneid) - changes.add_change("DELETE", "foo.bar.testdns.aws.com", "A") - change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A") - change.add_value("5.6.7.8") - changes.commit() - - rrsets = conn.get_all_rrsets(zoneid, type="A") - rrsets.should.have.length_of(1) - rrsets[0].resource_records[0].should.equal('5.6.7.8') - - changes = ResourceRecordSets(conn, zoneid) - changes.add_change("DELETE", "foo.bar.testdns.aws.com", "A") - changes.commit() - - rrsets = conn.get_all_rrsets(zoneid) - rrsets.should.have.length_of(0) - - changes = ResourceRecordSets(conn, zoneid) - change = changes.add_change("UPSERT", "foo.bar.testdns.aws.com", "A") - change.add_value("1.2.3.4") - changes.commit() - - rrsets = conn.get_all_rrsets(zoneid, type="A") - rrsets.should.have.length_of(1) - rrsets[0].resource_records[0].should.equal('1.2.3.4') - - changes = ResourceRecordSets(conn, zoneid) - change = changes.add_change("UPSERT", "foo.bar.testdns.aws.com", "A") - change.add_value("5.6.7.8") - changes.commit() - - rrsets = conn.get_all_rrsets(zoneid, type="A") - rrsets.should.have.length_of(1) - rrsets[0].resource_records[0].should.equal('5.6.7.8') - - changes = ResourceRecordSets(conn, zoneid) - changes.add_change("DELETE", "foo.bar.testdns.aws.com", "A") - changes.commit() - - changes = ResourceRecordSets(conn, zoneid) - change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A") - change.add_value("1.2.3.4") - change = changes.add_change("CREATE", "bar.foo.testdns.aws.com", "A") - change.add_value("5.6.7.8") - changes.commit() - - rrsets = conn.get_all_rrsets(zoneid, type="A") - rrsets.should.have.length_of(2) - 
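A note on the name/type lookups that follow: boto's get_all_rrsets maps onto Route 53's ListResourceRecordSets, where StartRecordName/StartRecordType mark a starting position in the zone's sorted record list rather than an exact-match filter, and the mock applies a plain string comparison (real Route 53 sorts by DNS name with labels reversed, so the exact window can differ). That is why the query starting at "bar.foo.testdns.aws.com" matches two record sets, while the query starting at "foo.foo.testdns.aws.com", which sorts after "foo.bar.testdns.aws.com", matches none. A minimal boto3 sketch of the same lookup, assuming zone_id came from an earlier create_hosted_zone call:

    import boto3

    route53 = boto3.client('route53', region_name='us-east-1')

    def rrsets_from(zone_id, name, record_type):
        # StartRecordName/StartRecordType act as a cursor into the zone's
        # sorted record sets, not as an equality filter on name and type.
        response = route53.list_resource_record_sets(
            HostedZoneId=zone_id,
            StartRecordName=name,
            StartRecordType=record_type,
        )
        return response['ResourceRecordSets']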
- rrsets = conn.get_all_rrsets( - zoneid, name="foo.bar.testdns.aws.com", type="A") - rrsets.should.have.length_of(1) - rrsets[0].resource_records[0].should.equal('1.2.3.4') - - rrsets = conn.get_all_rrsets( - zoneid, name="bar.foo.testdns.aws.com", type="A") - rrsets.should.have.length_of(2) - resource_records = [rr for rr_set in rrsets for rr in rr_set.resource_records] - resource_records.should.contain('1.2.3.4') - resource_records.should.contain('5.6.7.8') - - rrsets = conn.get_all_rrsets( - zoneid, name="foo.foo.testdns.aws.com", type="A") - rrsets.should.have.length_of(0) - - -@mock_route53_deprecated -def test_rrset_with_multiple_values(): - conn = boto.connect_route53('the_key', 'the_secret') - zone = conn.create_hosted_zone("testdns.aws.com") - zoneid = zone["CreateHostedZoneResponse"][ - "HostedZone"]["Id"].split("/")[-1] - - changes = ResourceRecordSets(conn, zoneid) - change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A") - change.add_value("1.2.3.4") - change.add_value("5.6.7.8") - changes.commit() - - rrsets = conn.get_all_rrsets(zoneid, type="A") - rrsets.should.have.length_of(1) - set(rrsets[0].resource_records).should.equal(set(['1.2.3.4', '5.6.7.8'])) - - -@mock_route53_deprecated -def test_alias_rrset(): - conn = boto.connect_route53('the_key', 'the_secret') - zone = conn.create_hosted_zone("testdns.aws.com") - zoneid = zone["CreateHostedZoneResponse"][ - "HostedZone"]["Id"].split("/")[-1] - - changes = ResourceRecordSets(conn, zoneid) - changes.add_change("CREATE", "foo.alias.testdns.aws.com", "A", - alias_hosted_zone_id="Z3DG6IL3SJCGPX", alias_dns_name="foo.testdns.aws.com") - changes.add_change("CREATE", "bar.alias.testdns.aws.com", "CNAME", - alias_hosted_zone_id="Z3DG6IL3SJCGPX", alias_dns_name="bar.testdns.aws.com") - changes.commit() - - rrsets = conn.get_all_rrsets(zoneid, type="A") - rrset_records = [(rr_set.name, rr) for rr_set in rrsets for rr in rr_set.resource_records] - rrset_records.should.have.length_of(2) - rrset_records.should.contain(('foo.alias.testdns.aws.com', 'foo.testdns.aws.com')) - rrset_records.should.contain(('bar.alias.testdns.aws.com', 'bar.testdns.aws.com')) - rrsets[0].resource_records[0].should.equal('foo.testdns.aws.com') - rrsets = conn.get_all_rrsets(zoneid, type="CNAME") - rrsets.should.have.length_of(1) - rrsets[0].resource_records[0].should.equal('bar.testdns.aws.com') - - -@mock_route53_deprecated -def test_create_health_check(): - conn = boto.connect_route53('the_key', 'the_secret') - - check = HealthCheck( - ip_addr="10.0.0.25", - port=80, - hc_type="HTTP", - resource_path="/", - fqdn="example.com", - string_match="a good response", - request_interval=10, - failure_threshold=2, - ) - conn.create_health_check(check) - - checks = conn.get_list_health_checks()['ListHealthChecksResponse'][ - 'HealthChecks'] - list(checks).should.have.length_of(1) - check = checks[0] - config = check['HealthCheckConfig'] - config['IPAddress'].should.equal("10.0.0.25") - config['Port'].should.equal("80") - config['Type'].should.equal("HTTP") - config['ResourcePath'].should.equal("/") - config['FullyQualifiedDomainName'].should.equal("example.com") - config['SearchString'].should.equal("a good response") - config['RequestInterval'].should.equal("10") - config['FailureThreshold'].should.equal("2") - - -@mock_route53_deprecated -def test_delete_health_check(): - conn = boto.connect_route53('the_key', 'the_secret') - - check = HealthCheck( - ip_addr="10.0.0.25", - port=80, - hc_type="HTTP", - resource_path="/", - ) - 
conn.create_health_check(check) - - checks = conn.get_list_health_checks()['ListHealthChecksResponse'][ - 'HealthChecks'] - list(checks).should.have.length_of(1) - health_check_id = checks[0]['Id'] - - conn.delete_health_check(health_check_id) - checks = conn.get_list_health_checks()['ListHealthChecksResponse'][ - 'HealthChecks'] - list(checks).should.have.length_of(0) - - -@mock_route53_deprecated -def test_use_health_check_in_resource_record_set(): - conn = boto.connect_route53('the_key', 'the_secret') - - check = HealthCheck( - ip_addr="10.0.0.25", - port=80, - hc_type="HTTP", - resource_path="/", - ) - check = conn.create_health_check( - check)['CreateHealthCheckResponse']['HealthCheck'] - check_id = check['Id'] - - zone = conn.create_hosted_zone("testdns.aws.com") - zone_id = zone["CreateHostedZoneResponse"][ - "HostedZone"]["Id"].split("/")[-1] - - changes = ResourceRecordSets(conn, zone_id) - change = changes.add_change( - "CREATE", "foo.bar.testdns.aws.com", "A", health_check=check_id) - change.add_value("1.2.3.4") - changes.commit() - - record_sets = conn.get_all_rrsets(zone_id) - record_sets[0].health_check.should.equal(check_id) - - -@mock_route53_deprecated -def test_hosted_zone_comment_preserved(): - conn = boto.connect_route53('the_key', 'the_secret') - - firstzone = conn.create_hosted_zone( - "testdns.aws.com.", comment="test comment") - zone_id = firstzone["CreateHostedZoneResponse"][ - "HostedZone"]["Id"].split("/")[-1] - - hosted_zone = conn.get_hosted_zone(zone_id) - hosted_zone["GetHostedZoneResponse"]["HostedZone"][ - "Config"]["Comment"].should.equal("test comment") - - hosted_zones = conn.get_all_hosted_zones() - hosted_zones["ListHostedZonesResponse"]["HostedZones"][ - 0]["Config"]["Comment"].should.equal("test comment") - - zone = conn.get_zone("testdns.aws.com.") - zone.config["Comment"].should.equal("test comment") - - -@mock_route53_deprecated -def test_deleting_weighted_route(): - conn = boto.connect_route53() - - conn.create_hosted_zone("testdns.aws.com.") - zone = conn.get_zone("testdns.aws.com.") - - zone.add_cname("cname.testdns.aws.com", "example.com", - identifier=('success-test-foo', '50')) - zone.add_cname("cname.testdns.aws.com", "example.com", - identifier=('success-test-bar', '50')) - - cnames = zone.get_cname('cname.testdns.aws.com.', all=True) - cnames.should.have.length_of(2) - foo_cname = [cname for cname in cnames if cname.identifier == - 'success-test-foo'][0] - - zone.delete_record(foo_cname) - cname = zone.get_cname('cname.testdns.aws.com.', all=True) - # When get_cname only had one result, it returns just that result instead - # of a list. - cname.identifier.should.equal('success-test-bar') - - -@mock_route53_deprecated -def test_deleting_latency_route(): - conn = boto.connect_route53() - - conn.create_hosted_zone("testdns.aws.com.") - zone = conn.get_zone("testdns.aws.com.") - - zone.add_cname("cname.testdns.aws.com", "example.com", - identifier=('success-test-foo', 'us-west-2')) - zone.add_cname("cname.testdns.aws.com", "example.com", - identifier=('success-test-bar', 'us-west-1')) - - cnames = zone.get_cname('cname.testdns.aws.com.', all=True) - cnames.should.have.length_of(2) - foo_cname = [cname for cname in cnames if cname.identifier == - 'success-test-foo'][0] - foo_cname.region.should.equal('us-west-2') - - zone.delete_record(foo_cname) - cname = zone.get_cname('cname.testdns.aws.com.', all=True) - # When get_cname only had one result, it returns just that result instead - # of a list. 
- cname.identifier.should.equal('success-test-bar') - cname.region.should.equal('us-west-1') - - -@mock_route53_deprecated -def test_hosted_zone_private_zone_preserved(): - conn = boto.connect_route53('the_key', 'the_secret') - - firstzone = conn.create_hosted_zone( - "testdns.aws.com.", private_zone=True, vpc_id='vpc-fake', vpc_region='us-east-1') - zone_id = firstzone["CreateHostedZoneResponse"][ - "HostedZone"]["Id"].split("/")[-1] - - hosted_zone = conn.get_hosted_zone(zone_id) - # in (original) boto, these bools returned as strings. - hosted_zone["GetHostedZoneResponse"]["HostedZone"][ - "Config"]["PrivateZone"].should.equal('True') - - hosted_zones = conn.get_all_hosted_zones() - hosted_zones["ListHostedZonesResponse"]["HostedZones"][ - 0]["Config"]["PrivateZone"].should.equal('True') - - zone = conn.get_zone("testdns.aws.com.") - zone.config["PrivateZone"].should.equal('True') - - -@mock_route53 -def test_hosted_zone_private_zone_preserved_boto3(): - conn = boto3.client('route53', region_name='us-east-1') - # TODO: actually create_hosted_zone statements with PrivateZone=True, but without - # a _valid_ vpc-id should fail. - firstzone = conn.create_hosted_zone( - Name="testdns.aws.com.", - CallerReference=str(hash('foo')), - HostedZoneConfig=dict( - PrivateZone=True, - Comment="Test", - ) - ) - - zone_id = firstzone["HostedZone"]["Id"].split("/")[-1] - - hosted_zone = conn.get_hosted_zone(Id=zone_id) - hosted_zone["HostedZone"]["Config"]["PrivateZone"].should.equal(True) - - hosted_zones = conn.list_hosted_zones() - hosted_zones["HostedZones"][0]["Config"]["PrivateZone"].should.equal(True) - - hosted_zones = conn.list_hosted_zones_by_name(DNSName="testdns.aws.com.") - len(hosted_zones["HostedZones"]).should.equal(1) - hosted_zones["HostedZones"][0]["Config"]["PrivateZone"].should.equal(True) - - -@mock_route53 -def test_list_or_change_tags_for_resource_request(): - conn = boto3.client('route53', region_name='us-east-1') - health_check = conn.create_health_check( - CallerReference='foobar', - HealthCheckConfig={ - 'IPAddress': '192.0.2.44', - 'Port': 123, - 'Type': 'HTTP', - 'ResourcePath': '/', - 'RequestInterval': 30, - 'FailureThreshold': 123, - 'HealthThreshold': 123, - } - ) - healthcheck_id = health_check['HealthCheck']['Id'] - - tag1 = {"Key": "Deploy", "Value": "True"} - tag2 = {"Key": "Name", "Value": "UnitTest"} - - # Test adding a tag for a resource id - conn.change_tags_for_resource( - ResourceType='healthcheck', - ResourceId=healthcheck_id, - AddTags=[tag1, tag2] - ) - - # Check to make sure that the response has the 'ResourceTagSet' key - response = conn.list_tags_for_resource( - ResourceType='healthcheck', ResourceId=healthcheck_id) - response.should.contain('ResourceTagSet') - - # Validate that each key was added - response['ResourceTagSet']['Tags'].should.contain(tag1) - response['ResourceTagSet']['Tags'].should.contain(tag2) - - len(response['ResourceTagSet']['Tags']).should.equal(2) - - # Try to remove the tags - conn.change_tags_for_resource( - ResourceType='healthcheck', - ResourceId=healthcheck_id, - RemoveTagKeys=[tag1['Key']] - ) - - # Check to make sure that the response has the 'ResourceTagSet' key - response = conn.list_tags_for_resource( - ResourceType='healthcheck', ResourceId=healthcheck_id) - response.should.contain('ResourceTagSet') - response['ResourceTagSet']['Tags'].should_not.contain(tag1) - response['ResourceTagSet']['Tags'].should.contain(tag2) - - # Remove the second tag - conn.change_tags_for_resource( - ResourceType='healthcheck', - 
ResourceId=healthcheck_id, - RemoveTagKeys=[tag2['Key']] - ) - - response = conn.list_tags_for_resource( - ResourceType='healthcheck', ResourceId=healthcheck_id) - response['ResourceTagSet']['Tags'].should_not.contain(tag2) - - # Re-add the tags - conn.change_tags_for_resource( - ResourceType='healthcheck', - ResourceId=healthcheck_id, - AddTags=[tag1, tag2] - ) - - # Remove both - conn.change_tags_for_resource( - ResourceType='healthcheck', - ResourceId=healthcheck_id, - RemoveTagKeys=[tag1['Key'], tag2['Key']] - ) - - response = conn.list_tags_for_resource( - ResourceType='healthcheck', ResourceId=healthcheck_id) - response['ResourceTagSet']['Tags'].should.be.empty - - -@mock_route53 -def test_list_hosted_zones_by_name(): - conn = boto3.client('route53', region_name='us-east-1') - conn.create_hosted_zone( - Name="test.b.com.", - CallerReference=str(hash('foo')), - HostedZoneConfig=dict( - PrivateZone=True, - Comment="test com", - ) - ) - conn.create_hosted_zone( - Name="test.a.org.", - CallerReference=str(hash('bar')), - HostedZoneConfig=dict( - PrivateZone=True, - Comment="test org", - ) - ) - conn.create_hosted_zone( - Name="test.a.org.", - CallerReference=str(hash('bar')), - HostedZoneConfig=dict( - PrivateZone=True, - Comment="test org 2", - ) - ) - - # test lookup - zones = conn.list_hosted_zones_by_name(DNSName="test.b.com.") - len(zones["HostedZones"]).should.equal(1) - zones["HostedZones"][0]["Name"].should.equal("test.b.com.") - zones = conn.list_hosted_zones_by_name(DNSName="test.a.org.") - len(zones["HostedZones"]).should.equal(2) - zones["HostedZones"][0]["Name"].should.equal("test.a.org.") - zones["HostedZones"][1]["Name"].should.equal("test.a.org.") - - # test sort order - zones = conn.list_hosted_zones_by_name() - len(zones["HostedZones"]).should.equal(3) - zones["HostedZones"][0]["Name"].should.equal("test.b.com.") - zones["HostedZones"][1]["Name"].should.equal("test.a.org.") - zones["HostedZones"][2]["Name"].should.equal("test.a.org.") - - -@mock_route53 -def test_change_resource_record_sets_crud_valid(): - conn = boto3.client('route53', region_name='us-east-1') - conn.create_hosted_zone( - Name="db.", - CallerReference=str(hash('foo')), - HostedZoneConfig=dict( - PrivateZone=True, - Comment="db", - ) - ) - - zones = conn.list_hosted_zones_by_name(DNSName="db.") - len(zones["HostedZones"]).should.equal(1) - zones["HostedZones"][0]["Name"].should.equal("db.") - hosted_zone_id = zones["HostedZones"][0]["Id"] - - # Create A Record. 
- a_record_endpoint_payload = { - 'Comment': 'create A record prod.redis.db', - 'Changes': [ - { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'Name': 'prod.redis.db', - 'Type': 'A', - 'TTL': 10, - 'ResourceRecords': [{ - 'Value': '127.0.0.1' - }] - } - } - ] - } - conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=a_record_endpoint_payload) - - response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) - len(response['ResourceRecordSets']).should.equal(1) - a_record_detail = response['ResourceRecordSets'][0] - a_record_detail['Name'].should.equal('prod.redis.db') - a_record_detail['Type'].should.equal('A') - a_record_detail['TTL'].should.equal(10) - a_record_detail['ResourceRecords'].should.equal([{'Value': '127.0.0.1'}]) - - # Update type to CNAME - cname_record_endpoint_payload = { - 'Comment': 'Update to CNAME prod.redis.db', - 'Changes': [ - { - 'Action': 'UPSERT', - 'ResourceRecordSet': { - 'Name': 'prod.redis.db', - 'Type': 'CNAME', - 'TTL': 60, - 'ResourceRecords': [{ - 'Value': '192.168.1.1' - }] - } - } - ] - } - conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=cname_record_endpoint_payload) - - response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) - len(response['ResourceRecordSets']).should.equal(1) - cname_record_detail = response['ResourceRecordSets'][0] - cname_record_detail['Name'].should.equal('prod.redis.db') - cname_record_detail['Type'].should.equal('CNAME') - cname_record_detail['TTL'].should.equal(60) - cname_record_detail['ResourceRecords'].should.equal([{'Value': '192.168.1.1'}]) - - # Delete record. - delete_payload = { - 'Comment': 'delete prod.redis.db', - 'Changes': [ - { - 'Action': 'DELETE', - 'ResourceRecordSet': { - 'Name': 'prod.redis.db', - 'Type': 'CNAME', - } - } - ] - } - conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=delete_payload) - response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) - len(response['ResourceRecordSets']).should.equal(0) - - -@mock_route53 -def test_change_resource_record_invalid(): - conn = boto3.client('route53', region_name='us-east-1') - conn.create_hosted_zone( - Name="db.", - CallerReference=str(hash('foo')), - HostedZoneConfig=dict( - PrivateZone=True, - Comment="db", - ) - ) - - zones = conn.list_hosted_zones_by_name(DNSName="db.") - len(zones["HostedZones"]).should.equal(1) - zones["HostedZones"][0]["Name"].should.equal("db.") - hosted_zone_id = zones["HostedZones"][0]["Id"] - - invalid_a_record_payload = { - 'Comment': 'this should fail', - 'Changes': [ - { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'Name': 'prod.scooby.doo', - 'Type': 'A', - 'TTL': 10, - 'ResourceRecords': [{ - 'Value': '127.0.0.1' - }] - } - } - ] - } - - with assert_raises(botocore.exceptions.ClientError): - conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=invalid_a_record_payload) - - response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) - len(response['ResourceRecordSets']).should.equal(0) - - invalid_cname_record_payload = { - 'Comment': 'this should also fail', - 'Changes': [ - { - 'Action': 'UPSERT', - 'ResourceRecordSet': { - 'Name': 'prod.scooby.doo', - 'Type': 'CNAME', - 'TTL': 10, - 'ResourceRecords': [{ - 'Value': '127.0.0.1' - }] - } - } - ] - } - - with assert_raises(botocore.exceptions.ClientError): - conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=invalid_cname_record_payload) - - response = 
conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) - len(response['ResourceRecordSets']).should.equal(0) - - -@mock_route53 -def test_list_resource_record_sets_name_type_filters(): - conn = boto3.client('route53', region_name='us-east-1') - create_hosted_zone_response = conn.create_hosted_zone( - Name="db.", - CallerReference=str(hash('foo')), - HostedZoneConfig=dict( - PrivateZone=True, - Comment="db", - ) - ) - hosted_zone_id = create_hosted_zone_response['HostedZone']['Id'] - - def create_resource_record_set(rec_type, rec_name): - payload = { - 'Comment': 'create {} record {}'.format(rec_type, rec_name), - 'Changes': [ - { - 'Action': 'CREATE', - 'ResourceRecordSet': { - 'Name': rec_name, - 'Type': rec_type, - 'TTL': 10, - 'ResourceRecords': [{ - 'Value': '127.0.0.1' - }] - } - } - ] - } - conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=payload) - - # record_type, record_name - all_records = [ - ('A', 'a.a.db'), - ('A', 'a.b.db'), - ('A', 'b.b.db'), - ('CNAME', 'b.b.db'), - ('CNAME', 'b.c.db'), - ('CNAME', 'c.c.db') - ] - for record_type, record_name in all_records: - create_resource_record_set(record_type, record_name) - - start_with = 2 - response = conn.list_resource_record_sets( - HostedZoneId=hosted_zone_id, - StartRecordType=all_records[start_with][0], - StartRecordName=all_records[start_with][1] - ) - - returned_records = [(record['Type'], record['Name']) for record in response['ResourceRecordSets']] - len(returned_records).should.equal(len(all_records) - start_with) - for desired_record in all_records[start_with:]: - returned_records.should.contain(desired_record) +from __future__ import unicode_literals + +import boto +import boto3 +from boto.route53.healthcheck import HealthCheck +from boto.route53.record import ResourceRecordSets + +import sure # noqa + +import uuid + +import botocore +from nose.tools import assert_raises + +from moto import mock_route53, mock_route53_deprecated + + +@mock_route53_deprecated +def test_hosted_zone(): + conn = boto.connect_route53('the_key', 'the_secret') + firstzone = conn.create_hosted_zone("testdns.aws.com") + zones = conn.get_all_hosted_zones() + len(zones["ListHostedZonesResponse"]["HostedZones"]).should.equal(1) + + conn.create_hosted_zone("testdns1.aws.com") + zones = conn.get_all_hosted_zones() + len(zones["ListHostedZonesResponse"]["HostedZones"]).should.equal(2) + + id1 = firstzone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] + zone = conn.get_hosted_zone(id1) + zone["GetHostedZoneResponse"]["HostedZone"][ + "Name"].should.equal("testdns.aws.com.") + + conn.delete_hosted_zone(id1) + zones = conn.get_all_hosted_zones() + len(zones["ListHostedZonesResponse"]["HostedZones"]).should.equal(1) + + conn.get_hosted_zone.when.called_with("abcd").should.throw( + boto.route53.exception.DNSServerError, "404 Not Found") + + +@mock_route53_deprecated +def test_rrset(): + conn = boto.connect_route53('the_key', 'the_secret') + + conn.get_all_rrsets.when.called_with("abcd", type="A").should.throw( + boto.route53.exception.DNSServerError, "404 Not Found") + + zone = conn.create_hosted_zone("testdns.aws.com") + zoneid = zone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] + + changes = ResourceRecordSets(conn, zoneid) + change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A") + change.add_value("1.2.3.4") + changes.commit() + + rrsets = conn.get_all_rrsets(zoneid, type="A") + rrsets.should.have.length_of(1) + rrsets[0].resource_records[0].should.equal('1.2.3.4') + + 
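The remainder of this test replaces the record twice, once as a DELETE/CREATE pair and once via UPSERT, which creates the record set when it is absent and overwrites it otherwise. In the boto3 API the same replacement is a single ChangeBatch entry; a minimal sketch, assuming zone_id is already known:

    import boto3

    route53 = boto3.client('route53', region_name='us-east-1')

    def repoint_a_record(zone_id, name, new_value, ttl=10):
        # UPSERT makes the separate DELETE unnecessary: the record set is
        # created if missing and replaced in full if it already exists.
        route53.change_resource_record_sets(
            HostedZoneId=zone_id,
            ChangeBatch={
                'Changes': [{
                    'Action': 'UPSERT',
                    'ResourceRecordSet': {
                        'Name': name,
                        'Type': 'A',
                        'TTL': ttl,
                        'ResourceRecords': [{'Value': new_value}],
                    },
                }]
            },
        )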
rrsets = conn.get_all_rrsets(zoneid, type="CNAME") + rrsets.should.have.length_of(0) + + changes = ResourceRecordSets(conn, zoneid) + changes.add_change("DELETE", "foo.bar.testdns.aws.com", "A") + change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A") + change.add_value("5.6.7.8") + changes.commit() + + rrsets = conn.get_all_rrsets(zoneid, type="A") + rrsets.should.have.length_of(1) + rrsets[0].resource_records[0].should.equal('5.6.7.8') + + changes = ResourceRecordSets(conn, zoneid) + changes.add_change("DELETE", "foo.bar.testdns.aws.com", "A") + changes.commit() + + rrsets = conn.get_all_rrsets(zoneid) + rrsets.should.have.length_of(0) + + changes = ResourceRecordSets(conn, zoneid) + change = changes.add_change("UPSERT", "foo.bar.testdns.aws.com", "A") + change.add_value("1.2.3.4") + changes.commit() + + rrsets = conn.get_all_rrsets(zoneid, type="A") + rrsets.should.have.length_of(1) + rrsets[0].resource_records[0].should.equal('1.2.3.4') + + changes = ResourceRecordSets(conn, zoneid) + change = changes.add_change("UPSERT", "foo.bar.testdns.aws.com", "A") + change.add_value("5.6.7.8") + changes.commit() + + rrsets = conn.get_all_rrsets(zoneid, type="A") + rrsets.should.have.length_of(1) + rrsets[0].resource_records[0].should.equal('5.6.7.8') + + changes = ResourceRecordSets(conn, zoneid) + changes.add_change("DELETE", "foo.bar.testdns.aws.com", "A") + changes.commit() + + changes = ResourceRecordSets(conn, zoneid) + change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A") + change.add_value("1.2.3.4") + change = changes.add_change("CREATE", "bar.foo.testdns.aws.com", "A") + change.add_value("5.6.7.8") + changes.commit() + + rrsets = conn.get_all_rrsets(zoneid, type="A") + rrsets.should.have.length_of(2) + + rrsets = conn.get_all_rrsets( + zoneid, name="foo.bar.testdns.aws.com", type="A") + rrsets.should.have.length_of(1) + rrsets[0].resource_records[0].should.equal('1.2.3.4') + + rrsets = conn.get_all_rrsets( + zoneid, name="bar.foo.testdns.aws.com", type="A") + rrsets.should.have.length_of(2) + resource_records = [rr for rr_set in rrsets for rr in rr_set.resource_records] + resource_records.should.contain('1.2.3.4') + resource_records.should.contain('5.6.7.8') + + rrsets = conn.get_all_rrsets( + zoneid, name="foo.foo.testdns.aws.com", type="A") + rrsets.should.have.length_of(0) + + +@mock_route53_deprecated +def test_rrset_with_multiple_values(): + conn = boto.connect_route53('the_key', 'the_secret') + zone = conn.create_hosted_zone("testdns.aws.com") + zoneid = zone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] + + changes = ResourceRecordSets(conn, zoneid) + change = changes.add_change("CREATE", "foo.bar.testdns.aws.com", "A") + change.add_value("1.2.3.4") + change.add_value("5.6.7.8") + changes.commit() + + rrsets = conn.get_all_rrsets(zoneid, type="A") + rrsets.should.have.length_of(1) + set(rrsets[0].resource_records).should.equal(set(['1.2.3.4', '5.6.7.8'])) + + +@mock_route53_deprecated +def test_alias_rrset(): + conn = boto.connect_route53('the_key', 'the_secret') + zone = conn.create_hosted_zone("testdns.aws.com") + zoneid = zone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] + + changes = ResourceRecordSets(conn, zoneid) + changes.add_change("CREATE", "foo.alias.testdns.aws.com", "A", + alias_hosted_zone_id="Z3DG6IL3SJCGPX", alias_dns_name="foo.testdns.aws.com") + changes.add_change("CREATE", "bar.alias.testdns.aws.com", "CNAME", + alias_hosted_zone_id="Z3DG6IL3SJCGPX", alias_dns_name="bar.testdns.aws.com") + 
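The commit below applies both alias changes in one batch. In boto3, an alias record set carries an AliasTarget block in place of a TTL and literal ResourceRecords; a sketch, with the target hosted zone and DNS name as assumed inputs:

    import boto3

    route53 = boto3.client('route53', region_name='us-east-1')

    def create_alias(zone_id, name, target_zone_id, target_dns_name):
        # An alias record resolves to another resource's DNS name;
        # EvaluateTargetHealth is required inside AliasTarget.
        route53.change_resource_record_sets(
            HostedZoneId=zone_id,
            ChangeBatch={
                'Changes': [{
                    'Action': 'CREATE',
                    'ResourceRecordSet': {
                        'Name': name,
                        'Type': 'A',
                        'AliasTarget': {
                            'HostedZoneId': target_zone_id,
                            'DNSName': target_dns_name,
                            'EvaluateTargetHealth': False,
                        },
                    },
                }]
            },
        )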
changes.commit() + + rrsets = conn.get_all_rrsets(zoneid, type="A") + rrset_records = [(rr_set.name, rr) for rr_set in rrsets for rr in rr_set.resource_records] + rrset_records.should.have.length_of(2) + rrset_records.should.contain(('foo.alias.testdns.aws.com', 'foo.testdns.aws.com')) + rrset_records.should.contain(('bar.alias.testdns.aws.com', 'bar.testdns.aws.com')) + rrsets[0].resource_records[0].should.equal('foo.testdns.aws.com') + rrsets = conn.get_all_rrsets(zoneid, type="CNAME") + rrsets.should.have.length_of(1) + rrsets[0].resource_records[0].should.equal('bar.testdns.aws.com') + + +@mock_route53_deprecated +def test_create_health_check(): + conn = boto.connect_route53('the_key', 'the_secret') + + check = HealthCheck( + ip_addr="10.0.0.25", + port=80, + hc_type="HTTP", + resource_path="/", + fqdn="example.com", + string_match="a good response", + request_interval=10, + failure_threshold=2, + ) + conn.create_health_check(check) + + checks = conn.get_list_health_checks()['ListHealthChecksResponse'][ + 'HealthChecks'] + list(checks).should.have.length_of(1) + check = checks[0] + config = check['HealthCheckConfig'] + config['IPAddress'].should.equal("10.0.0.25") + config['Port'].should.equal("80") + config['Type'].should.equal("HTTP") + config['ResourcePath'].should.equal("/") + config['FullyQualifiedDomainName'].should.equal("example.com") + config['SearchString'].should.equal("a good response") + config['RequestInterval'].should.equal("10") + config['FailureThreshold'].should.equal("2") + + +@mock_route53_deprecated +def test_delete_health_check(): + conn = boto.connect_route53('the_key', 'the_secret') + + check = HealthCheck( + ip_addr="10.0.0.25", + port=80, + hc_type="HTTP", + resource_path="/", + ) + conn.create_health_check(check) + + checks = conn.get_list_health_checks()['ListHealthChecksResponse'][ + 'HealthChecks'] + list(checks).should.have.length_of(1) + health_check_id = checks[0]['Id'] + + conn.delete_health_check(health_check_id) + checks = conn.get_list_health_checks()['ListHealthChecksResponse'][ + 'HealthChecks'] + list(checks).should.have.length_of(0) + + +@mock_route53_deprecated +def test_use_health_check_in_resource_record_set(): + conn = boto.connect_route53('the_key', 'the_secret') + + check = HealthCheck( + ip_addr="10.0.0.25", + port=80, + hc_type="HTTP", + resource_path="/", + ) + check = conn.create_health_check( + check)['CreateHealthCheckResponse']['HealthCheck'] + check_id = check['Id'] + + zone = conn.create_hosted_zone("testdns.aws.com") + zone_id = zone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] + + changes = ResourceRecordSets(conn, zone_id) + change = changes.add_change( + "CREATE", "foo.bar.testdns.aws.com", "A", health_check=check_id) + change.add_value("1.2.3.4") + changes.commit() + + record_sets = conn.get_all_rrsets(zone_id) + record_sets[0].health_check.should.equal(check_id) + + +@mock_route53_deprecated +def test_hosted_zone_comment_preserved(): + conn = boto.connect_route53('the_key', 'the_secret') + + firstzone = conn.create_hosted_zone( + "testdns.aws.com.", comment="test comment") + zone_id = firstzone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] + + hosted_zone = conn.get_hosted_zone(zone_id) + hosted_zone["GetHostedZoneResponse"]["HostedZone"][ + "Config"]["Comment"].should.equal("test comment") + + hosted_zones = conn.get_all_hosted_zones() + hosted_zones["ListHostedZonesResponse"]["HostedZones"][ + 0]["Config"]["Comment"].should.equal("test comment") + + zone = 
conn.get_zone("testdns.aws.com.") + zone.config["Comment"].should.equal("test comment") + + +@mock_route53_deprecated +def test_deleting_weighted_route(): + conn = boto.connect_route53() + + conn.create_hosted_zone("testdns.aws.com.") + zone = conn.get_zone("testdns.aws.com.") + + zone.add_cname("cname.testdns.aws.com", "example.com", + identifier=('success-test-foo', '50')) + zone.add_cname("cname.testdns.aws.com", "example.com", + identifier=('success-test-bar', '50')) + + cnames = zone.get_cname('cname.testdns.aws.com.', all=True) + cnames.should.have.length_of(2) + foo_cname = [cname for cname in cnames if cname.identifier == + 'success-test-foo'][0] + + zone.delete_record(foo_cname) + cname = zone.get_cname('cname.testdns.aws.com.', all=True) + # When get_cname only had one result, it returns just that result instead + # of a list. + cname.identifier.should.equal('success-test-bar') + + +@mock_route53_deprecated +def test_deleting_latency_route(): + conn = boto.connect_route53() + + conn.create_hosted_zone("testdns.aws.com.") + zone = conn.get_zone("testdns.aws.com.") + + zone.add_cname("cname.testdns.aws.com", "example.com", + identifier=('success-test-foo', 'us-west-2')) + zone.add_cname("cname.testdns.aws.com", "example.com", + identifier=('success-test-bar', 'us-west-1')) + + cnames = zone.get_cname('cname.testdns.aws.com.', all=True) + cnames.should.have.length_of(2) + foo_cname = [cname for cname in cnames if cname.identifier == + 'success-test-foo'][0] + foo_cname.region.should.equal('us-west-2') + + zone.delete_record(foo_cname) + cname = zone.get_cname('cname.testdns.aws.com.', all=True) + # When get_cname only had one result, it returns just that result instead + # of a list. + cname.identifier.should.equal('success-test-bar') + cname.region.should.equal('us-west-1') + + +@mock_route53_deprecated +def test_hosted_zone_private_zone_preserved(): + conn = boto.connect_route53('the_key', 'the_secret') + + firstzone = conn.create_hosted_zone( + "testdns.aws.com.", private_zone=True, vpc_id='vpc-fake', vpc_region='us-east-1') + zone_id = firstzone["CreateHostedZoneResponse"][ + "HostedZone"]["Id"].split("/")[-1] + + hosted_zone = conn.get_hosted_zone(zone_id) + # in (original) boto, these bools returned as strings. + hosted_zone["GetHostedZoneResponse"]["HostedZone"][ + "Config"]["PrivateZone"].should.equal('True') + + hosted_zones = conn.get_all_hosted_zones() + hosted_zones["ListHostedZonesResponse"]["HostedZones"][ + 0]["Config"]["PrivateZone"].should.equal('True') + + zone = conn.get_zone("testdns.aws.com.") + zone.config["PrivateZone"].should.equal('True') + + +@mock_route53 +def test_hosted_zone_private_zone_preserved_boto3(): + conn = boto3.client('route53', region_name='us-east-1') + # TODO: actually create_hosted_zone statements with PrivateZone=True, but without + # a _valid_ vpc-id should fail. 
+ firstzone = conn.create_hosted_zone( + Name="testdns.aws.com.", + CallerReference=str(hash('foo')), + HostedZoneConfig=dict( + PrivateZone=True, + Comment="Test", + ) + ) + + zone_id = firstzone["HostedZone"]["Id"].split("/")[-1] + + hosted_zone = conn.get_hosted_zone(Id=zone_id) + hosted_zone["HostedZone"]["Config"]["PrivateZone"].should.equal(True) + + hosted_zones = conn.list_hosted_zones() + hosted_zones["HostedZones"][0]["Config"]["PrivateZone"].should.equal(True) + + hosted_zones = conn.list_hosted_zones_by_name(DNSName="testdns.aws.com.") + len(hosted_zones["HostedZones"]).should.equal(1) + hosted_zones["HostedZones"][0]["Config"]["PrivateZone"].should.equal(True) + + +@mock_route53 +def test_list_or_change_tags_for_resource_request(): + conn = boto3.client('route53', region_name='us-east-1') + health_check = conn.create_health_check( + CallerReference='foobar', + HealthCheckConfig={ + 'IPAddress': '192.0.2.44', + 'Port': 123, + 'Type': 'HTTP', + 'ResourcePath': '/', + 'RequestInterval': 30, + 'FailureThreshold': 123, + 'HealthThreshold': 123, + } + ) + healthcheck_id = health_check['HealthCheck']['Id'] + + tag1 = {"Key": "Deploy", "Value": "True"} + tag2 = {"Key": "Name", "Value": "UnitTest"} + + # Test adding a tag for a resource id + conn.change_tags_for_resource( + ResourceType='healthcheck', + ResourceId=healthcheck_id, + AddTags=[tag1, tag2] + ) + + # Check to make sure that the response has the 'ResourceTagSet' key + response = conn.list_tags_for_resource( + ResourceType='healthcheck', ResourceId=healthcheck_id) + response.should.contain('ResourceTagSet') + + # Validate that each key was added + response['ResourceTagSet']['Tags'].should.contain(tag1) + response['ResourceTagSet']['Tags'].should.contain(tag2) + + len(response['ResourceTagSet']['Tags']).should.equal(2) + + # Try to remove the tags + conn.change_tags_for_resource( + ResourceType='healthcheck', + ResourceId=healthcheck_id, + RemoveTagKeys=[tag1['Key']] + ) + + # Check to make sure that the response has the 'ResourceTagSet' key + response = conn.list_tags_for_resource( + ResourceType='healthcheck', ResourceId=healthcheck_id) + response.should.contain('ResourceTagSet') + response['ResourceTagSet']['Tags'].should_not.contain(tag1) + response['ResourceTagSet']['Tags'].should.contain(tag2) + + # Remove the second tag + conn.change_tags_for_resource( + ResourceType='healthcheck', + ResourceId=healthcheck_id, + RemoveTagKeys=[tag2['Key']] + ) + + response = conn.list_tags_for_resource( + ResourceType='healthcheck', ResourceId=healthcheck_id) + response['ResourceTagSet']['Tags'].should_not.contain(tag2) + + # Re-add the tags + conn.change_tags_for_resource( + ResourceType='healthcheck', + ResourceId=healthcheck_id, + AddTags=[tag1, tag2] + ) + + # Remove both + conn.change_tags_for_resource( + ResourceType='healthcheck', + ResourceId=healthcheck_id, + RemoveTagKeys=[tag1['Key'], tag2['Key']] + ) + + response = conn.list_tags_for_resource( + ResourceType='healthcheck', ResourceId=healthcheck_id) + response['ResourceTagSet']['Tags'].should.be.empty + + +@mock_route53 +def test_list_hosted_zones_by_name(): + conn = boto3.client('route53', region_name='us-east-1') + conn.create_hosted_zone( + Name="test.b.com.", + CallerReference=str(hash('foo')), + HostedZoneConfig=dict( + PrivateZone=True, + Comment="test com", + ) + ) + conn.create_hosted_zone( + Name="test.a.org.", + CallerReference=str(hash('bar')), + HostedZoneConfig=dict( + PrivateZone=True, + Comment="test org", + ) + ) + conn.create_hosted_zone( + 
Name="test.a.org.", + CallerReference=str(hash('bar')), + HostedZoneConfig=dict( + PrivateZone=True, + Comment="test org 2", + ) + ) + + # test lookup + zones = conn.list_hosted_zones_by_name(DNSName="test.b.com.") + len(zones["HostedZones"]).should.equal(1) + zones["HostedZones"][0]["Name"].should.equal("test.b.com.") + zones = conn.list_hosted_zones_by_name(DNSName="test.a.org.") + len(zones["HostedZones"]).should.equal(2) + zones["HostedZones"][0]["Name"].should.equal("test.a.org.") + zones["HostedZones"][1]["Name"].should.equal("test.a.org.") + + # test sort order + zones = conn.list_hosted_zones_by_name() + len(zones["HostedZones"]).should.equal(3) + zones["HostedZones"][0]["Name"].should.equal("test.b.com.") + zones["HostedZones"][1]["Name"].should.equal("test.a.org.") + zones["HostedZones"][2]["Name"].should.equal("test.a.org.") + + +@mock_route53 +def test_change_resource_record_sets_crud_valid(): + conn = boto3.client('route53', region_name='us-east-1') + conn.create_hosted_zone( + Name="db.", + CallerReference=str(hash('foo')), + HostedZoneConfig=dict( + PrivateZone=True, + Comment="db", + ) + ) + + zones = conn.list_hosted_zones_by_name(DNSName="db.") + len(zones["HostedZones"]).should.equal(1) + zones["HostedZones"][0]["Name"].should.equal("db.") + hosted_zone_id = zones["HostedZones"][0]["Id"] + + # Create A Record. + a_record_endpoint_payload = { + 'Comment': 'create A record prod.redis.db', + 'Changes': [ + { + 'Action': 'CREATE', + 'ResourceRecordSet': { + 'Name': 'prod.redis.db', + 'Type': 'A', + 'TTL': 10, + 'ResourceRecords': [{ + 'Value': '127.0.0.1' + }] + } + } + ] + } + conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=a_record_endpoint_payload) + + response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) + len(response['ResourceRecordSets']).should.equal(1) + a_record_detail = response['ResourceRecordSets'][0] + a_record_detail['Name'].should.equal('prod.redis.db') + a_record_detail['Type'].should.equal('A') + a_record_detail['TTL'].should.equal(10) + a_record_detail['ResourceRecords'].should.equal([{'Value': '127.0.0.1'}]) + + # Update type to CNAME + cname_record_endpoint_payload = { + 'Comment': 'Update to CNAME prod.redis.db', + 'Changes': [ + { + 'Action': 'UPSERT', + 'ResourceRecordSet': { + 'Name': 'prod.redis.db', + 'Type': 'CNAME', + 'TTL': 60, + 'ResourceRecords': [{ + 'Value': '192.168.1.1' + }] + } + } + ] + } + conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=cname_record_endpoint_payload) + + response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) + len(response['ResourceRecordSets']).should.equal(1) + cname_record_detail = response['ResourceRecordSets'][0] + cname_record_detail['Name'].should.equal('prod.redis.db') + cname_record_detail['Type'].should.equal('CNAME') + cname_record_detail['TTL'].should.equal(60) + cname_record_detail['ResourceRecords'].should.equal([{'Value': '192.168.1.1'}]) + + # Delete record. 
+ delete_payload = { + 'Comment': 'delete prod.redis.db', + 'Changes': [ + { + 'Action': 'DELETE', + 'ResourceRecordSet': { + 'Name': 'prod.redis.db', + 'Type': 'CNAME', + } + } + ] + } + conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=delete_payload) + response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) + len(response['ResourceRecordSets']).should.equal(0) + + +@mock_route53 +def test_change_resource_record_invalid(): + conn = boto3.client('route53', region_name='us-east-1') + conn.create_hosted_zone( + Name="db.", + CallerReference=str(hash('foo')), + HostedZoneConfig=dict( + PrivateZone=True, + Comment="db", + ) + ) + + zones = conn.list_hosted_zones_by_name(DNSName="db.") + len(zones["HostedZones"]).should.equal(1) + zones["HostedZones"][0]["Name"].should.equal("db.") + hosted_zone_id = zones["HostedZones"][0]["Id"] + + invalid_a_record_payload = { + 'Comment': 'this should fail', + 'Changes': [ + { + 'Action': 'CREATE', + 'ResourceRecordSet': { + 'Name': 'prod.scooby.doo', + 'Type': 'A', + 'TTL': 10, + 'ResourceRecords': [{ + 'Value': '127.0.0.1' + }] + } + } + ] + } + + with assert_raises(botocore.exceptions.ClientError): + conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=invalid_a_record_payload) + + response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) + len(response['ResourceRecordSets']).should.equal(0) + + invalid_cname_record_payload = { + 'Comment': 'this should also fail', + 'Changes': [ + { + 'Action': 'UPSERT', + 'ResourceRecordSet': { + 'Name': 'prod.scooby.doo', + 'Type': 'CNAME', + 'TTL': 10, + 'ResourceRecords': [{ + 'Value': '127.0.0.1' + }] + } + } + ] + } + + with assert_raises(botocore.exceptions.ClientError): + conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=invalid_cname_record_payload) + + response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) + len(response['ResourceRecordSets']).should.equal(0) + + +@mock_route53 +def test_list_resource_record_sets_name_type_filters(): + conn = boto3.client('route53', region_name='us-east-1') + create_hosted_zone_response = conn.create_hosted_zone( + Name="db.", + CallerReference=str(hash('foo')), + HostedZoneConfig=dict( + PrivateZone=True, + Comment="db", + ) + ) + hosted_zone_id = create_hosted_zone_response['HostedZone']['Id'] + + def create_resource_record_set(rec_type, rec_name): + payload = { + 'Comment': 'create {} record {}'.format(rec_type, rec_name), + 'Changes': [ + { + 'Action': 'CREATE', + 'ResourceRecordSet': { + 'Name': rec_name, + 'Type': rec_type, + 'TTL': 10, + 'ResourceRecords': [{ + 'Value': '127.0.0.1' + }] + } + } + ] + } + conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=payload) + + # record_type, record_name + all_records = [ + ('A', 'a.a.db'), + ('A', 'a.b.db'), + ('A', 'b.b.db'), + ('CNAME', 'b.b.db'), + ('CNAME', 'b.c.db'), + ('CNAME', 'c.c.db') + ] + for record_type, record_name in all_records: + create_resource_record_set(record_type, record_name) + + start_with = 2 + response = conn.list_resource_record_sets( + HostedZoneId=hosted_zone_id, + StartRecordType=all_records[start_with][0], + StartRecordName=all_records[start_with][1] + ) + + returned_records = [(record['Type'], record['Name']) for record in response['ResourceRecordSets']] + len(returned_records).should.equal(len(all_records) - start_with) + for desired_record in all_records[start_with:]: + returned_records.should.contain(desired_record) diff --git a/tests/test_s3/test_s3.py 
b/tests/test_s3/test_s3.py index 6e339abb6..aa9050e04 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1,2583 +1,2583 @@ -# -*- coding: utf-8 -*- -from __future__ import unicode_literals - -import datetime -from six.moves.urllib.request import urlopen -from six.moves.urllib.error import HTTPError -from functools import wraps -from gzip import GzipFile -from io import BytesIO -import zlib - -import json -import boto -import boto3 -from botocore.client import ClientError -import botocore.exceptions -from boto.exception import S3CreateError, S3ResponseError -from botocore.handlers import disable_signing -from boto.s3.connection import S3Connection -from boto.s3.key import Key -from freezegun import freeze_time -import six -import requests -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises - -import sure # noqa - -from moto import settings, mock_s3, mock_s3_deprecated -import moto.s3.models as s3model - -if settings.TEST_SERVER_MODE: - REDUCED_PART_SIZE = s3model.UPLOAD_PART_MIN_SIZE - EXPECTED_ETAG = '"140f92a6df9f9e415f74a1463bcee9bb-2"' -else: - REDUCED_PART_SIZE = 256 - EXPECTED_ETAG = '"66d1a1a2ed08fd05c137f316af4ff255-2"' - - -def reduced_min_part_size(f): - """ speed up tests by temporarily making the multipart minimum part size - small - """ - orig_size = s3model.UPLOAD_PART_MIN_SIZE - - @wraps(f) - def wrapped(*args, **kwargs): - try: - s3model.UPLOAD_PART_MIN_SIZE = REDUCED_PART_SIZE - return f(*args, **kwargs) - finally: - s3model.UPLOAD_PART_MIN_SIZE = orig_size - - return wrapped - - -class MyModel(object): - - def __init__(self, name, value): - self.name = name - self.value = value - - def save(self): - s3 = boto3.client('s3', region_name='us-east-1') - s3.put_object(Bucket='mybucket', Key=self.name, Body=self.value) - - -@mock_s3 -def test_my_model_save(): - # Create Bucket so that test can run - conn = boto3.resource('s3', region_name='us-east-1') - conn.create_bucket(Bucket='mybucket') - #################################### - - model_instance = MyModel('steve', 'is awesome') - model_instance.save() - - body = conn.Object('mybucket', 'steve').get()['Body'].read().decode() - - assert body == 'is awesome' - - -@mock_s3 -def test_key_etag(): - conn = boto3.resource('s3', region_name='us-east-1') - conn.create_bucket(Bucket='mybucket') - - model_instance = MyModel('steve', 'is awesome') - model_instance.save() - - conn.Bucket('mybucket').Object('steve').e_tag.should.equal( - '"d32bda93738f7e03adb22e66c90fbc04"') - - -@mock_s3_deprecated -def test_multipart_upload_too_small(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - - multipart = bucket.initiate_multipart_upload("the-key") - multipart.upload_part_from_file(BytesIO(b'hello'), 1) - multipart.upload_part_from_file(BytesIO(b'world'), 2) - # Multipart with total size under 5MB is refused - multipart.complete_upload.should.throw(S3ResponseError) - - -@mock_s3_deprecated -@reduced_min_part_size -def test_multipart_upload(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - - multipart = bucket.initiate_multipart_upload("the-key") - part1 = b'0' * REDUCED_PART_SIZE - multipart.upload_part_from_file(BytesIO(part1), 1) - # last part, can be less than 5 MB - part2 = b'1' - multipart.upload_part_from_file(BytesIO(part2), 2) - multipart.complete_upload() - # we should get both parts as the key contents - bucket.get_key( - "the-key").get_contents_as_string().should.equal(part1 + part2) - - 
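For reference, a minimal sketch of where the EXPECTED_ETAG constants above come from, assuming the standard S3 multipart ETag scheme that moto mirrors: the ETag is the MD5 of the concatenated binary MD5 digests of the parts, suffixed with the part count. This helper is illustrative and not part of the patch.

    import hashlib

    def multipart_etag(parts):
        # parts: one bytes object per uploaded part, in upload order
        concatenated = b''.join(hashlib.md5(part).digest() for part in parts)
        return '"{}-{}"'.format(hashlib.md5(concatenated).hexdigest(), len(parts))

    # With the reduced part size used by these tests, this should reproduce
    # the non-server-mode constant: multipart_etag([b'0' * 256, b'1'])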
-@mock_s3_deprecated -@reduced_min_part_size -def test_multipart_upload_out_of_order(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - - multipart = bucket.initiate_multipart_upload("the-key") - # last part, can be less than 5 MB - part2 = b'1' - multipart.upload_part_from_file(BytesIO(part2), 4) - part1 = b'0' * REDUCED_PART_SIZE - multipart.upload_part_from_file(BytesIO(part1), 2) - multipart.complete_upload() - # we should get both parts as the key contents - bucket.get_key( - "the-key").get_contents_as_string().should.equal(part1 + part2) - - -@mock_s3_deprecated -@reduced_min_part_size -def test_multipart_upload_with_headers(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - - multipart = bucket.initiate_multipart_upload( - "the-key", metadata={"foo": "bar"}) - part1 = b'0' * 10 - multipart.upload_part_from_file(BytesIO(part1), 1) - multipart.complete_upload() - - key = bucket.get_key("the-key") - key.metadata.should.equal({"foo": "bar"}) - - -@mock_s3_deprecated -@reduced_min_part_size -def test_multipart_upload_with_copy_key(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "original-key" - key.set_contents_from_string("key_value") - - multipart = bucket.initiate_multipart_upload("the-key") - part1 = b'0' * REDUCED_PART_SIZE - multipart.upload_part_from_file(BytesIO(part1), 1) - multipart.copy_part_from_key("foobar", "original-key", 2, 0, 3) - multipart.complete_upload() - bucket.get_key( - "the-key").get_contents_as_string().should.equal(part1 + b"key_") - - -@mock_s3_deprecated -@reduced_min_part_size -def test_multipart_upload_cancel(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - - multipart = bucket.initiate_multipart_upload("the-key") - part1 = b'0' * REDUCED_PART_SIZE - multipart.upload_part_from_file(BytesIO(part1), 1) - multipart.cancel_upload() - # TODO we really need some sort of assertion here, but we don't currently - # have the ability to list multipart uploads for a bucket.
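The TODO at the end of test_multipart_upload_cancel could be closed with a listing-based assertion; a sketch, assuming boto3's list_multipart_uploads is available (whether moto backed that call at the time of this patch is an assumption):

    import boto3

    def assert_no_pending_uploads(bucket_name):
        s3 = boto3.client('s3', region_name='us-east-1')
        resp = s3.list_multipart_uploads(Bucket=bucket_name)
        # A cancelled upload should no longer be reported as in flight;
        # S3 omits the 'Uploads' key entirely when nothing is pending.
        assert resp.get('Uploads', []) == []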
- - -@mock_s3_deprecated -@reduced_min_part_size -def test_multipart_etag(): - # Create Bucket so that test can run - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket('mybucket') - - multipart = bucket.initiate_multipart_upload("the-key") - part1 = b'0' * REDUCED_PART_SIZE - multipart.upload_part_from_file(BytesIO(part1), 1) - # last part, can be less than 5 MB - part2 = b'1' - multipart.upload_part_from_file(BytesIO(part2), 2) - multipart.complete_upload() - # we should get both parts as the key contents - bucket.get_key("the-key").etag.should.equal(EXPECTED_ETAG) - - -@mock_s3_deprecated -@reduced_min_part_size -def test_multipart_invalid_order(): - # Create Bucket so that test can run - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket('mybucket') - - multipart = bucket.initiate_multipart_upload("the-key") - part1 = b'0' * 5242880 - etag1 = multipart.upload_part_from_file(BytesIO(part1), 1).etag - # last part, can be less than 5 MB - part2 = b'1' - etag2 = multipart.upload_part_from_file(BytesIO(part2), 2).etag - xml = "<Part><PartNumber>{0}</PartNumber><ETag>{1}</ETag></Part>" - xml = xml.format(2, etag2) + xml.format(1, etag1) - xml = "<CompleteMultipartUpload>{0}</CompleteMultipartUpload>".format(xml) - bucket.complete_multipart_upload.when.called_with( - multipart.key_name, multipart.id, xml).should.throw(S3ResponseError) - -@mock_s3_deprecated -@reduced_min_part_size -def test_multipart_etag_quotes_stripped(): - # Create Bucket so that test can run - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket('mybucket') - - multipart = bucket.initiate_multipart_upload("the-key") - part1 = b'0' * REDUCED_PART_SIZE - etag1 = multipart.upload_part_from_file(BytesIO(part1), 1).etag - # last part, can be less than 5 MB - part2 = b'1' - etag2 = multipart.upload_part_from_file(BytesIO(part2), 2).etag - # Strip quotes from etags - etag1 = etag1.replace('"','') - etag2 = etag2.replace('"','') - xml = "<Part><PartNumber>{0}</PartNumber><ETag>{1}</ETag></Part>" - xml = xml.format(1, etag1) + xml.format(2, etag2) - xml = "<CompleteMultipartUpload>{0}</CompleteMultipartUpload>".format(xml) - bucket.complete_multipart_upload.when.called_with( - multipart.key_name, multipart.id, xml).should_not.throw(S3ResponseError) - # we should get both parts as the key contents - bucket.get_key("the-key").etag.should.equal(EXPECTED_ETAG) - -@mock_s3_deprecated -@reduced_min_part_size -def test_multipart_duplicate_upload(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - - multipart = bucket.initiate_multipart_upload("the-key") - part1 = b'0' * REDUCED_PART_SIZE - multipart.upload_part_from_file(BytesIO(part1), 1) - # same part again - multipart.upload_part_from_file(BytesIO(part1), 1) - part2 = b'1' * 1024 - multipart.upload_part_from_file(BytesIO(part2), 2) - multipart.complete_upload() - # We should get only one copy of part 1.
- bucket.get_key( - "the-key").get_contents_as_string().should.equal(part1 + part2) - - -@mock_s3_deprecated -def test_list_multiparts(): - # Create Bucket so that test can run - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket('mybucket') - - multipart1 = bucket.initiate_multipart_upload("one-key") - multipart2 = bucket.initiate_multipart_upload("two-key") - uploads = bucket.get_all_multipart_uploads() - uploads.should.have.length_of(2) - dict([(u.key_name, u.id) for u in uploads]).should.equal( - {'one-key': multipart1.id, 'two-key': multipart2.id}) - multipart2.cancel_upload() - uploads = bucket.get_all_multipart_uploads() - uploads.should.have.length_of(1) - uploads[0].key_name.should.equal("one-key") - multipart1.cancel_upload() - uploads = bucket.get_all_multipart_uploads() - uploads.should.be.empty - - -@mock_s3_deprecated -def test_key_save_to_missing_bucket(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.get_bucket('mybucket', validate=False) - - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string.when.called_with( - "foobar").should.throw(S3ResponseError) - - -@mock_s3_deprecated -def test_missing_key(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - bucket.get_key("the-key").should.equal(None) - - -@mock_s3_deprecated -def test_missing_key_urllib2(): - conn = boto.connect_s3('the_key', 'the_secret') - conn.create_bucket("foobar") - - urlopen.when.called_with( - "http://foobar.s3.amazonaws.com/the-key").should.throw(HTTPError) - - -@mock_s3_deprecated -def test_empty_key(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("") - - key = bucket.get_key("the-key") - key.size.should.equal(0) - key.get_contents_as_string().should.equal(b'') - - -@mock_s3_deprecated -def test_empty_key_set_on_existing_key(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("foobar") - - key = bucket.get_key("the-key") - key.size.should.equal(6) - key.get_contents_as_string().should.equal(b'foobar') - - key.set_contents_from_string("") - bucket.get_key("the-key").get_contents_as_string().should.equal(b'') - - -@mock_s3_deprecated -def test_large_key_save(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("foobar" * 100000) - - bucket.get_key( - "the-key").get_contents_as_string().should.equal(b'foobar' * 100000) - - -@mock_s3_deprecated -def test_copy_key(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("some value") - - bucket.copy_key('new-key', 'foobar', 'the-key') - - bucket.get_key( - "the-key").get_contents_as_string().should.equal(b"some value") - bucket.get_key( - "new-key").get_contents_as_string().should.equal(b"some value") - - -@mock_s3_deprecated -def test_copy_key_with_version(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - bucket.configure_versioning(versioning=True) - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("some value") - key.set_contents_from_string("another value") - - bucket.copy_key('new-key', 'foobar', 'the-key', src_version_id='0') - - 
bucket.get_key( - "the-key").get_contents_as_string().should.equal(b"another value") - bucket.get_key( - "new-key").get_contents_as_string().should.equal(b"some value") - - -@mock_s3_deprecated -def test_set_metadata(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = 'the-key' - key.set_metadata('md', 'Metadatastring') - key.set_contents_from_string("Testval") - - bucket.get_key('the-key').get_metadata('md').should.equal('Metadatastring') - - -@mock_s3_deprecated -def test_copy_key_replace_metadata(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_metadata('md', 'Metadatastring') - key.set_contents_from_string("some value") - - bucket.copy_key('new-key', 'foobar', 'the-key', - metadata={'momd': 'Mometadatastring'}) - - bucket.get_key("new-key").get_metadata('md').should.be.none - bucket.get_key( - "new-key").get_metadata('momd').should.equal('Mometadatastring') - - -@freeze_time("2012-01-01 12:00:00") -@mock_s3_deprecated -def test_last_modified(): - # See https://github.com/boto/boto/issues/466 - conn = boto.connect_s3() - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("some value") - - rs = bucket.get_all_keys() - rs[0].last_modified.should.equal('2012-01-01T12:00:00.000Z') - - bucket.get_key( - "the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT') - - -@mock_s3_deprecated -def test_missing_bucket(): - conn = boto.connect_s3('the_key', 'the_secret') - conn.get_bucket.when.called_with('mybucket').should.throw(S3ResponseError) - - -@mock_s3_deprecated -def test_bucket_with_dash(): - conn = boto.connect_s3('the_key', 'the_secret') - conn.get_bucket.when.called_with( - 'mybucket-test').should.throw(S3ResponseError) - - -@mock_s3_deprecated -def test_create_existing_bucket(): - "Trying to create a bucket that already exists should raise an Error" - conn = boto.s3.connect_to_region("us-west-2") - conn.create_bucket("foobar") - with assert_raises(S3CreateError): - conn.create_bucket('foobar') - - -@mock_s3_deprecated -def test_create_existing_bucket_in_us_east_1(): - "Trying to create a bucket that already exists in us-east-1 returns the bucket" - - """ - http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html - Your previous request to create the named bucket succeeded and you already - own it. You get this error in all AWS regions except US Standard, - us-east-1. In the us-east-1 region, you will get 200 OK, but it is a no-op (if - the bucket exists, Amazon S3 will not do anything).
- """ - conn = boto.s3.connect_to_region("us-east-1") - conn.create_bucket("foobar") - bucket = conn.create_bucket("foobar") - bucket.name.should.equal("foobar") - - -@mock_s3_deprecated -def test_other_region(): - conn = S3Connection( - 'key', 'secret', host='s3-website-ap-southeast-2.amazonaws.com') - conn.create_bucket("foobar") - list(conn.get_bucket("foobar").get_all_keys()).should.equal([]) - - -@mock_s3_deprecated -def test_bucket_deletion(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("some value") - - # Try to delete a bucket that still has keys - conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError) - - bucket.delete_key("the-key") - conn.delete_bucket("foobar") - - # Get non-existing bucket - conn.get_bucket.when.called_with("foobar").should.throw(S3ResponseError) - - # Delete non-existant bucket - conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError) - - -@mock_s3_deprecated -def test_get_all_buckets(): - conn = boto.connect_s3('the_key', 'the_secret') - conn.create_bucket("foobar") - conn.create_bucket("foobar2") - buckets = conn.get_all_buckets() - - buckets.should.have.length_of(2) - - -@mock_s3 -@mock_s3_deprecated -def test_post_to_bucket(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - - requests.post("https://foobar.s3.amazonaws.com/", { - 'key': 'the-key', - 'file': 'nothing' - }) - - bucket.get_key('the-key').get_contents_as_string().should.equal(b'nothing') - - -@mock_s3 -@mock_s3_deprecated -def test_post_with_metadata_to_bucket(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - - requests.post("https://foobar.s3.amazonaws.com/", { - 'key': 'the-key', - 'file': 'nothing', - 'x-amz-meta-test': 'metadata' - }) - - bucket.get_key('the-key').get_metadata('test').should.equal('metadata') - - -@mock_s3_deprecated -def test_delete_missing_key(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket('foobar') - - deleted_key = bucket.delete_key("foobar") - deleted_key.key.should.equal("foobar") - - -@mock_s3_deprecated -def test_delete_keys(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket('foobar') - - Key(bucket=bucket, name='file1').set_contents_from_string('abc') - Key(bucket=bucket, name='file2').set_contents_from_string('abc') - Key(bucket=bucket, name='file3').set_contents_from_string('abc') - Key(bucket=bucket, name='file4').set_contents_from_string('abc') - - result = bucket.delete_keys(['file2', 'file3']) - result.deleted.should.have.length_of(2) - result.errors.should.have.length_of(0) - keys = bucket.get_all_keys() - keys.should.have.length_of(2) - keys[0].name.should.equal('file1') - - -@mock_s3_deprecated -def test_delete_keys_with_invalid(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket('foobar') - - Key(bucket=bucket, name='file1').set_contents_from_string('abc') - Key(bucket=bucket, name='file2').set_contents_from_string('abc') - Key(bucket=bucket, name='file3').set_contents_from_string('abc') - Key(bucket=bucket, name='file4').set_contents_from_string('abc') - - result = bucket.delete_keys(['abc', 'file3']) - - result.deleted.should.have.length_of(1) - result.errors.should.have.length_of(1) - keys = bucket.get_all_keys() - keys.should.have.length_of(3) - keys[0].name.should.equal('file1') - - -@mock_s3_deprecated 
-def test_bucket_name_with_dot(): - conn = boto.connect_s3() - bucket = conn.create_bucket('firstname.lastname') - - k = Key(bucket, 'somekey') - k.set_contents_from_string('somedata') - - -@mock_s3_deprecated -def test_key_with_special_characters(): - conn = boto.connect_s3() - bucket = conn.create_bucket('test_bucket_name') - - key = Key(bucket, 'test_list_keys_2/x?y') - key.set_contents_from_string('value1') - - key_list = bucket.list('test_list_keys_2/', '/') - keys = [x for x in key_list] - keys[0].name.should.equal("test_list_keys_2/x?y") - - -@mock_s3_deprecated -def test_unicode_key_with_slash(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "/the-key-unîcode/test" - key.set_contents_from_string("value") - - key = bucket.get_key("/the-key-unîcode/test") - key.get_contents_as_string().should.equal(b'value') - - -@mock_s3_deprecated -def test_bucket_key_listing_order(): - conn = boto.connect_s3() - bucket = conn.create_bucket('test_bucket') - prefix = 'toplevel/' - - def store(name): - k = Key(bucket, prefix + name) - k.set_contents_from_string('somedata') - - names = ['x/key', 'y.key1', 'y.key2', 'y.key3', 'x/y/key', 'x/y/z/key'] - - for name in names: - store(name) - - delimiter = None - keys = [x.name for x in bucket.list(prefix, delimiter)] - keys.should.equal([ - 'toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key', - 'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3' - ]) - - delimiter = '/' - keys = [x.name for x in bucket.list(prefix, delimiter)] - keys.should.equal([ - 'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3', 'toplevel/x/' - ]) - - # Test delimiter with no prefix - delimiter = '/' - keys = [x.name for x in bucket.list(prefix=None, delimiter=delimiter)] - keys.should.equal(['toplevel/']) - - delimiter = None - keys = [x.name for x in bucket.list(prefix + 'x', delimiter)] - keys.should.equal( - [u'toplevel/x/key', u'toplevel/x/y/key', u'toplevel/x/y/z/key']) - - delimiter = '/' - keys = [x.name for x in bucket.list(prefix + 'x', delimiter)] - keys.should.equal([u'toplevel/x/']) - - -@mock_s3_deprecated -def test_key_with_reduced_redundancy(): - conn = boto.connect_s3() - bucket = conn.create_bucket('test_bucket_name') - - key = Key(bucket, 'test_rr_key') - key.set_contents_from_string('value1', reduced_redundancy=True) - # we use the bucket iterator because of: - # https://github.com/boto/boto/issues/1173 - list(bucket)[0].storage_class.should.equal('REDUCED_REDUNDANCY') - - -@mock_s3_deprecated -def test_copy_key_reduced_redundancy(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("some value") - - bucket.copy_key('new-key', 'foobar', 'the-key', - storage_class='REDUCED_REDUNDANCY') - - # we use the bucket iterator because of: - # https://github.com/boto/boto/issues/1173 - keys = dict([(k.name, k) for k in bucket]) - keys['new-key'].storage_class.should.equal("REDUCED_REDUNDANCY") - keys['the-key'].storage_class.should.equal("STANDARD") - - -@freeze_time("2012-01-01 12:00:00") -@mock_s3_deprecated -def test_restore_key(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("some value") - list(bucket)[0].ongoing_restore.should.be.none - key.restore(1) - key = bucket.get_key('the-key') - key.ongoing_restore.should_not.be.none -
key.ongoing_restore.should.be.false - key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT") - key.restore(2) - key = bucket.get_key('the-key') - key.ongoing_restore.should_not.be.none - key.ongoing_restore.should.be.false - key.expiry_date.should.equal("Tue, 03 Jan 2012 12:00:00 GMT") - - -@freeze_time("2012-01-01 12:00:00") -@mock_s3_deprecated -def test_restore_key_headers(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("some value") - key.restore(1, headers={'foo': 'bar'}) - key = bucket.get_key('the-key') - key.ongoing_restore.should_not.be.none - key.ongoing_restore.should.be.false - key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT") - - -@mock_s3_deprecated -def test_get_versioning_status(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket('foobar') - d = bucket.get_versioning_status() - d.should.be.empty - - bucket.configure_versioning(versioning=True) - d = bucket.get_versioning_status() - d.shouldnt.be.empty - d.should.have.key('Versioning').being.equal('Enabled') - - bucket.configure_versioning(versioning=False) - d = bucket.get_versioning_status() - d.should.have.key('Versioning').being.equal('Suspended') - - -@mock_s3_deprecated -def test_key_version(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket('foobar') - bucket.configure_versioning(versioning=True) - - key = Key(bucket) - key.key = 'the-key' - key.version_id.should.be.none - key.set_contents_from_string('some string') - key.version_id.should.equal('0') - key.set_contents_from_string('some string') - key.version_id.should.equal('1') - - key = bucket.get_key('the-key') - key.version_id.should.equal('1') - - -@mock_s3_deprecated -def test_list_versions(): - conn = boto.connect_s3('the_key', 'the_secret') - bucket = conn.create_bucket('foobar') - bucket.configure_versioning(versioning=True) - - key = Key(bucket, 'the-key') - key.version_id.should.be.none - key.set_contents_from_string("Version 1") - key.version_id.should.equal('0') - key.set_contents_from_string("Version 2") - key.version_id.should.equal('1') - - versions = list(bucket.list_versions()) - - versions.should.have.length_of(2) - - versions[0].name.should.equal('the-key') - versions[0].version_id.should.equal('0') - versions[0].get_contents_as_string().should.equal(b"Version 1") - - versions[1].name.should.equal('the-key') - versions[1].version_id.should.equal('1') - versions[1].get_contents_as_string().should.equal(b"Version 2") - - key = Key(bucket, 'the2-key') - key.set_contents_from_string("Version 1") - - keys = list(bucket.list()) - keys.should.have.length_of(2) - versions = list(bucket.list_versions(prefix='the2-')) - versions.should.have.length_of(1) - - -@mock_s3_deprecated -def test_acl_setting(): - conn = boto.connect_s3() - bucket = conn.create_bucket('foobar') - content = b'imafile' - keyname = 'test.txt' - - key = Key(bucket, name=keyname) - key.content_type = 'text/plain' - key.set_contents_from_string(content) - key.make_public() - - key = bucket.get_key(keyname) - - assert key.get_contents_as_string() == content - - grants = key.get_acl().acl.grants - assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and - g.permission == 'READ' for g in grants), grants - - -@mock_s3_deprecated -def test_acl_setting_via_headers(): - conn = boto.connect_s3() - bucket = conn.create_bucket('foobar') - content = b'imafile' - keyname = 'test.txt' - - 
key = Key(bucket, name=keyname) - key.content_type = 'text/plain' - key.set_contents_from_string(content, headers={ - 'x-amz-grant-full-control': 'uri="http://acs.amazonaws.com/groups/global/AllUsers"' - }) - - key = bucket.get_key(keyname) - - assert key.get_contents_as_string() == content - - grants = key.get_acl().acl.grants - assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and - g.permission == 'FULL_CONTROL' for g in grants), grants - - -@mock_s3_deprecated -def test_acl_switching(): - conn = boto.connect_s3() - bucket = conn.create_bucket('foobar') - content = b'imafile' - keyname = 'test.txt' - - key = Key(bucket, name=keyname) - key.content_type = 'text/plain' - key.set_contents_from_string(content, policy='public-read') - key.set_acl('private') - - grants = key.get_acl().acl.grants - assert not any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and - g.permission == 'READ' for g in grants), grants - - -@mock_s3_deprecated -def test_bucket_acl_setting(): - conn = boto.connect_s3() - bucket = conn.create_bucket('foobar') - - bucket.make_public() - - grants = bucket.get_acl().acl.grants - assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and - g.permission == 'READ' for g in grants), grants - - -@mock_s3_deprecated -def test_bucket_acl_switching(): - conn = boto.connect_s3() - bucket = conn.create_bucket('foobar') - bucket.make_public() - - bucket.set_acl('private') - - grants = bucket.get_acl().acl.grants - assert not any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and - g.permission == 'READ' for g in grants), grants - - -@mock_s3 -def test_s3_object_in_public_bucket(): - s3 = boto3.resource('s3') - bucket = s3.Bucket('test-bucket') - bucket.create(ACL='public-read') - bucket.put_object(Body=b'ABCD', Key='file.txt') - - s3_anonymous = boto3.resource('s3') - s3_anonymous.meta.client.meta.events.register('choose-signer.s3.*', disable_signing) - - contents = s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get()['Body'].read() - contents.should.equal(b'ABCD') - - bucket.put_object(ACL='private', Body=b'ABCD', Key='file.txt') - - with assert_raises(ClientError) as exc: - s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get() - exc.exception.response['Error']['Code'].should.equal('403') - - params = {'Bucket': 'test-bucket', 'Key': 'file.txt'} - presigned_url = boto3.client('s3').generate_presigned_url('get_object', params, ExpiresIn=900) - response = requests.get(presigned_url) - assert response.status_code == 200 - - -@mock_s3 -def test_s3_object_in_private_bucket(): - s3 = boto3.resource('s3') - bucket = s3.Bucket('test-bucket') - bucket.create(ACL='private') - bucket.put_object(ACL='private', Body=b'ABCD', Key='file.txt') - - s3_anonymous = boto3.resource('s3') - s3_anonymous.meta.client.meta.events.register('choose-signer.s3.*', disable_signing) - - with assert_raises(ClientError) as exc: - s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get() - exc.exception.response['Error']['Code'].should.equal('403') - - bucket.put_object(ACL='public-read', Body=b'ABCD', Key='file.txt') - contents = s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get()['Body'].read() - contents.should.equal(b'ABCD') - - -@mock_s3_deprecated -def test_unicode_key(): - conn = boto.connect_s3() - bucket = conn.create_bucket('mybucket') - key = Key(bucket) - key.key = u'こんにちは.jpg' - key.set_contents_from_string('Hello world!') - assert [listed_key.key for listed_key in bucket.list()] == 
[key.key] - fetched_key = bucket.get_key(key.key) - assert fetched_key.key == key.key - assert fetched_key.get_contents_as_string().decode("utf-8") == 'Hello world!' - - -@mock_s3_deprecated -def test_unicode_value(): - conn = boto.connect_s3() - bucket = conn.create_bucket('mybucket') - key = Key(bucket) - key.key = 'some_key' - key.set_contents_from_string(u'こんにちは.jpg') - list(bucket.list()) - key = bucket.get_key(key.key) - assert key.get_contents_as_string().decode("utf-8") == u'こんにちは.jpg' - - -@mock_s3_deprecated -def test_setting_content_encoding(): - conn = boto.connect_s3() - bucket = conn.create_bucket('mybucket') - key = bucket.new_key("keyname") - key.set_metadata("Content-Encoding", "gzip") - compressed_data = "abcdef" - key.set_contents_from_string(compressed_data) - - key = bucket.get_key("keyname") - key.content_encoding.should.equal("gzip") - - -@mock_s3_deprecated -def test_bucket_location(): - conn = boto.s3.connect_to_region("us-west-2") - bucket = conn.create_bucket('mybucket') - bucket.get_location().should.equal("us-west-2") - - -@mock_s3_deprecated -def test_ranged_get(): - conn = boto.connect_s3() - bucket = conn.create_bucket('mybucket') - key = Key(bucket) - key.key = 'bigkey' - rep = b"0123456789" - key.set_contents_from_string(rep * 10) - - # Implicitly bounded range requests. - key.get_contents_as_string( - headers={'Range': 'bytes=0-'}).should.equal(rep * 10) - key.get_contents_as_string( - headers={'Range': 'bytes=50-'}).should.equal(rep * 5) - key.get_contents_as_string( - headers={'Range': 'bytes=99-'}).should.equal(b'9') - - # Explicitly bounded range requests starting from the first byte. - key.get_contents_as_string( - headers={'Range': 'bytes=0-0'}).should.equal(b'0') - key.get_contents_as_string( - headers={'Range': 'bytes=0-49'}).should.equal(rep * 5) - key.get_contents_as_string( - headers={'Range': 'bytes=0-99'}).should.equal(rep * 10) - key.get_contents_as_string( - headers={'Range': 'bytes=0-100'}).should.equal(rep * 10) - key.get_contents_as_string( - headers={'Range': 'bytes=0-700'}).should.equal(rep * 10) - - # Explicitly bounded range requests starting from a middle byte. - key.get_contents_as_string( - headers={'Range': 'bytes=50-54'}).should.equal(rep[:5]) - key.get_contents_as_string( - headers={'Range': 'bytes=50-99'}).should.equal(rep * 5) - key.get_contents_as_string( - headers={'Range': 'bytes=50-100'}).should.equal(rep * 5) - key.get_contents_as_string( - headers={'Range': 'bytes=50-700'}).should.equal(rep * 5) - - # Explicitly bounded range requests starting from the last byte. - key.get_contents_as_string( - headers={'Range': 'bytes=99-99'}).should.equal(b'9') - key.get_contents_as_string( - headers={'Range': 'bytes=99-100'}).should.equal(b'9') - key.get_contents_as_string( - headers={'Range': 'bytes=99-700'}).should.equal(b'9') - - # Suffix range requests.
- key.get_contents_as_string( - headers={'Range': 'bytes=-1'}).should.equal(b'9') - key.get_contents_as_string( - headers={'Range': 'bytes=-60'}).should.equal(rep * 6) - key.get_contents_as_string( - headers={'Range': 'bytes=-100'}).should.equal(rep * 10) - key.get_contents_as_string( - headers={'Range': 'bytes=-101'}).should.equal(rep * 10) - key.get_contents_as_string( - headers={'Range': 'bytes=-700'}).should.equal(rep * 10) - - key.size.should.equal(100) - - -@mock_s3_deprecated -def test_policy(): - conn = boto.connect_s3() - bucket_name = 'mybucket' - bucket = conn.create_bucket(bucket_name) - - policy = json.dumps({ - "Version": "2012-10-17", - "Id": "PutObjPolicy", - "Statement": [ - { - "Sid": "DenyUnEncryptedObjectUploads", - "Effect": "Deny", - "Principal": "*", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::{bucket_name}/*".format(bucket_name=bucket_name), - "Condition": { - "StringNotEquals": { - "s3:x-amz-server-side-encryption": "aws:kms" - } - } - } - ] - }) - - with assert_raises(S3ResponseError) as err: - bucket.get_policy() - - ex = err.exception - ex.box_usage.should.be.none - ex.error_code.should.equal('NoSuchBucketPolicy') - ex.message.should.equal('The bucket policy does not exist') - ex.reason.should.equal('Not Found') - ex.resource.should.be.none - ex.status.should.equal(404) - ex.body.should.contain(bucket_name) - ex.request_id.should_not.be.none - - bucket.set_policy(policy).should.be.true - - bucket = conn.get_bucket(bucket_name) - - bucket.get_policy().decode('utf-8').should.equal(policy) - - bucket.delete_policy() - - with assert_raises(S3ResponseError) as err: - bucket.get_policy() - - -@mock_s3_deprecated -def test_website_configuration_xml(): - conn = boto.connect_s3() - bucket = conn.create_bucket('test-bucket') - bucket.set_website_configuration_xml(TEST_XML) - bucket.get_website_configuration_xml().should.equal(TEST_XML) - - -@mock_s3_deprecated -def test_key_with_trailing_slash_in_ordinary_calling_format(): - conn = boto.connect_s3( - 'access_key', - 'secret_key', - calling_format=boto.s3.connection.OrdinaryCallingFormat() - ) - bucket = conn.create_bucket('test_bucket_name') - - key_name = 'key_with_slash/' - - key = Key(bucket, key_name) - key.set_contents_from_string('some value') - - [k.name for k in bucket.get_all_keys()].should.contain(key_name) - - -""" -boto3 -""" - - -@mock_s3 -def test_boto3_key_etag(): - s3 = boto3.client('s3', region_name='us-east-1') - s3.create_bucket(Bucket='mybucket') - s3.put_object(Bucket='mybucket', Key='steve', Body=b'is awesome') - resp = s3.get_object(Bucket='mybucket', Key='steve') - resp['ETag'].should.equal('"d32bda93738f7e03adb22e66c90fbc04"') - - -@mock_s3 -def test_website_redirect_location(): - s3 = boto3.client('s3', region_name='us-east-1') - s3.create_bucket(Bucket='mybucket') - - s3.put_object(Bucket='mybucket', Key='steve', Body=b'is awesome') - resp = s3.get_object(Bucket='mybucket', Key='steve') - resp.get('WebsiteRedirectLocation').should.be.none - - url = 'https://github.com/spulec/moto' - s3.put_object(Bucket='mybucket', Key='steve', Body=b'is awesome', WebsiteRedirectLocation=url) - resp = s3.get_object(Bucket='mybucket', Key='steve') - resp['WebsiteRedirectLocation'].should.equal(url) - - -@mock_s3 -def test_boto3_list_keys_xml_escaped(): - s3 = boto3.client('s3', region_name='us-east-1') - s3.create_bucket(Bucket='mybucket') - key_name = 'Q&A.txt' - s3.put_object(Bucket='mybucket', Key=key_name, Body=b'is awesome') - - resp = s3.list_objects_v2(Bucket='mybucket', Prefix=key_name) - - 
assert resp['Contents'][0]['Key'] == key_name - assert resp['KeyCount'] == 1 - assert resp['MaxKeys'] == 1000 - assert resp['Prefix'] == key_name - assert resp['IsTruncated'] == False - assert 'Delimiter' not in resp - assert 'StartAfter' not in resp - assert 'NextContinuationToken' not in resp - assert 'Owner' not in resp['Contents'][0] - - -@mock_s3 -def test_boto3_list_objects_v2_truncated_response(): - s3 = boto3.client('s3', region_name='us-east-1') - s3.create_bucket(Bucket='mybucket') - s3.put_object(Bucket='mybucket', Key='one', Body=b'1') - s3.put_object(Bucket='mybucket', Key='two', Body=b'22') - s3.put_object(Bucket='mybucket', Key='three', Body=b'333') - - # First list - resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1) - listed_object = resp['Contents'][0] - - assert listed_object['Key'] == 'one' - assert resp['MaxKeys'] == 1 - assert resp['Prefix'] == '' - assert resp['KeyCount'] == 1 - assert resp['IsTruncated'] == True - assert 'Delimiter' not in resp - assert 'StartAfter' not in resp - assert 'Owner' not in listed_object # owner info was not requested - - next_token = resp['NextContinuationToken'] - - # Second list - resp = s3.list_objects_v2( - Bucket='mybucket', MaxKeys=1, ContinuationToken=next_token) - listed_object = resp['Contents'][0] - - assert listed_object['Key'] == 'three' - assert resp['MaxKeys'] == 1 - assert resp['Prefix'] == '' - assert resp['KeyCount'] == 1 - assert resp['IsTruncated'] == True - assert 'Delimiter' not in resp - assert 'StartAfter' not in resp - assert 'Owner' not in listed_object - - next_token = resp['NextContinuationToken'] - - # Third list - resp = s3.list_objects_v2( - Bucket='mybucket', MaxKeys=1, ContinuationToken=next_token) - listed_object = resp['Contents'][0] - - assert listed_object['Key'] == 'two' - assert resp['MaxKeys'] == 1 - assert resp['Prefix'] == '' - assert resp['KeyCount'] == 1 - assert resp['IsTruncated'] == False - assert 'Delimiter' not in resp - assert 'Owner' not in listed_object - assert 'StartAfter' not in resp - assert 'NextContinuationToken' not in resp - - -@mock_s3 -def test_boto3_list_objects_v2_truncated_response_start_after(): - s3 = boto3.client('s3', region_name='us-east-1') - s3.create_bucket(Bucket='mybucket') - s3.put_object(Bucket='mybucket', Key='one', Body=b'1') - s3.put_object(Bucket='mybucket', Key='two', Body=b'22') - s3.put_object(Bucket='mybucket', Key='three', Body=b'333') - - # First list - resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1, StartAfter='one') - listed_object = resp['Contents'][0] - - assert listed_object['Key'] == 'three' - assert resp['MaxKeys'] == 1 - assert resp['Prefix'] == '' - assert resp['KeyCount'] == 1 - assert resp['IsTruncated'] == True - assert resp['StartAfter'] == 'one' - assert 'Delimiter' not in resp - assert 'Owner' not in listed_object - - next_token = resp['NextContinuationToken'] - - # Second list - # The ContinuationToken must take precedence over StartAfter. - resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1, StartAfter='one', - ContinuationToken=next_token) - listed_object = resp['Contents'][0] - - assert listed_object['Key'] == 'two' - assert resp['MaxKeys'] == 1 - assert resp['Prefix'] == '' - assert resp['KeyCount'] == 1 - assert resp['IsTruncated'] == False - # When ContinuationToken is given, StartAfter is ignored. This also means - # AWS does not return it in the response. 
- assert 'StartAfter' not in resp - assert 'Delimiter' not in resp - assert 'Owner' not in listed_object - - -@mock_s3 -def test_boto3_list_objects_v2_fetch_owner(): - s3 = boto3.client('s3', region_name='us-east-1') - s3.create_bucket(Bucket='mybucket') - s3.put_object(Bucket='mybucket', Key='one', Body=b'11') - - resp = s3.list_objects_v2(Bucket='mybucket', FetchOwner=True) - owner = resp['Contents'][0]['Owner'] - - assert 'ID' in owner - assert 'DisplayName' in owner - assert len(owner.keys()) == 2 - - -@mock_s3 -def test_boto3_bucket_create(): - s3 = boto3.resource('s3', region_name='us-east-1') - s3.create_bucket(Bucket="blah") - - s3.Object('blah', 'hello.txt').put(Body="some text") - - s3.Object('blah', 'hello.txt').get()['Body'].read().decode( - "utf-8").should.equal("some text") - - -@mock_s3 -def test_bucket_create_duplicate(): - s3 = boto3.resource('s3', region_name='us-west-2') - s3.create_bucket(Bucket="blah", CreateBucketConfiguration={ - 'LocationConstraint': 'us-west-2', - }) - with assert_raises(ClientError) as exc: - s3.create_bucket( - Bucket="blah", - CreateBucketConfiguration={ - 'LocationConstraint': 'us-west-2', - } - ) - exc.exception.response['Error']['Code'].should.equal('BucketAlreadyExists') - - -@mock_s3 -def test_boto3_bucket_create_eu_central(): - s3 = boto3.resource('s3', region_name='eu-central-1') - s3.create_bucket(Bucket="blah") - - s3.Object('blah', 'hello.txt').put(Body="some text") - - s3.Object('blah', 'hello.txt').get()['Body'].read().decode( - "utf-8").should.equal("some text") - - -@mock_s3 -def test_boto3_head_object(): - s3 = boto3.resource('s3', region_name='us-east-1') - s3.create_bucket(Bucket="blah") - - s3.Object('blah', 'hello.txt').put(Body="some text") - - s3.Object('blah', 'hello.txt').meta.client.head_object( - Bucket='blah', Key='hello.txt') - - with assert_raises(ClientError) as e: - s3.Object('blah', 'hello2.txt').meta.client.head_object( - Bucket='blah', Key='hello_bad.txt') - e.exception.response['Error']['Code'].should.equal('404') - - -@mock_s3 -def test_boto3_bucket_deletion(): - cli = boto3.client('s3', region_name='us-east-1') - cli.create_bucket(Bucket="foobar") - - cli.put_object(Bucket="foobar", Key="the-key", Body="some value") - - # Try to delete a bucket that still has keys - cli.delete_bucket.when.called_with(Bucket="foobar").should.throw( - cli.exceptions.ClientError, - ('An error occurred (BucketNotEmpty) when calling the DeleteBucket operation: ' - 'The bucket you tried to delete is not empty')) - - cli.delete_object(Bucket="foobar", Key="the-key") - cli.delete_bucket(Bucket="foobar") - - # Get non-existing bucket - cli.head_bucket.when.called_with(Bucket="foobar").should.throw( - cli.exceptions.ClientError, - "An error occurred (404) when calling the HeadBucket operation: Not Found") - - # Delete non-existing bucket - cli.delete_bucket.when.called_with(Bucket="foobar").should.throw(cli.exceptions.NoSuchBucket) - - -@mock_s3 -def test_boto3_get_object(): - s3 = boto3.resource('s3', region_name='us-east-1') - s3.create_bucket(Bucket="blah") - - s3.Object('blah', 'hello.txt').put(Body="some text") - - s3.Object('blah', 'hello.txt').meta.client.head_object( - Bucket='blah', Key='hello.txt') - - with assert_raises(ClientError) as e: - s3.Object('blah', 'hello2.txt').get() - - e.exception.response['Error']['Code'].should.equal('NoSuchKey') - - -@mock_s3 -def test_boto3_head_object_with_versioning(): - s3 = boto3.resource('s3', region_name='us-east-1') - bucket = s3.create_bucket(Bucket='blah') - 
bucket.Versioning().enable() - - old_content = 'some text' - new_content = 'some new text' - s3.Object('blah', 'hello.txt').put(Body=old_content) - s3.Object('blah', 'hello.txt').put(Body=new_content) - - head_object = s3.Object('blah', 'hello.txt').meta.client.head_object( - Bucket='blah', Key='hello.txt') - head_object['VersionId'].should.equal('1') - head_object['ContentLength'].should.equal(len(new_content)) - - old_head_object = s3.Object('blah', 'hello.txt').meta.client.head_object( - Bucket='blah', Key='hello.txt', VersionId='0') - old_head_object['VersionId'].should.equal('0') - old_head_object['ContentLength'].should.equal(len(old_content)) - - -@mock_s3 -def test_boto3_copy_object_with_versioning(): - client = boto3.client('s3', region_name='us-east-1') - - client.create_bucket(Bucket='blah', CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'}) - client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'}) - - client.put_object(Bucket='blah', Key='test1', Body=b'test1') - client.put_object(Bucket='blah', Key='test2', Body=b'test2') - - obj1_version = client.get_object(Bucket='blah', Key='test1')['VersionId'] - obj2_version = client.get_object(Bucket='blah', Key='test2')['VersionId'] - - # Versions should be the same - obj1_version.should.equal(obj2_version) - - client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test1'}, Bucket='blah', Key='test2') - obj2_version_new = client.get_object(Bucket='blah', Key='test2')['VersionId'] - - # Version should be different to previous version - obj2_version_new.should_not.equal(obj2_version) - - -@mock_s3 -def test_boto3_deleted_versionings_list(): - client = boto3.client('s3', region_name='us-east-1') - - client.create_bucket(Bucket='blah') - client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'}) - - client.put_object(Bucket='blah', Key='test1', Body=b'test1') - client.put_object(Bucket='blah', Key='test2', Body=b'test2') - client.delete_objects(Bucket='blah', Delete={'Objects': [{'Key': 'test1'}]}) - - listed = client.list_objects_v2(Bucket='blah') - assert len(listed['Contents']) == 1 - - -@mock_s3 -def test_boto3_delete_versioned_bucket(): - client = boto3.client('s3', region_name='us-east-1') - - client.create_bucket(Bucket='blah') - client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'}) - - resp = client.put_object(Bucket='blah', Key='test1', Body=b'test1') - client.delete_object(Bucket='blah', Key='test1', VersionId=resp["VersionId"]) - - client.delete_bucket(Bucket='blah') - - -@mock_s3 -def test_boto3_head_object_if_modified_since(): - s3 = boto3.client('s3', region_name='us-east-1') - bucket_name = "blah" - s3.create_bucket(Bucket=bucket_name) - - key = 'hello.txt' - - s3.put_object( - Bucket=bucket_name, - Key=key, - Body='test' - ) - - with assert_raises(botocore.exceptions.ClientError) as err: - s3.head_object( - Bucket=bucket_name, - Key=key, - IfModifiedSince=datetime.datetime.utcnow() + datetime.timedelta(hours=1) - ) - e = err.exception - e.response['Error'].should.equal({'Code': '304', 'Message': 'Not Modified'}) - - -@mock_s3 -@reduced_min_part_size -def test_boto3_multipart_etag(): - # Create Bucket so that test can run - s3 = boto3.client('s3', region_name='us-east-1') - s3.create_bucket(Bucket='mybucket') - - upload_id = s3.create_multipart_upload( - Bucket='mybucket', Key='the-key')['UploadId'] - part1 = b'0' * REDUCED_PART_SIZE - etags = [] - etags.append( - s3.upload_part(Bucket='mybucket', 
Key='the-key', PartNumber=1, - UploadId=upload_id, Body=part1)['ETag']) - # last part, can be less than 5 MB - part2 = b'1' - etags.append( - s3.upload_part(Bucket='mybucket', Key='the-key', PartNumber=2, - UploadId=upload_id, Body=part2)['ETag']) - s3.complete_multipart_upload( - Bucket='mybucket', Key='the-key', UploadId=upload_id, - MultipartUpload={'Parts': [{'ETag': etag, 'PartNumber': i} - for i, etag in enumerate(etags, 1)]}) - # we should get both parts as the key contents - resp = s3.get_object(Bucket='mybucket', Key='the-key') - resp['ETag'].should.equal(EXPECTED_ETAG) - - -@mock_s3 -def test_boto3_put_object_with_tagging(): - s3 = boto3.client('s3', region_name='us-east-1') - bucket_name = 'mybucket' - key = 'key-with-tags' - s3.create_bucket(Bucket=bucket_name) - - s3.put_object( - Bucket=bucket_name, - Key=key, - Body='test', - Tagging='foo=bar', - ) - - resp = s3.get_object_tagging(Bucket=bucket_name, Key=key) - - resp['TagSet'].should.contain({'Key': 'foo', 'Value': 'bar'}) - - -@mock_s3 -def test_boto3_put_bucket_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") - bucket_name = "mybucket" - s3.create_bucket(Bucket=bucket_name) - - # With 1 tag: - resp = s3.put_bucket_tagging(Bucket=bucket_name, - Tagging={ - "TagSet": [ - { - "Key": "TagOne", - "Value": "ValueOne" - } - ] - }) - resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - - # With multiple tags: - resp = s3.put_bucket_tagging(Bucket=bucket_name, - Tagging={ - "TagSet": [ - { - "Key": "TagOne", - "Value": "ValueOne" - }, - { - "Key": "TagTwo", - "Value": "ValueTwo" - } - ] - }) - - resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - - # No tags is also OK: - resp = s3.put_bucket_tagging(Bucket=bucket_name, Tagging={ - "TagSet": [] - }) - resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - - -@mock_s3 -def test_boto3_get_bucket_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") - bucket_name = "mybucket" - s3.create_bucket(Bucket=bucket_name) - s3.put_bucket_tagging(Bucket=bucket_name, - Tagging={ - "TagSet": [ - { - "Key": "TagOne", - "Value": "ValueOne" - }, - { - "Key": "TagTwo", - "Value": "ValueTwo" - } - ] - }) - - # Get the tags for the bucket: - resp = s3.get_bucket_tagging(Bucket=bucket_name) - resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - len(resp["TagSet"]).should.equal(2) - - # With no tags: - s3.put_bucket_tagging(Bucket=bucket_name, Tagging={ - "TagSet": [] - }) - - with assert_raises(ClientError) as err: - s3.get_bucket_tagging(Bucket=bucket_name) - - e = err.exception - e.response["Error"]["Code"].should.equal("NoSuchTagSet") - e.response["Error"]["Message"].should.equal("The TagSet does not exist") - - -@mock_s3 -def test_boto3_delete_bucket_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") - bucket_name = "mybucket" - s3.create_bucket(Bucket=bucket_name) - - s3.put_bucket_tagging(Bucket=bucket_name, - Tagging={ - "TagSet": [ - { - "Key": "TagOne", - "Value": "ValueOne" - }, - { - "Key": "TagTwo", - "Value": "ValueTwo" - } - ] - }) - - resp = s3.delete_bucket_tagging(Bucket=bucket_name) - resp['ResponseMetadata']['HTTPStatusCode'].should.equal(204) - - with assert_raises(ClientError) as err: - s3.get_bucket_tagging(Bucket=bucket_name) - - e = err.exception - e.response["Error"]["Code"].should.equal("NoSuchTagSet") - e.response["Error"]["Message"].should.equal("The TagSet does not exist") - - -@mock_s3 -def test_boto3_put_bucket_cors(): - s3 = boto3.client("s3", region_name="us-east-1") - bucket_name = "mybucket" - 
s3.create_bucket(Bucket=bucket_name) - - resp = s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ - "CORSRules": [ - { - "AllowedOrigins": [ - "*" - ], - "AllowedMethods": [ - "GET", - "POST" - ], - "AllowedHeaders": [ - "Authorization" - ], - "ExposeHeaders": [ - "x-amz-request-id" - ], - "MaxAgeSeconds": 123 - }, - { - "AllowedOrigins": [ - "*" - ], - "AllowedMethods": [ - "PUT" - ], - "AllowedHeaders": [ - "Authorization" - ], - "ExposeHeaders": [ - "x-amz-request-id" - ], - "MaxAgeSeconds": 123 - } - ] - }) - - resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - - with assert_raises(ClientError) as err: - s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ - "CORSRules": [ - { - "AllowedOrigins": [ - "*" - ], - "AllowedMethods": [ - "NOTREAL", - "POST" - ] - } - ] - }) - e = err.exception - e.response["Error"]["Code"].should.equal("InvalidRequest") - e.response["Error"]["Message"].should.equal("Found unsupported HTTP method in CORS config. " - "Unsupported method is NOTREAL") - - with assert_raises(ClientError) as err: - s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ - "CORSRules": [] - }) - e = err.exception - e.response["Error"]["Code"].should.equal("MalformedXML") - - # And 101: - many_rules = [{"AllowedOrigins": ["*"], "AllowedMethods": ["GET"]}] * 101 - with assert_raises(ClientError) as err: - s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ - "CORSRules": many_rules - }) - e = err.exception - e.response["Error"]["Code"].should.equal("MalformedXML") - - -@mock_s3 -def test_boto3_get_bucket_cors(): - s3 = boto3.client("s3", region_name="us-east-1") - bucket_name = "mybucket" - s3.create_bucket(Bucket=bucket_name) - - # Without CORS: - with assert_raises(ClientError) as err: - s3.get_bucket_cors(Bucket=bucket_name) - - e = err.exception - e.response["Error"]["Code"].should.equal("NoSuchCORSConfiguration") - e.response["Error"]["Message"].should.equal("The CORS configuration does not exist") - - s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ - "CORSRules": [ - { - "AllowedOrigins": [ - "*" - ], - "AllowedMethods": [ - "GET", - "POST" - ], - "AllowedHeaders": [ - "Authorization" - ], - "ExposeHeaders": [ - "x-amz-request-id" - ], - "MaxAgeSeconds": 123 - }, - { - "AllowedOrigins": [ - "*" - ], - "AllowedMethods": [ - "PUT" - ], - "AllowedHeaders": [ - "Authorization" - ], - "ExposeHeaders": [ - "x-amz-request-id" - ], - "MaxAgeSeconds": 123 - } - ] - }) - - resp = s3.get_bucket_cors(Bucket=bucket_name) - resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - len(resp["CORSRules"]).should.equal(2) - - -@mock_s3 -def test_boto3_delete_bucket_cors(): - s3 = boto3.client("s3", region_name="us-east-1") - bucket_name = "mybucket" - s3.create_bucket(Bucket=bucket_name) - s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ - "CORSRules": [ - { - "AllowedOrigins": [ - "*" - ], - "AllowedMethods": [ - "GET" - ] - } - ] - }) - - resp = s3.delete_bucket_cors(Bucket=bucket_name) - resp['ResponseMetadata']['HTTPStatusCode'].should.equal(204) - - # Verify deletion: - with assert_raises(ClientError) as err: - s3.get_bucket_cors(Bucket=bucket_name) - - e = err.exception - e.response["Error"]["Code"].should.equal("NoSuchCORSConfiguration") - e.response["Error"]["Message"].should.equal("The CORS configuration does not exist") - - -@mock_s3 -def test_put_bucket_acl_body(): - s3 = boto3.client("s3", region_name="us-east-1") - s3.create_bucket(Bucket="bucket") - bucket_owner = s3.get_bucket_acl(Bucket="bucket")["Owner"] - 
s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ - "Grants": [ - { - "Grantee": { - "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", - "Type": "Group" - }, - "Permission": "WRITE" - }, - { - "Grantee": { - "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", - "Type": "Group" - }, - "Permission": "READ_ACP" - } - ], - "Owner": bucket_owner - }) - - result = s3.get_bucket_acl(Bucket="bucket") - assert len(result["Grants"]) == 2 - for g in result["Grants"]: - assert g["Grantee"]["URI"] == "http://acs.amazonaws.com/groups/s3/LogDelivery" - assert g["Grantee"]["Type"] == "Group" - assert g["Permission"] in ["WRITE", "READ_ACP"] - - # With one: - s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ - "Grants": [ - { - "Grantee": { - "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", - "Type": "Group" - }, - "Permission": "WRITE" - } - ], - "Owner": bucket_owner - }) - result = s3.get_bucket_acl(Bucket="bucket") - assert len(result["Grants"]) == 1 - - # With no owner: - with assert_raises(ClientError) as err: - s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ - "Grants": [ - { - "Grantee": { - "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", - "Type": "Group" - }, - "Permission": "WRITE" - } - ] - }) - assert err.exception.response["Error"]["Code"] == "MalformedACLError" - - # With incorrect permission: - with assert_raises(ClientError) as err: - s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ - "Grants": [ - { - "Grantee": { - "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", - "Type": "Group" - }, - "Permission": "lskjflkasdjflkdsjfalisdjflkdsjf" - } - ], - "Owner": bucket_owner - }) - assert err.exception.response["Error"]["Code"] == "MalformedACLError" - - # Clear the ACLs: - result = s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={"Grants": [], "Owner": bucket_owner}) - assert not result.get("Grants") - - -@mock_s3 -def test_put_bucket_notification(): - s3 = boto3.client("s3", region_name="us-east-1") - s3.create_bucket(Bucket="bucket") - - # With no configuration: - result = s3.get_bucket_notification(Bucket="bucket") - assert not result.get("TopicConfigurations") - assert not result.get("QueueConfigurations") - assert not result.get("LambdaFunctionConfigurations") - - # Place proper topic configuration: - s3.put_bucket_notification_configuration(Bucket="bucket", - NotificationConfiguration={ - "TopicConfigurations": [ - { - "TopicArn": "arn:aws:sns:us-east-1:012345678910:mytopic", - "Events": [ - "s3:ObjectCreated:*", - "s3:ObjectRemoved:*" - ] - }, - { - "TopicArn": "arn:aws:sns:us-east-1:012345678910:myothertopic", - "Events": [ - "s3:ObjectCreated:*" - ], - "Filter": { - "Key": { - "FilterRules": [ - { - "Name": "prefix", - "Value": "images/" - }, - { - "Name": "suffix", - "Value": "png" - } - ] - } - } - } - ] - }) - - # Verify to completion: - result = s3.get_bucket_notification_configuration(Bucket="bucket") - assert len(result["TopicConfigurations"]) == 2 - assert not result.get("QueueConfigurations") - assert not result.get("LambdaFunctionConfigurations") - assert result["TopicConfigurations"][0]["TopicArn"] == "arn:aws:sns:us-east-1:012345678910:mytopic" - assert result["TopicConfigurations"][1]["TopicArn"] == "arn:aws:sns:us-east-1:012345678910:myothertopic" - assert len(result["TopicConfigurations"][0]["Events"]) == 2 - assert len(result["TopicConfigurations"][1]["Events"]) == 1 - assert result["TopicConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" - assert 
result["TopicConfigurations"][0]["Events"][1] == "s3:ObjectRemoved:*" - assert result["TopicConfigurations"][1]["Events"][0] == "s3:ObjectCreated:*" - assert result["TopicConfigurations"][0]["Id"] - assert result["TopicConfigurations"][1]["Id"] - assert not result["TopicConfigurations"][0].get("Filter") - assert len(result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"]) == 2 - assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" - assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" - assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][1]["Name"] == "suffix" - assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][1]["Value"] == "png" - - # Place proper queue configuration: - s3.put_bucket_notification_configuration(Bucket="bucket", - NotificationConfiguration={ - "QueueConfigurations": [ - { - "Id": "SomeID", - "QueueArn": "arn:aws:sqs:us-east-1:012345678910:myQueue", - "Events": ["s3:ObjectCreated:*"], - "Filter": { - "Key": { - "FilterRules": [ - { - "Name": "prefix", - "Value": "images/" - } - ] - } - } - } - ] - }) - result = s3.get_bucket_notification_configuration(Bucket="bucket") - assert len(result["QueueConfigurations"]) == 1 - assert not result.get("TopicConfigurations") - assert not result.get("LambdaFunctionConfigurations") - assert result["QueueConfigurations"][0]["Id"] == "SomeID" - assert result["QueueConfigurations"][0]["QueueArn"] == "arn:aws:sqs:us-east-1:012345678910:myQueue" - assert result["QueueConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" - assert len(result["QueueConfigurations"][0]["Events"]) == 1 - assert len(result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"]) == 1 - assert result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" - assert result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" - - # Place proper Lambda configuration: - s3.put_bucket_notification_configuration(Bucket="bucket", - NotificationConfiguration={ - "LambdaFunctionConfigurations": [ - { - "LambdaFunctionArn": - "arn:aws:lambda:us-east-1:012345678910:function:lambda", - "Events": ["s3:ObjectCreated:*"], - "Filter": { - "Key": { - "FilterRules": [ - { - "Name": "prefix", - "Value": "images/" - } - ] - } - } - } - ] - }) - result = s3.get_bucket_notification_configuration(Bucket="bucket") - assert len(result["LambdaFunctionConfigurations"]) == 1 - assert not result.get("TopicConfigurations") - assert not result.get("QueueConfigurations") - assert result["LambdaFunctionConfigurations"][0]["Id"] - assert result["LambdaFunctionConfigurations"][0]["LambdaFunctionArn"] == \ - "arn:aws:lambda:us-east-1:012345678910:function:lambda" - assert result["LambdaFunctionConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" - assert len(result["LambdaFunctionConfigurations"][0]["Events"]) == 1 - assert len(result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"]) == 1 - assert result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" - assert result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" - - # And with all 3 set: - s3.put_bucket_notification_configuration(Bucket="bucket", - NotificationConfiguration={ - "TopicConfigurations": [ - { - "TopicArn": "arn:aws:sns:us-east-1:012345678910:mytopic", - "Events": [ - "s3:ObjectCreated:*", - "s3:ObjectRemoved:*" - ] - } - ], - 
"LambdaFunctionConfigurations": [ - { - "LambdaFunctionArn": - "arn:aws:lambda:us-east-1:012345678910:function:lambda", - "Events": ["s3:ObjectCreated:*"] - } - ], - "QueueConfigurations": [ - { - "QueueArn": "arn:aws:sqs:us-east-1:012345678910:myQueue", - "Events": ["s3:ObjectCreated:*"] - } - ] - }) - result = s3.get_bucket_notification_configuration(Bucket="bucket") - assert len(result["LambdaFunctionConfigurations"]) == 1 - assert len(result["TopicConfigurations"]) == 1 - assert len(result["QueueConfigurations"]) == 1 - - # And clear it out: - s3.put_bucket_notification_configuration(Bucket="bucket", NotificationConfiguration={}) - result = s3.get_bucket_notification_configuration(Bucket="bucket") - assert not result.get("TopicConfigurations") - assert not result.get("QueueConfigurations") - assert not result.get("LambdaFunctionConfigurations") - - -@mock_s3 -def test_put_bucket_notification_errors(): - s3 = boto3.client("s3", region_name="us-east-1") - s3.create_bucket(Bucket="bucket") - - # With incorrect ARNs: - for tech, arn in [("Queue", "sqs"), ("Topic", "sns"), ("LambdaFunction", "lambda")]: - with assert_raises(ClientError) as err: - s3.put_bucket_notification_configuration(Bucket="bucket", - NotificationConfiguration={ - "{}Configurations".format(tech): [ - { - "{}Arn".format(tech): - "arn:aws:{}:us-east-1:012345678910:lksajdfkldskfj", - "Events": ["s3:ObjectCreated:*"] - } - ] - }) - - assert err.exception.response["Error"]["Code"] == "InvalidArgument" - assert err.exception.response["Error"]["Message"] == "The ARN is not well formed" - - # Region not the same as the bucket: - with assert_raises(ClientError) as err: - s3.put_bucket_notification_configuration(Bucket="bucket", - NotificationConfiguration={ - "QueueConfigurations": [ - { - "QueueArn": - "arn:aws:sqs:us-west-2:012345678910:lksajdfkldskfj", - "Events": ["s3:ObjectCreated:*"] - } - ] - }) - - assert err.exception.response["Error"]["Code"] == "InvalidArgument" - assert err.exception.response["Error"]["Message"] == \ - "The notification destination service region is not valid for the bucket location constraint" - - # Invalid event name: - with assert_raises(ClientError) as err: - s3.put_bucket_notification_configuration(Bucket="bucket", - NotificationConfiguration={ - "QueueConfigurations": [ - { - "QueueArn": - "arn:aws:sqs:us-east-1:012345678910:lksajdfkldskfj", - "Events": ["notarealeventname"] - } - ] - }) - assert err.exception.response["Error"]["Code"] == "InvalidArgument" - assert err.exception.response["Error"]["Message"] == "The event is not supported for notifications" - - -@mock_s3 -def test_boto3_put_bucket_logging(): - s3 = boto3.client("s3", region_name="us-east-1") - bucket_name = "mybucket" - log_bucket = "logbucket" - wrong_region_bucket = "wrongregionlogbucket" - s3.create_bucket(Bucket=bucket_name) - s3.create_bucket(Bucket=log_bucket) # Adding the ACL for log-delivery later... 
- s3.create_bucket(Bucket=wrong_region_bucket, CreateBucketConfiguration={"LocationConstraint": "us-west-2"}) - - # No logging config: - result = s3.get_bucket_logging(Bucket=bucket_name) - assert not result.get("LoggingEnabled") - - # A log-bucket that doesn't exist: - with assert_raises(ClientError) as err: - s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ - "LoggingEnabled": { - "TargetBucket": "IAMNOTREAL", - "TargetPrefix": "" - } - }) - assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging" - - # A log-bucket that's missing the proper ACLs for LogDelivery: - with assert_raises(ClientError) as err: - s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ - "LoggingEnabled": { - "TargetBucket": log_bucket, - "TargetPrefix": "" - } - }) - assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging" - assert "log-delivery" in err.exception.response["Error"]["Message"] - - # Add the proper "log-delivery" ACL to the log buckets: - bucket_owner = s3.get_bucket_acl(Bucket=log_bucket)["Owner"] - for bucket in [log_bucket, wrong_region_bucket]: - s3.put_bucket_acl(Bucket=bucket, AccessControlPolicy={ - "Grants": [ - { - "Grantee": { - "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", - "Type": "Group" - }, - "Permission": "WRITE" - }, - { - "Grantee": { - "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", - "Type": "Group" - }, - "Permission": "READ_ACP" - }, - { - "Grantee": { - "Type": "CanonicalUser", - "ID": bucket_owner["ID"] - }, - "Permission": "FULL_CONTROL" - } - ], - "Owner": bucket_owner - }) - - # A log-bucket that's in the wrong region: - with assert_raises(ClientError) as err: - s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ - "LoggingEnabled": { - "TargetBucket": wrong_region_bucket, - "TargetPrefix": "" - } - }) - assert err.exception.response["Error"]["Code"] == "CrossLocationLoggingProhibitted" - - # Correct logging: - s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ - "LoggingEnabled": { - "TargetBucket": log_bucket, - "TargetPrefix": "{}/".format(bucket_name) - } - }) - result = s3.get_bucket_logging(Bucket=bucket_name) - assert result["LoggingEnabled"]["TargetBucket"] == log_bucket - assert result["LoggingEnabled"]["TargetPrefix"] == "{}/".format(bucket_name) - assert not result["LoggingEnabled"].get("TargetGrants") - - # And disabling: - s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={}) - assert not s3.get_bucket_logging(Bucket=bucket_name).get("LoggingEnabled") - - # And enabling with multiple target grants: - s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ - "LoggingEnabled": { - "TargetBucket": log_bucket, - "TargetPrefix": "{}/".format(bucket_name), - "TargetGrants": [ - { - "Grantee": { - "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", - "Type": "CanonicalUser" - }, - "Permission": "READ" - }, - { - "Grantee": { - "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", - "Type": "CanonicalUser" - }, - "Permission": "WRITE" - } - ] - } - }) - - result = s3.get_bucket_logging(Bucket=bucket_name) - assert len(result["LoggingEnabled"]["TargetGrants"]) == 2 - assert result["LoggingEnabled"]["TargetGrants"][0]["Grantee"]["ID"] == \ - "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274" - - # Test with just 1 grant: - s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ - "LoggingEnabled": { - "TargetBucket": log_bucket, - "TargetPrefix": "{}/".format(bucket_name), 
- "TargetGrants": [ - { - "Grantee": { - "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", - "Type": "CanonicalUser" - }, - "Permission": "READ" - } - ] - } - }) - result = s3.get_bucket_logging(Bucket=bucket_name) - assert len(result["LoggingEnabled"]["TargetGrants"]) == 1 - - # With an invalid grant: - with assert_raises(ClientError) as err: - s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ - "LoggingEnabled": { - "TargetBucket": log_bucket, - "TargetPrefix": "{}/".format(bucket_name), - "TargetGrants": [ - { - "Grantee": { - "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", - "Type": "CanonicalUser" - }, - "Permission": "NOTAREALPERM" - } - ] - } - }) - assert err.exception.response["Error"]["Code"] == "MalformedXML" - - -@mock_s3 -def test_boto3_put_object_tagging(): - s3 = boto3.client('s3', region_name='us-east-1') - bucket_name = 'mybucket' - key = 'key-with-tags' - s3.create_bucket(Bucket=bucket_name) - - with assert_raises(ClientError) as err: - s3.put_object_tagging( - Bucket=bucket_name, - Key=key, - Tagging={'TagSet': [ - {'Key': 'item1', 'Value': 'foo'}, - {'Key': 'item2', 'Value': 'bar'}, - ]} - ) - - e = err.exception - e.response['Error'].should.equal({ - 'Code': 'NoSuchKey', - 'Message': 'The specified key does not exist.', - 'RequestID': '7a62c49f-347e-4fc4-9331-6e8eEXAMPLE', - }) - - s3.put_object( - Bucket=bucket_name, - Key=key, - Body='test' - ) - - resp = s3.put_object_tagging( - Bucket=bucket_name, - Key=key, - Tagging={'TagSet': [ - {'Key': 'item1', 'Value': 'foo'}, - {'Key': 'item2', 'Value': 'bar'}, - ]} - ) - - resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - - -@mock_s3 -def test_boto3_put_object_tagging_with_single_tag(): - s3 = boto3.client('s3', region_name='us-east-1') - bucket_name = 'mybucket' - key = 'key-with-tags' - s3.create_bucket(Bucket=bucket_name) - - s3.put_object( - Bucket=bucket_name, - Key=key, - Body='test' - ) - - resp = s3.put_object_tagging( - Bucket=bucket_name, - Key=key, - Tagging={'TagSet': [ - {'Key': 'item1', 'Value': 'foo'} - ]} - ) - - resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - - -@mock_s3 -def test_boto3_get_object_tagging(): - s3 = boto3.client('s3', region_name='us-east-1') - bucket_name = 'mybucket' - key = 'key-with-tags' - s3.create_bucket(Bucket=bucket_name) - - s3.put_object( - Bucket=bucket_name, - Key=key, - Body='test' - ) - - resp = s3.get_object_tagging(Bucket=bucket_name, Key=key) - resp['TagSet'].should.have.length_of(0) - - resp = s3.put_object_tagging( - Bucket=bucket_name, - Key=key, - Tagging={'TagSet': [ - {'Key': 'item1', 'Value': 'foo'}, - {'Key': 'item2', 'Value': 'bar'}, - ]} - ) - resp = s3.get_object_tagging(Bucket=bucket_name, Key=key) - - resp['TagSet'].should.have.length_of(2) - resp['TagSet'].should.contain({'Key': 'item1', 'Value': 'foo'}) - resp['TagSet'].should.contain({'Key': 'item2', 'Value': 'bar'}) - - -@mock_s3 -def test_boto3_list_object_versions(): - s3 = boto3.client('s3', region_name='us-east-1') - bucket_name = 'mybucket' - key = 'key-with-versions' - s3.create_bucket(Bucket=bucket_name) - s3.put_bucket_versioning( - Bucket=bucket_name, - VersioningConfiguration={ - 'Status': 'Enabled' - } - ) - items = (six.b('v1'), six.b('v2')) - for body in items: - s3.put_object( - Bucket=bucket_name, - Key=key, - Body=body - ) - response = s3.list_object_versions( - Bucket=bucket_name - ) - # Two object versions should be returned - len(response['Versions']).should.equal(2) - keys = set([item['Key'] for item 
in response['Versions']]) - keys.should.equal({key}) - # Test latest object version is returned - response = s3.get_object(Bucket=bucket_name, Key=key) - response['Body'].read().should.equal(items[-1]) - - -@mock_s3 -def test_boto3_bad_prefix_list_object_versions(): - s3 = boto3.client('s3', region_name='us-east-1') - bucket_name = 'mybucket' - key = 'key-with-versions' - bad_prefix = 'key-that-does-not-exist' - s3.create_bucket(Bucket=bucket_name) - s3.put_bucket_versioning( - Bucket=bucket_name, - VersioningConfiguration={ - 'Status': 'Enabled' - } - ) - items = (six.b('v1'), six.b('v2')) - for body in items: - s3.put_object( - Bucket=bucket_name, - Key=key, - Body=body - ) - response = s3.list_object_versions( - Bucket=bucket_name, - Prefix=bad_prefix, - ) - response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - response.should_not.contain('Versions') - response.should_not.contain('DeleteMarkers') - - -@mock_s3 -def test_boto3_delete_markers(): - s3 = boto3.client('s3', region_name='us-east-1') - bucket_name = 'mybucket' - key = u'key-with-versions-and-unicode-ó' - s3.create_bucket(Bucket=bucket_name) - s3.put_bucket_versioning( - Bucket=bucket_name, - VersioningConfiguration={ - 'Status': 'Enabled' - } - ) - items = (six.b('v1'), six.b('v2')) - for body in items: - s3.put_object( - Bucket=bucket_name, - Key=key, - Body=body - ) - - s3.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': key}]}) - - with assert_raises(ClientError) as e: - s3.get_object( - Bucket=bucket_name, - Key=key - ) - e.response['Error']['Code'].should.equal('404') - - s3.delete_object( - Bucket=bucket_name, - Key=key, - VersionId='2' - ) - response = s3.get_object( - Bucket=bucket_name, - Key=key - ) - response['Body'].read().should.equal(items[-1]) - response = s3.list_object_versions( - Bucket=bucket_name - ) - response['Versions'].should.have.length_of(2) - - # We've asserted there is only 2 records so one is newest, one is oldest - latest = list(filter(lambda item: item['IsLatest'], response['Versions']))[0] - oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0] - - # Double check ordering of version ID's - latest['VersionId'].should.equal('1') - oldest['VersionId'].should.equal('0') - - # Double check the name is still unicode - latest['Key'].should.equal('key-with-versions-and-unicode-ó') - oldest['Key'].should.equal('key-with-versions-and-unicode-ó') - - -@mock_s3 -def test_boto3_multiple_delete_markers(): - s3 = boto3.client('s3', region_name='us-east-1') - bucket_name = 'mybucket' - key = u'key-with-versions-and-unicode-ó' - s3.create_bucket(Bucket=bucket_name) - s3.put_bucket_versioning( - Bucket=bucket_name, - VersioningConfiguration={ - 'Status': 'Enabled' - } - ) - items = (six.b('v1'), six.b('v2')) - for body in items: - s3.put_object( - Bucket=bucket_name, - Key=key, - Body=body - ) - - # Delete the object twice to add multiple delete markers - s3.delete_object(Bucket=bucket_name, Key=key) - s3.delete_object(Bucket=bucket_name, Key=key) - - response = s3.list_object_versions(Bucket=bucket_name) - response['DeleteMarkers'].should.have.length_of(2) - - with assert_raises(ClientError) as e: - s3.get_object( - Bucket=bucket_name, - Key=key - ) - e.response['Error']['Code'].should.equal('404') - - # Remove both delete markers to restore the object - s3.delete_object( - Bucket=bucket_name, - Key=key, - VersionId='2' - ) - s3.delete_object( - Bucket=bucket_name, - Key=key, - VersionId='3' - ) - - response = s3.get_object( - Bucket=bucket_name, - Key=key - 
)
-    response['Body'].read().should.equal(items[-1])
-    response = s3.list_object_versions(Bucket=bucket_name)
-    response['Versions'].should.have.length_of(2)
-
-    # We've asserted there is only 2 records so one is newest, one is oldest
-    latest = list(filter(lambda item: item['IsLatest'], response['Versions']))[0]
-    oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0]
-
-    # Double check ordering of version ID's
-    latest['VersionId'].should.equal('1')
-    oldest['VersionId'].should.equal('0')
-
-    # Double check the name is still unicode
-    latest['Key'].should.equal('key-with-versions-and-unicode-ó')
-    oldest['Key'].should.equal('key-with-versions-and-unicode-ó')
-
-@mock_s3
-def test_get_stream_gzipped():
-    payload = b"this is some stuff here"
-
-    s3_client = boto3.client("s3", region_name='us-east-1')
-    s3_client.create_bucket(Bucket='moto-tests')
-    buffer_ = BytesIO()
-    with GzipFile(fileobj=buffer_, mode='w') as f:
-        f.write(payload)
-    payload_gz = buffer_.getvalue()
-
-    s3_client.put_object(
-        Bucket='moto-tests',
-        Key='keyname',
-        Body=payload_gz,
-        ContentEncoding='gzip',
-    )
-
-    obj = s3_client.get_object(
-        Bucket='moto-tests',
-        Key='keyname',
-    )
-    res = zlib.decompress(obj['Body'].read(), 16 + zlib.MAX_WBITS)
-    assert res == payload
-
-
-TEST_XML = """\
-<?xml version="1.0" encoding="UTF-8"?>
-<ns0:WebsiteConfiguration xmlns:ns0="http://s3.amazonaws.com/doc/2006-03-01/">
-    <ns0:IndexDocument>
-        <ns0:Suffix>index.html</ns0:Suffix>
-    </ns0:IndexDocument>
-    <ns0:RoutingRules>
-        <ns0:RoutingRule>
-            <ns0:Condition>
-                <ns0:KeyPrefixEquals>test/testing</ns0:KeyPrefixEquals>
-            </ns0:Condition>
-            <ns0:Redirect>
-                <ns0:ReplaceKeyWith>test.txt</ns0:ReplaceKeyWith>
-            </ns0:Redirect>
-        </ns0:RoutingRule>
-    </ns0:RoutingRules>
-</ns0:WebsiteConfiguration>
-"""
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import datetime
+from six.moves.urllib.request import urlopen
+from six.moves.urllib.error import HTTPError
+from functools import wraps
+from gzip import GzipFile
+from io import BytesIO
+import zlib
+
+import json
+import boto
+import boto3
+from botocore.client import ClientError
+import botocore.exceptions
+from boto.exception import S3CreateError, S3ResponseError
+from botocore.handlers import disable_signing
+from boto.s3.connection import S3Connection
+from boto.s3.key import Key
+from freezegun import freeze_time
+import six
+import requests
+import tests.backport_assert_raises  # noqa
+from nose.tools import assert_raises
+
+import sure  # noqa
+
+from moto import settings, mock_s3, mock_s3_deprecated
+import moto.s3.models as s3model
+
+if settings.TEST_SERVER_MODE:
+    REDUCED_PART_SIZE = s3model.UPLOAD_PART_MIN_SIZE
+    EXPECTED_ETAG = '"140f92a6df9f9e415f74a1463bcee9bb-2"'
+else:
+    REDUCED_PART_SIZE = 256
+    EXPECTED_ETAG = '"66d1a1a2ed08fd05c137f316af4ff255-2"'
+
+
+def reduced_min_part_size(f):
+    """ speed up tests by temporarily making the multipart minimum part size
+    small
+    """
+    orig_size = s3model.UPLOAD_PART_MIN_SIZE
+
+    @wraps(f)
+    def wrapped(*args, **kwargs):
+        try:
+            s3model.UPLOAD_PART_MIN_SIZE = REDUCED_PART_SIZE
+            return f(*args, **kwargs)
+        finally:
+            s3model.UPLOAD_PART_MIN_SIZE = orig_size
+
+    return wrapped
+
+
+class MyModel(object):
+
+    def __init__(self, name, value):
+        self.name = name
+        self.value = value
+
+    def save(self):
+        s3 = boto3.client('s3', region_name='us-east-1')
+        s3.put_object(Bucket='mybucket', Key=self.name, Body=self.value)
+
+
+@mock_s3
+def test_my_model_save():
+    # Create Bucket so that test can run
+    conn = boto3.resource('s3', region_name='us-east-1')
+    conn.create_bucket(Bucket='mybucket')
+    ####################################
+
+    model_instance = MyModel('steve', 'is awesome')
+    model_instance.save()
+
+    body = conn.Object('mybucket', 'steve').get()['Body'].read().decode()
+
+    assert body == 'is awesome'
+
+
+@mock_s3
+def test_key_etag():
+    conn = boto3.resource('s3', region_name='us-east-1')
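+    # For a plain (non-multipart) PUT, the ETag should be the hex MD5 digest
+    # of the object body, which is what the literal hash asserted below encodes.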
+    conn.create_bucket(Bucket='mybucket')
+
+    model_instance = MyModel('steve', 'is awesome')
+    model_instance.save()
+
+    conn.Bucket('mybucket').Object('steve').e_tag.should.equal(
+        '"d32bda93738f7e03adb22e66c90fbc04"')
+
+
+@mock_s3_deprecated
+def test_multipart_upload_too_small():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    multipart.upload_part_from_file(BytesIO(b'hello'), 1)
+    multipart.upload_part_from_file(BytesIO(b'world'), 2)
+    # Multipart with total size under 5MB is refused
+    multipart.complete_upload.should.throw(S3ResponseError)
+
+
+@mock_s3_deprecated
+@reduced_min_part_size
+def test_multipart_upload():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    part1 = b'0' * REDUCED_PART_SIZE
+    multipart.upload_part_from_file(BytesIO(part1), 1)
+    # last part, can be less than 5 MB
+    part2 = b'1'
+    multipart.upload_part_from_file(BytesIO(part2), 2)
+    multipart.complete_upload()
+    # we should get both parts as the key contents
+    bucket.get_key(
+        "the-key").get_contents_as_string().should.equal(part1 + part2)
+
+
+@mock_s3_deprecated
+@reduced_min_part_size
+def test_multipart_upload_out_of_order():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    # last part, can be less than 5 MB
+    part2 = b'1'
+    multipart.upload_part_from_file(BytesIO(part2), 4)
+    part1 = b'0' * REDUCED_PART_SIZE
+    multipart.upload_part_from_file(BytesIO(part1), 2)
+    multipart.complete_upload()
+    # we should get both parts as the key contents
+    bucket.get_key(
+        "the-key").get_contents_as_string().should.equal(part1 + part2)
+
+
+@mock_s3_deprecated
+@reduced_min_part_size
+def test_multipart_upload_with_headers():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+
+    multipart = bucket.initiate_multipart_upload(
+        "the-key", metadata={"foo": "bar"})
+    part1 = b'0' * 10
+    multipart.upload_part_from_file(BytesIO(part1), 1)
+    multipart.complete_upload()
+
+    key = bucket.get_key("the-key")
+    key.metadata.should.equal({"foo": "bar"})
+
+
+@mock_s3_deprecated
+@reduced_min_part_size
+def test_multipart_upload_with_copy_key():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "original-key"
+    key.set_contents_from_string("key_value")
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    part1 = b'0' * REDUCED_PART_SIZE
+    multipart.upload_part_from_file(BytesIO(part1), 1)
+    multipart.copy_part_from_key("foobar", "original-key", 2, 0, 3)
+    multipart.complete_upload()
+    bucket.get_key(
+        "the-key").get_contents_as_string().should.equal(part1 + b"key_")
+
+
+@mock_s3_deprecated
+@reduced_min_part_size
+def test_multipart_upload_cancel():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    part1 = b'0' * REDUCED_PART_SIZE
+    multipart.upload_part_from_file(BytesIO(part1), 1)
+    multipart.cancel_upload()
+    # TODO we really need some sort of assertion here, but we don't currently
+    # have the ability to list multipart uploads for a bucket.
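+    # A rough sketch of what that assertion could look like once listing is
+    # supported; boto3 exposes client.list_multipart_uploads, but whether the
+    # mock backs it here is an open question, so it stays commented out:
+    #
+    #   client = boto3.client('s3', region_name='us-east-1')
+    #   uploads = client.list_multipart_uploads(Bucket='foobar').get('Uploads', [])
+    #   assert uploads == []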
+
+
+@mock_s3_deprecated
+@reduced_min_part_size
+def test_multipart_etag():
+    # Create Bucket so that test can run
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket('mybucket')
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    part1 = b'0' * REDUCED_PART_SIZE
+    multipart.upload_part_from_file(BytesIO(part1), 1)
+    # last part, can be less than 5 MB
+    part2 = b'1'
+    multipart.upload_part_from_file(BytesIO(part2), 2)
+    multipart.complete_upload()
+    # we should get both parts as the key contents
+    bucket.get_key("the-key").etag.should.equal(EXPECTED_ETAG)
+
+
+@mock_s3_deprecated
+@reduced_min_part_size
+def test_multipart_invalid_order():
+    # Create Bucket so that test can run
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket('mybucket')
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    part1 = b'0' * 5242880
+    etag1 = multipart.upload_part_from_file(BytesIO(part1), 1).etag
+    # last part, can be less than 5 MB
+    part2 = b'1'
+    etag2 = multipart.upload_part_from_file(BytesIO(part2), 2).etag
+    xml = "<Part><PartNumber>{0}</PartNumber><ETag>{1}</ETag></Part>"
+    xml = xml.format(2, etag2) + xml.format(1, etag1)
+    xml = "<CompleteMultipartUpload>{0}</CompleteMultipartUpload>".format(xml)
+    bucket.complete_multipart_upload.when.called_with(
+        multipart.key_name, multipart.id, xml).should.throw(S3ResponseError)
+
+@mock_s3_deprecated
+@reduced_min_part_size
+def test_multipart_etag_quotes_stripped():
+    # Create Bucket so that test can run
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket('mybucket')
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    part1 = b'0' * REDUCED_PART_SIZE
+    etag1 = multipart.upload_part_from_file(BytesIO(part1), 1).etag
+    # last part, can be less than 5 MB
+    part2 = b'1'
+    etag2 = multipart.upload_part_from_file(BytesIO(part2), 2).etag
+    # Strip quotes from etags
+    etag1 = etag1.replace('"','')
+    etag2 = etag2.replace('"','')
+    xml = "<Part><PartNumber>{0}</PartNumber><ETag>{1}</ETag></Part>"
+    xml = xml.format(1, etag1) + xml.format(2, etag2)
+    xml = "<CompleteMultipartUpload>{0}</CompleteMultipartUpload>".format(xml)
+    bucket.complete_multipart_upload.when.called_with(
+        multipart.key_name, multipart.id, xml).should_not.throw(S3ResponseError)
+    # we should get both parts as the key contents
+    bucket.get_key("the-key").etag.should.equal(EXPECTED_ETAG)
+
+@mock_s3_deprecated
+@reduced_min_part_size
+def test_multipart_duplicate_upload():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+
+    multipart = bucket.initiate_multipart_upload("the-key")
+    part1 = b'0' * REDUCED_PART_SIZE
+    multipart.upload_part_from_file(BytesIO(part1), 1)
+    # same part again
+    multipart.upload_part_from_file(BytesIO(part1), 1)
+    part2 = b'1' * 1024
+    multipart.upload_part_from_file(BytesIO(part2), 2)
+    multipart.complete_upload()
+    # We should get only one copy of part 1.
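+    # (Re-uploading an already-used part number should overwrite the earlier
+    # data for that part, which is why part1 appears exactly once below.)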
+ bucket.get_key( + "the-key").get_contents_as_string().should.equal(part1 + part2) + + +@mock_s3_deprecated +def test_list_multiparts(): + # Create Bucket so that test can run + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket('mybucket') + + multipart1 = bucket.initiate_multipart_upload("one-key") + multipart2 = bucket.initiate_multipart_upload("two-key") + uploads = bucket.get_all_multipart_uploads() + uploads.should.have.length_of(2) + dict([(u.key_name, u.id) for u in uploads]).should.equal( + {'one-key': multipart1.id, 'two-key': multipart2.id}) + multipart2.cancel_upload() + uploads = bucket.get_all_multipart_uploads() + uploads.should.have.length_of(1) + uploads[0].key_name.should.equal("one-key") + multipart1.cancel_upload() + uploads = bucket.get_all_multipart_uploads() + uploads.should.be.empty + + +@mock_s3_deprecated +def test_key_save_to_missing_bucket(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.get_bucket('mybucket', validate=False) + + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string.when.called_with( + "foobar").should.throw(S3ResponseError) + + +@mock_s3_deprecated +def test_missing_key(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + bucket.get_key("the-key").should.equal(None) + + +@mock_s3_deprecated +def test_missing_key_urllib2(): + conn = boto.connect_s3('the_key', 'the_secret') + conn.create_bucket("foobar") + + urlopen.when.called_with( + "http://foobar.s3.amazonaws.com/the-key").should.throw(HTTPError) + + +@mock_s3_deprecated +def test_empty_key(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("") + + key = bucket.get_key("the-key") + key.size.should.equal(0) + key.get_contents_as_string().should.equal(b'') + + +@mock_s3_deprecated +def test_empty_key_set_on_existing_key(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("foobar") + + key = bucket.get_key("the-key") + key.size.should.equal(6) + key.get_contents_as_string().should.equal(b'foobar') + + key.set_contents_from_string("") + bucket.get_key("the-key").get_contents_as_string().should.equal(b'') + + +@mock_s3_deprecated +def test_large_key_save(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("foobar" * 100000) + + bucket.get_key( + "the-key").get_contents_as_string().should.equal(b'foobar' * 100000) + + +@mock_s3_deprecated +def test_copy_key(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("some value") + + bucket.copy_key('new-key', 'foobar', 'the-key') + + bucket.get_key( + "the-key").get_contents_as_string().should.equal(b"some value") + bucket.get_key( + "new-key").get_contents_as_string().should.equal(b"some value") + + +@mock_s3_deprecated +def test_copy_key_with_version(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + bucket.configure_versioning(versioning=True) + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("some value") + key.set_contents_from_string("another value") + + bucket.copy_key('new-key', 'foobar', 'the-key', src_version_id='0') + + 
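+    # Copying with src_version_id='0' should pick up the first version
+    # ("some value"), while 'the-key' itself still serves the latest
+    # ("another value"); the assertions below check both.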
+    bucket.get_key(
+        "the-key").get_contents_as_string().should.equal(b"another value")
+    bucket.get_key(
+        "new-key").get_contents_as_string().should.equal(b"some value")
+
+
+@mock_s3_deprecated
+def test_set_metadata():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = 'the-key'
+    key.set_metadata('md', 'Metadatastring')
+    key.set_contents_from_string("Testval")
+
+    bucket.get_key('the-key').get_metadata('md').should.equal('Metadatastring')
+
+
+@mock_s3_deprecated
+def test_copy_key_replace_metadata():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_metadata('md', 'Metadatastring')
+    key.set_contents_from_string("some value")
+
+    bucket.copy_key('new-key', 'foobar', 'the-key',
+                    metadata={'momd': 'Mometadatastring'})
+
+    bucket.get_key("new-key").get_metadata('md').should.be.none
+    bucket.get_key(
+        "new-key").get_metadata('momd').should.equal('Mometadatastring')
+
+
+@freeze_time("2012-01-01 12:00:00")
+@mock_s3_deprecated
+def test_last_modified():
+    # See https://github.com/boto/boto/issues/466
+    conn = boto.connect_s3()
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_contents_from_string("some value")
+
+    rs = bucket.get_all_keys()
+    rs[0].last_modified.should.equal('2012-01-01T12:00:00.000Z')
+
+    bucket.get_key(
+        "the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT')
+
+
+@mock_s3_deprecated
+def test_missing_bucket():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    conn.get_bucket.when.called_with('mybucket').should.throw(S3ResponseError)
+
+
+@mock_s3_deprecated
+def test_bucket_with_dash():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    conn.get_bucket.when.called_with(
+        'mybucket-test').should.throw(S3ResponseError)
+
+
+@mock_s3_deprecated
+def test_create_existing_bucket():
+    "Trying to create a bucket that already exists should raise an Error"
+    conn = boto.s3.connect_to_region("us-west-2")
+    conn.create_bucket("foobar")
+    with assert_raises(S3CreateError):
+        conn.create_bucket('foobar')
+
+
+@mock_s3_deprecated
+def test_create_existing_bucket_in_us_east_1():
+    "Trying to create a bucket that already exists in us-east-1 returns the bucket"
+
+    """
+    http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
+    Your previous request to create the named bucket succeeded and you already
+    own it. You get this error in all AWS regions except US Standard,
+    us-east-1. In the us-east-1 region, you will get 200 OK, but it is a
+    no-op (if the bucket exists, Amazon S3 will not do anything).
+ """ + conn = boto.s3.connect_to_region("us-east-1") + conn.create_bucket("foobar") + bucket = conn.create_bucket("foobar") + bucket.name.should.equal("foobar") + + +@mock_s3_deprecated +def test_other_region(): + conn = S3Connection( + 'key', 'secret', host='s3-website-ap-southeast-2.amazonaws.com') + conn.create_bucket("foobar") + list(conn.get_bucket("foobar").get_all_keys()).should.equal([]) + + +@mock_s3_deprecated +def test_bucket_deletion(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("some value") + + # Try to delete a bucket that still has keys + conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError) + + bucket.delete_key("the-key") + conn.delete_bucket("foobar") + + # Get non-existing bucket + conn.get_bucket.when.called_with("foobar").should.throw(S3ResponseError) + + # Delete non-existant bucket + conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError) + + +@mock_s3_deprecated +def test_get_all_buckets(): + conn = boto.connect_s3('the_key', 'the_secret') + conn.create_bucket("foobar") + conn.create_bucket("foobar2") + buckets = conn.get_all_buckets() + + buckets.should.have.length_of(2) + + +@mock_s3 +@mock_s3_deprecated +def test_post_to_bucket(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + + requests.post("https://foobar.s3.amazonaws.com/", { + 'key': 'the-key', + 'file': 'nothing' + }) + + bucket.get_key('the-key').get_contents_as_string().should.equal(b'nothing') + + +@mock_s3 +@mock_s3_deprecated +def test_post_with_metadata_to_bucket(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + + requests.post("https://foobar.s3.amazonaws.com/", { + 'key': 'the-key', + 'file': 'nothing', + 'x-amz-meta-test': 'metadata' + }) + + bucket.get_key('the-key').get_metadata('test').should.equal('metadata') + + +@mock_s3_deprecated +def test_delete_missing_key(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket('foobar') + + deleted_key = bucket.delete_key("foobar") + deleted_key.key.should.equal("foobar") + + +@mock_s3_deprecated +def test_delete_keys(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket('foobar') + + Key(bucket=bucket, name='file1').set_contents_from_string('abc') + Key(bucket=bucket, name='file2').set_contents_from_string('abc') + Key(bucket=bucket, name='file3').set_contents_from_string('abc') + Key(bucket=bucket, name='file4').set_contents_from_string('abc') + + result = bucket.delete_keys(['file2', 'file3']) + result.deleted.should.have.length_of(2) + result.errors.should.have.length_of(0) + keys = bucket.get_all_keys() + keys.should.have.length_of(2) + keys[0].name.should.equal('file1') + + +@mock_s3_deprecated +def test_delete_keys_with_invalid(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket('foobar') + + Key(bucket=bucket, name='file1').set_contents_from_string('abc') + Key(bucket=bucket, name='file2').set_contents_from_string('abc') + Key(bucket=bucket, name='file3').set_contents_from_string('abc') + Key(bucket=bucket, name='file4').set_contents_from_string('abc') + + result = bucket.delete_keys(['abc', 'file3']) + + result.deleted.should.have.length_of(1) + result.errors.should.have.length_of(1) + keys = bucket.get_all_keys() + keys.should.have.length_of(3) + keys[0].name.should.equal('file1') + + +@mock_s3_deprecated 
+def test_bucket_name_with_dot():
+    conn = boto.connect_s3()
+    bucket = conn.create_bucket('firstname.lastname')
+
+    k = Key(bucket, 'somekey')
+    k.set_contents_from_string('somedata')
+
+
+@mock_s3_deprecated
+def test_key_with_special_characters():
+    conn = boto.connect_s3()
+    bucket = conn.create_bucket('test_bucket_name')
+
+    key = Key(bucket, 'test_list_keys_2/x?y')
+    key.set_contents_from_string('value1')
+
+    key_list = bucket.list('test_list_keys_2/', '/')
+    keys = [x for x in key_list]
+    keys[0].name.should.equal("test_list_keys_2/x?y")
+
+
+@mock_s3_deprecated
+def test_unicode_key_with_slash():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "/the-key-unîcode/test"
+    key.set_contents_from_string("value")
+
+    key = bucket.get_key("/the-key-unîcode/test")
+    key.get_contents_as_string().should.equal(b'value')
+
+
+@mock_s3_deprecated
+def test_bucket_key_listing_order():
+    conn = boto.connect_s3()
+    bucket = conn.create_bucket('test_bucket')
+    prefix = 'toplevel/'
+
+    def store(name):
+        k = Key(bucket, prefix + name)
+        k.set_contents_from_string('somedata')
+
+    names = ['x/key', 'y.key1', 'y.key2', 'y.key3', 'x/y/key', 'x/y/z/key']
+
+    for name in names:
+        store(name)
+
+    delimiter = None
+    keys = [x.name for x in bucket.list(prefix, delimiter)]
+    keys.should.equal([
+        'toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key',
+        'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3'
+    ])
+
+    delimiter = '/'
+    keys = [x.name for x in bucket.list(prefix, delimiter)]
+    keys.should.equal([
+        'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3', 'toplevel/x/'
+    ])
+
+    # Test delimiter with no prefix
+    delimiter = '/'
+    keys = [x.name for x in bucket.list(prefix=None, delimiter=delimiter)]
+    keys.should.equal(['toplevel/'])
+
+    delimiter = None
+    keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
+    keys.should.equal(
+        [u'toplevel/x/key', u'toplevel/x/y/key', u'toplevel/x/y/z/key'])
+
+    delimiter = '/'
+    keys = [x.name for x in bucket.list(prefix + 'x', delimiter)]
+    keys.should.equal([u'toplevel/x/'])
+
+
+@mock_s3_deprecated
+def test_key_with_reduced_redundancy():
+    conn = boto.connect_s3()
+    bucket = conn.create_bucket('test_bucket_name')
+
+    key = Key(bucket, 'test_rr_key')
+    key.set_contents_from_string('value1', reduced_redundancy=True)
+    # we use the bucket iterator because of:
+    # https://github.com/boto/boto/issues/1173
+    list(bucket)[0].storage_class.should.equal('REDUCED_REDUNDANCY')
+
+
+@mock_s3_deprecated
+def test_copy_key_reduced_redundancy():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_contents_from_string("some value")
+
+    bucket.copy_key('new-key', 'foobar', 'the-key',
+                    storage_class='REDUCED_REDUNDANCY')
+
+    # we use the bucket iterator because of:
+    # https://github.com/boto/boto/issues/1173
+    keys = dict([(k.name, k) for k in bucket])
+    keys['new-key'].storage_class.should.equal("REDUCED_REDUNDANCY")
+    keys['the-key'].storage_class.should.equal("STANDARD")
+
+
+@freeze_time("2012-01-01 12:00:00")
+@mock_s3_deprecated
+def test_restore_key():
+    conn = boto.connect_s3('the_key', 'the_secret')
+    bucket = conn.create_bucket("foobar")
+    key = Key(bucket)
+    key.key = "the-key"
+    key.set_contents_from_string("some value")
+    list(bucket)[0].ongoing_restore.should.be.none
+    key.restore(1)
+    key = bucket.get_key('the-key')
+    key.ongoing_restore.should_not.be.none
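+    # boto derives ongoing_restore from the x-amz-restore response header:
+    # after restore(), the header is present (so the attribute is not None)
+    # with ongoing-request="false", which the surrounding assertions verify.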
key.ongoing_restore.should.be.false + key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT") + key.restore(2) + key = bucket.get_key('the-key') + key.ongoing_restore.should_not.be.none + key.ongoing_restore.should.be.false + key.expiry_date.should.equal("Tue, 03 Jan 2012 12:00:00 GMT") + + +@freeze_time("2012-01-01 12:00:00") +@mock_s3_deprecated +def test_restore_key_headers(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("some value") + key.restore(1, headers={'foo': 'bar'}) + key = bucket.get_key('the-key') + key.ongoing_restore.should_not.be.none + key.ongoing_restore.should.be.false + key.expiry_date.should.equal("Mon, 02 Jan 2012 12:00:00 GMT") + + +@mock_s3_deprecated +def test_get_versioning_status(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket('foobar') + d = bucket.get_versioning_status() + d.should.be.empty + + bucket.configure_versioning(versioning=True) + d = bucket.get_versioning_status() + d.shouldnt.be.empty + d.should.have.key('Versioning').being.equal('Enabled') + + bucket.configure_versioning(versioning=False) + d = bucket.get_versioning_status() + d.should.have.key('Versioning').being.equal('Suspended') + + +@mock_s3_deprecated +def test_key_version(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket('foobar') + bucket.configure_versioning(versioning=True) + + key = Key(bucket) + key.key = 'the-key' + key.version_id.should.be.none + key.set_contents_from_string('some string') + key.version_id.should.equal('0') + key.set_contents_from_string('some string') + key.version_id.should.equal('1') + + key = bucket.get_key('the-key') + key.version_id.should.equal('1') + + +@mock_s3_deprecated +def test_list_versions(): + conn = boto.connect_s3('the_key', 'the_secret') + bucket = conn.create_bucket('foobar') + bucket.configure_versioning(versioning=True) + + key = Key(bucket, 'the-key') + key.version_id.should.be.none + key.set_contents_from_string("Version 1") + key.version_id.should.equal('0') + key.set_contents_from_string("Version 2") + key.version_id.should.equal('1') + + versions = list(bucket.list_versions()) + + versions.should.have.length_of(2) + + versions[0].name.should.equal('the-key') + versions[0].version_id.should.equal('0') + versions[0].get_contents_as_string().should.equal(b"Version 1") + + versions[1].name.should.equal('the-key') + versions[1].version_id.should.equal('1') + versions[1].get_contents_as_string().should.equal(b"Version 2") + + key = Key(bucket, 'the2-key') + key.set_contents_from_string("Version 1") + + keys = list(bucket.list()) + keys.should.have.length_of(2) + versions = list(bucket.list_versions(prefix='the2-')) + versions.should.have.length_of(1) + + +@mock_s3_deprecated +def test_acl_setting(): + conn = boto.connect_s3() + bucket = conn.create_bucket('foobar') + content = b'imafile' + keyname = 'test.txt' + + key = Key(bucket, name=keyname) + key.content_type = 'text/plain' + key.set_contents_from_string(content) + key.make_public() + + key = bucket.get_key(keyname) + + assert key.get_contents_as_string() == content + + grants = key.get_acl().acl.grants + assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and + g.permission == 'READ' for g in grants), grants + + +@mock_s3_deprecated +def test_acl_setting_via_headers(): + conn = boto.connect_s3() + bucket = conn.create_bucket('foobar') + content = b'imafile' + keyname = 'test.txt' + + 
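+    # x-amz-grant-* request headers are an alternative to canned ACL policies;
+    # the header value below names a grantee URI (the AllUsers group) directly.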
key = Key(bucket, name=keyname) + key.content_type = 'text/plain' + key.set_contents_from_string(content, headers={ + 'x-amz-grant-full-control': 'uri="http://acs.amazonaws.com/groups/global/AllUsers"' + }) + + key = bucket.get_key(keyname) + + assert key.get_contents_as_string() == content + + grants = key.get_acl().acl.grants + assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and + g.permission == 'FULL_CONTROL' for g in grants), grants + + +@mock_s3_deprecated +def test_acl_switching(): + conn = boto.connect_s3() + bucket = conn.create_bucket('foobar') + content = b'imafile' + keyname = 'test.txt' + + key = Key(bucket, name=keyname) + key.content_type = 'text/plain' + key.set_contents_from_string(content, policy='public-read') + key.set_acl('private') + + grants = key.get_acl().acl.grants + assert not any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and + g.permission == 'READ' for g in grants), grants + + +@mock_s3_deprecated +def test_bucket_acl_setting(): + conn = boto.connect_s3() + bucket = conn.create_bucket('foobar') + + bucket.make_public() + + grants = bucket.get_acl().acl.grants + assert any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and + g.permission == 'READ' for g in grants), grants + + +@mock_s3_deprecated +def test_bucket_acl_switching(): + conn = boto.connect_s3() + bucket = conn.create_bucket('foobar') + bucket.make_public() + + bucket.set_acl('private') + + grants = bucket.get_acl().acl.grants + assert not any(g.uri == 'http://acs.amazonaws.com/groups/global/AllUsers' and + g.permission == 'READ' for g in grants), grants + + +@mock_s3 +def test_s3_object_in_public_bucket(): + s3 = boto3.resource('s3') + bucket = s3.Bucket('test-bucket') + bucket.create(ACL='public-read') + bucket.put_object(Body=b'ABCD', Key='file.txt') + + s3_anonymous = boto3.resource('s3') + s3_anonymous.meta.client.meta.events.register('choose-signer.s3.*', disable_signing) + + contents = s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get()['Body'].read() + contents.should.equal(b'ABCD') + + bucket.put_object(ACL='private', Body=b'ABCD', Key='file.txt') + + with assert_raises(ClientError) as exc: + s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get() + exc.exception.response['Error']['Code'].should.equal('403') + + params = {'Bucket': 'test-bucket', 'Key': 'file.txt'} + presigned_url = boto3.client('s3').generate_presigned_url('get_object', params, ExpiresIn=900) + response = requests.get(presigned_url) + assert response.status_code == 200 + + +@mock_s3 +def test_s3_object_in_private_bucket(): + s3 = boto3.resource('s3') + bucket = s3.Bucket('test-bucket') + bucket.create(ACL='private') + bucket.put_object(ACL='private', Body=b'ABCD', Key='file.txt') + + s3_anonymous = boto3.resource('s3') + s3_anonymous.meta.client.meta.events.register('choose-signer.s3.*', disable_signing) + + with assert_raises(ClientError) as exc: + s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get() + exc.exception.response['Error']['Code'].should.equal('403') + + bucket.put_object(ACL='public-read', Body=b'ABCD', Key='file.txt') + contents = s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get()['Body'].read() + contents.should.equal(b'ABCD') + + +@mock_s3_deprecated +def test_unicode_key(): + conn = boto.connect_s3() + bucket = conn.create_bucket('mybucket') + key = Key(bucket) + key.key = u'こんにちは.jpg' + key.set_contents_from_string('Hello world!') + assert [listed_key.key for listed_key in bucket.list()] == 
[key.key]
+    fetched_key = bucket.get_key(key.key)
+    assert fetched_key.key == key.key
+    assert fetched_key.get_contents_as_string().decode("utf-8") == 'Hello world!'
+
+
+@mock_s3_deprecated
+def test_unicode_value():
+    conn = boto.connect_s3()
+    bucket = conn.create_bucket('mybucket')
+    key = Key(bucket)
+    key.key = 'some_key'
+    key.set_contents_from_string(u'こんにちは.jpg')
+    list(bucket.list())
+    key = bucket.get_key(key.key)
+    assert key.get_contents_as_string().decode("utf-8") == u'こんにちは.jpg'
+
+
+@mock_s3_deprecated
+def test_setting_content_encoding():
+    conn = boto.connect_s3()
+    bucket = conn.create_bucket('mybucket')
+    key = bucket.new_key("keyname")
+    key.set_metadata("Content-Encoding", "gzip")
+    compressed_data = "abcdef"
+    key.set_contents_from_string(compressed_data)
+
+    key = bucket.get_key("keyname")
+    key.content_encoding.should.equal("gzip")
+
+
+@mock_s3_deprecated
+def test_bucket_location():
+    conn = boto.s3.connect_to_region("us-west-2")
+    bucket = conn.create_bucket('mybucket')
+    bucket.get_location().should.equal("us-west-2")
+
+
+@mock_s3_deprecated
+def test_ranged_get():
+    conn = boto.connect_s3()
+    bucket = conn.create_bucket('mybucket')
+    key = Key(bucket)
+    key.key = 'bigkey'
+    rep = b"0123456789"
+    key.set_contents_from_string(rep * 10)
+
+    # Implicitly bounded range requests.
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=0-'}).should.equal(rep * 10)
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=50-'}).should.equal(rep * 5)
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=99-'}).should.equal(b'9')
+
+    # Explicitly bounded range requests starting from the first byte.
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=0-0'}).should.equal(b'0')
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=0-49'}).should.equal(rep * 5)
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=0-99'}).should.equal(rep * 10)
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=0-100'}).should.equal(rep * 10)
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=0-700'}).should.equal(rep * 10)
+
+    # Explicitly bounded range requests starting from a middle byte.
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=50-54'}).should.equal(rep[:5])
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=50-99'}).should.equal(rep * 5)
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=50-100'}).should.equal(rep * 5)
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=50-700'}).should.equal(rep * 5)
+
+    # Explicitly bounded range requests starting from the last byte.
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=99-99'}).should.equal(b'9')
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=99-100'}).should.equal(b'9')
+    key.get_contents_as_string(
+        headers={'Range': 'bytes=99-700'}).should.equal(b'9')
+
+    # Suffix range requests.
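+    # (A suffix range 'bytes=-N' asks for the last N bytes; when N is at
+    # least the object size, the full 100-byte body should come back.)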
+ key.get_contents_as_string( + headers={'Range': 'bytes=-1'}).should.equal(b'9') + key.get_contents_as_string( + headers={'Range': 'bytes=-60'}).should.equal(rep * 6) + key.get_contents_as_string( + headers={'Range': 'bytes=-100'}).should.equal(rep * 10) + key.get_contents_as_string( + headers={'Range': 'bytes=-101'}).should.equal(rep * 10) + key.get_contents_as_string( + headers={'Range': 'bytes=-700'}).should.equal(rep * 10) + + key.size.should.equal(100) + + +@mock_s3_deprecated +def test_policy(): + conn = boto.connect_s3() + bucket_name = 'mybucket' + bucket = conn.create_bucket(bucket_name) + + policy = json.dumps({ + "Version": "2012-10-17", + "Id": "PutObjPolicy", + "Statement": [ + { + "Sid": "DenyUnEncryptedObjectUploads", + "Effect": "Deny", + "Principal": "*", + "Action": "s3:PutObject", + "Resource": "arn:aws:s3:::{bucket_name}/*".format(bucket_name=bucket_name), + "Condition": { + "StringNotEquals": { + "s3:x-amz-server-side-encryption": "aws:kms" + } + } + } + ] + }) + + with assert_raises(S3ResponseError) as err: + bucket.get_policy() + + ex = err.exception + ex.box_usage.should.be.none + ex.error_code.should.equal('NoSuchBucketPolicy') + ex.message.should.equal('The bucket policy does not exist') + ex.reason.should.equal('Not Found') + ex.resource.should.be.none + ex.status.should.equal(404) + ex.body.should.contain(bucket_name) + ex.request_id.should_not.be.none + + bucket.set_policy(policy).should.be.true + + bucket = conn.get_bucket(bucket_name) + + bucket.get_policy().decode('utf-8').should.equal(policy) + + bucket.delete_policy() + + with assert_raises(S3ResponseError) as err: + bucket.get_policy() + + +@mock_s3_deprecated +def test_website_configuration_xml(): + conn = boto.connect_s3() + bucket = conn.create_bucket('test-bucket') + bucket.set_website_configuration_xml(TEST_XML) + bucket.get_website_configuration_xml().should.equal(TEST_XML) + + +@mock_s3_deprecated +def test_key_with_trailing_slash_in_ordinary_calling_format(): + conn = boto.connect_s3( + 'access_key', + 'secret_key', + calling_format=boto.s3.connection.OrdinaryCallingFormat() + ) + bucket = conn.create_bucket('test_bucket_name') + + key_name = 'key_with_slash/' + + key = Key(bucket, key_name) + key.set_contents_from_string('some value') + + [k.name for k in bucket.get_all_keys()].should.contain(key_name) + + +""" +boto3 +""" + + +@mock_s3 +def test_boto3_key_etag(): + s3 = boto3.client('s3', region_name='us-east-1') + s3.create_bucket(Bucket='mybucket') + s3.put_object(Bucket='mybucket', Key='steve', Body=b'is awesome') + resp = s3.get_object(Bucket='mybucket', Key='steve') + resp['ETag'].should.equal('"d32bda93738f7e03adb22e66c90fbc04"') + + +@mock_s3 +def test_website_redirect_location(): + s3 = boto3.client('s3', region_name='us-east-1') + s3.create_bucket(Bucket='mybucket') + + s3.put_object(Bucket='mybucket', Key='steve', Body=b'is awesome') + resp = s3.get_object(Bucket='mybucket', Key='steve') + resp.get('WebsiteRedirectLocation').should.be.none + + url = 'https://github.com/spulec/moto' + s3.put_object(Bucket='mybucket', Key='steve', Body=b'is awesome', WebsiteRedirectLocation=url) + resp = s3.get_object(Bucket='mybucket', Key='steve') + resp['WebsiteRedirectLocation'].should.equal(url) + + +@mock_s3 +def test_boto3_list_keys_xml_escaped(): + s3 = boto3.client('s3', region_name='us-east-1') + s3.create_bucket(Bucket='mybucket') + key_name = 'Q&A.txt' + s3.put_object(Bucket='mybucket', Key=key_name, Body=b'is awesome') + + resp = s3.list_objects_v2(Bucket='mybucket', Prefix=key_name) + + 
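+    # The '&' in the key name has to be XML-escaped in the ListObjectsV2
+    # response body; botocore unescapes it again, so the key should
+    # round-trip unchanged in the assertions below.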
assert resp['Contents'][0]['Key'] == key_name + assert resp['KeyCount'] == 1 + assert resp['MaxKeys'] == 1000 + assert resp['Prefix'] == key_name + assert resp['IsTruncated'] == False + assert 'Delimiter' not in resp + assert 'StartAfter' not in resp + assert 'NextContinuationToken' not in resp + assert 'Owner' not in resp['Contents'][0] + + +@mock_s3 +def test_boto3_list_objects_v2_truncated_response(): + s3 = boto3.client('s3', region_name='us-east-1') + s3.create_bucket(Bucket='mybucket') + s3.put_object(Bucket='mybucket', Key='one', Body=b'1') + s3.put_object(Bucket='mybucket', Key='two', Body=b'22') + s3.put_object(Bucket='mybucket', Key='three', Body=b'333') + + # First list + resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1) + listed_object = resp['Contents'][0] + + assert listed_object['Key'] == 'one' + assert resp['MaxKeys'] == 1 + assert resp['Prefix'] == '' + assert resp['KeyCount'] == 1 + assert resp['IsTruncated'] == True + assert 'Delimiter' not in resp + assert 'StartAfter' not in resp + assert 'Owner' not in listed_object # owner info was not requested + + next_token = resp['NextContinuationToken'] + + # Second list + resp = s3.list_objects_v2( + Bucket='mybucket', MaxKeys=1, ContinuationToken=next_token) + listed_object = resp['Contents'][0] + + assert listed_object['Key'] == 'three' + assert resp['MaxKeys'] == 1 + assert resp['Prefix'] == '' + assert resp['KeyCount'] == 1 + assert resp['IsTruncated'] == True + assert 'Delimiter' not in resp + assert 'StartAfter' not in resp + assert 'Owner' not in listed_object + + next_token = resp['NextContinuationToken'] + + # Third list + resp = s3.list_objects_v2( + Bucket='mybucket', MaxKeys=1, ContinuationToken=next_token) + listed_object = resp['Contents'][0] + + assert listed_object['Key'] == 'two' + assert resp['MaxKeys'] == 1 + assert resp['Prefix'] == '' + assert resp['KeyCount'] == 1 + assert resp['IsTruncated'] == False + assert 'Delimiter' not in resp + assert 'Owner' not in listed_object + assert 'StartAfter' not in resp + assert 'NextContinuationToken' not in resp + + +@mock_s3 +def test_boto3_list_objects_v2_truncated_response_start_after(): + s3 = boto3.client('s3', region_name='us-east-1') + s3.create_bucket(Bucket='mybucket') + s3.put_object(Bucket='mybucket', Key='one', Body=b'1') + s3.put_object(Bucket='mybucket', Key='two', Body=b'22') + s3.put_object(Bucket='mybucket', Key='three', Body=b'333') + + # First list + resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1, StartAfter='one') + listed_object = resp['Contents'][0] + + assert listed_object['Key'] == 'three' + assert resp['MaxKeys'] == 1 + assert resp['Prefix'] == '' + assert resp['KeyCount'] == 1 + assert resp['IsTruncated'] == True + assert resp['StartAfter'] == 'one' + assert 'Delimiter' not in resp + assert 'Owner' not in listed_object + + next_token = resp['NextContinuationToken'] + + # Second list + # The ContinuationToken must take precedence over StartAfter. + resp = s3.list_objects_v2(Bucket='mybucket', MaxKeys=1, StartAfter='one', + ContinuationToken=next_token) + listed_object = resp['Contents'][0] + + assert listed_object['Key'] == 'two' + assert resp['MaxKeys'] == 1 + assert resp['Prefix'] == '' + assert resp['KeyCount'] == 1 + assert resp['IsTruncated'] == False + # When ContinuationToken is given, StartAfter is ignored. This also means + # AWS does not return it in the response. 
+ assert 'StartAfter' not in resp + assert 'Delimiter' not in resp + assert 'Owner' not in listed_object + + +@mock_s3 +def test_boto3_list_objects_v2_fetch_owner(): + s3 = boto3.client('s3', region_name='us-east-1') + s3.create_bucket(Bucket='mybucket') + s3.put_object(Bucket='mybucket', Key='one', Body=b'11') + + resp = s3.list_objects_v2(Bucket='mybucket', FetchOwner=True) + owner = resp['Contents'][0]['Owner'] + + assert 'ID' in owner + assert 'DisplayName' in owner + assert len(owner.keys()) == 2 + + +@mock_s3 +def test_boto3_bucket_create(): + s3 = boto3.resource('s3', region_name='us-east-1') + s3.create_bucket(Bucket="blah") + + s3.Object('blah', 'hello.txt').put(Body="some text") + + s3.Object('blah', 'hello.txt').get()['Body'].read().decode( + "utf-8").should.equal("some text") + + +@mock_s3 +def test_bucket_create_duplicate(): + s3 = boto3.resource('s3', region_name='us-west-2') + s3.create_bucket(Bucket="blah", CreateBucketConfiguration={ + 'LocationConstraint': 'us-west-2', + }) + with assert_raises(ClientError) as exc: + s3.create_bucket( + Bucket="blah", + CreateBucketConfiguration={ + 'LocationConstraint': 'us-west-2', + } + ) + exc.exception.response['Error']['Code'].should.equal('BucketAlreadyExists') + + +@mock_s3 +def test_boto3_bucket_create_eu_central(): + s3 = boto3.resource('s3', region_name='eu-central-1') + s3.create_bucket(Bucket="blah") + + s3.Object('blah', 'hello.txt').put(Body="some text") + + s3.Object('blah', 'hello.txt').get()['Body'].read().decode( + "utf-8").should.equal("some text") + + +@mock_s3 +def test_boto3_head_object(): + s3 = boto3.resource('s3', region_name='us-east-1') + s3.create_bucket(Bucket="blah") + + s3.Object('blah', 'hello.txt').put(Body="some text") + + s3.Object('blah', 'hello.txt').meta.client.head_object( + Bucket='blah', Key='hello.txt') + + with assert_raises(ClientError) as e: + s3.Object('blah', 'hello2.txt').meta.client.head_object( + Bucket='blah', Key='hello_bad.txt') + e.exception.response['Error']['Code'].should.equal('404') + + +@mock_s3 +def test_boto3_bucket_deletion(): + cli = boto3.client('s3', region_name='us-east-1') + cli.create_bucket(Bucket="foobar") + + cli.put_object(Bucket="foobar", Key="the-key", Body="some value") + + # Try to delete a bucket that still has keys + cli.delete_bucket.when.called_with(Bucket="foobar").should.throw( + cli.exceptions.ClientError, + ('An error occurred (BucketNotEmpty) when calling the DeleteBucket operation: ' + 'The bucket you tried to delete is not empty')) + + cli.delete_object(Bucket="foobar", Key="the-key") + cli.delete_bucket(Bucket="foobar") + + # Get non-existing bucket + cli.head_bucket.when.called_with(Bucket="foobar").should.throw( + cli.exceptions.ClientError, + "An error occurred (404) when calling the HeadBucket operation: Not Found") + + # Delete non-existing bucket + cli.delete_bucket.when.called_with(Bucket="foobar").should.throw(cli.exceptions.NoSuchBucket) + + +@mock_s3 +def test_boto3_get_object(): + s3 = boto3.resource('s3', region_name='us-east-1') + s3.create_bucket(Bucket="blah") + + s3.Object('blah', 'hello.txt').put(Body="some text") + + s3.Object('blah', 'hello.txt').meta.client.head_object( + Bucket='blah', Key='hello.txt') + + with assert_raises(ClientError) as e: + s3.Object('blah', 'hello2.txt').get() + + e.exception.response['Error']['Code'].should.equal('NoSuchKey') + + +@mock_s3 +def test_boto3_head_object_with_versioning(): + s3 = boto3.resource('s3', region_name='us-east-1') + bucket = s3.create_bucket(Bucket='blah') + 
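# --- Editor's note (illustrative addition, not part of this patch): the
# resource-level bucket.Versioning().enable() used below is shorthand for the
# client call exercised elsewhere in this file; a minimal sketch:
client = boto3.client('s3', region_name='us-east-1')
client.put_bucket_versioning(
    Bucket='blah',
    VersioningConfiguration={'Status': 'Enabled'},
)
# ---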
bucket.Versioning().enable() + + old_content = 'some text' + new_content = 'some new text' + s3.Object('blah', 'hello.txt').put(Body=old_content) + s3.Object('blah', 'hello.txt').put(Body=new_content) + + head_object = s3.Object('blah', 'hello.txt').meta.client.head_object( + Bucket='blah', Key='hello.txt') + head_object['VersionId'].should.equal('1') + head_object['ContentLength'].should.equal(len(new_content)) + + old_head_object = s3.Object('blah', 'hello.txt').meta.client.head_object( + Bucket='blah', Key='hello.txt', VersionId='0') + old_head_object['VersionId'].should.equal('0') + old_head_object['ContentLength'].should.equal(len(old_content)) + + +@mock_s3 +def test_boto3_copy_object_with_versioning(): + client = boto3.client('s3', region_name='us-east-1') + + client.create_bucket(Bucket='blah', CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'}) + client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'}) + + client.put_object(Bucket='blah', Key='test1', Body=b'test1') + client.put_object(Bucket='blah', Key='test2', Body=b'test2') + + obj1_version = client.get_object(Bucket='blah', Key='test1')['VersionId'] + obj2_version = client.get_object(Bucket='blah', Key='test2')['VersionId'] + + # Versions should be the same + obj1_version.should.equal(obj2_version) + + client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test1'}, Bucket='blah', Key='test2') + obj2_version_new = client.get_object(Bucket='blah', Key='test2')['VersionId'] + + # Version should be different to previous version + obj2_version_new.should_not.equal(obj2_version) + + +@mock_s3 +def test_boto3_deleted_versionings_list(): + client = boto3.client('s3', region_name='us-east-1') + + client.create_bucket(Bucket='blah') + client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'}) + + client.put_object(Bucket='blah', Key='test1', Body=b'test1') + client.put_object(Bucket='blah', Key='test2', Body=b'test2') + client.delete_objects(Bucket='blah', Delete={'Objects': [{'Key': 'test1'}]}) + + listed = client.list_objects_v2(Bucket='blah') + assert len(listed['Contents']) == 1 + + +@mock_s3 +def test_boto3_delete_versioned_bucket(): + client = boto3.client('s3', region_name='us-east-1') + + client.create_bucket(Bucket='blah') + client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'}) + + resp = client.put_object(Bucket='blah', Key='test1', Body=b'test1') + client.delete_object(Bucket='blah', Key='test1', VersionId=resp["VersionId"]) + + client.delete_bucket(Bucket='blah') + + +@mock_s3 +def test_boto3_head_object_if_modified_since(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = "blah" + s3.create_bucket(Bucket=bucket_name) + + key = 'hello.txt' + + s3.put_object( + Bucket=bucket_name, + Key=key, + Body='test' + ) + + with assert_raises(botocore.exceptions.ClientError) as err: + s3.head_object( + Bucket=bucket_name, + Key=key, + IfModifiedSince=datetime.datetime.utcnow() + datetime.timedelta(hours=1) + ) + e = err.exception + e.response['Error'].should.equal({'Code': '304', 'Message': 'Not Modified'}) + + +@mock_s3 +@reduced_min_part_size +def test_boto3_multipart_etag(): + # Create Bucket so that test can run + s3 = boto3.client('s3', region_name='us-east-1') + s3.create_bucket(Bucket='mybucket') + + upload_id = s3.create_multipart_upload( + Bucket='mybucket', Key='the-key')['UploadId'] + part1 = b'0' * REDUCED_PART_SIZE + etags = [] + etags.append( + s3.upload_part(Bucket='mybucket', 
Key='the-key', PartNumber=1, + UploadId=upload_id, Body=part1)['ETag']) + # last part, can be less than 5 MB + part2 = b'1' + etags.append( + s3.upload_part(Bucket='mybucket', Key='the-key', PartNumber=2, + UploadId=upload_id, Body=part2)['ETag']) + s3.complete_multipart_upload( + Bucket='mybucket', Key='the-key', UploadId=upload_id, + MultipartUpload={'Parts': [{'ETag': etag, 'PartNumber': i} + for i, etag in enumerate(etags, 1)]}) + # we should get both parts as the key contents + resp = s3.get_object(Bucket='mybucket', Key='the-key') + resp['ETag'].should.equal(EXPECTED_ETAG) + + +@mock_s3 +def test_boto3_put_object_with_tagging(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-tags' + s3.create_bucket(Bucket=bucket_name) + + s3.put_object( + Bucket=bucket_name, + Key=key, + Body='test', + Tagging='foo=bar', + ) + + resp = s3.get_object_tagging(Bucket=bucket_name, Key=key) + + resp['TagSet'].should.contain({'Key': 'foo', 'Value': 'bar'}) + + +@mock_s3 +def test_boto3_put_bucket_tagging(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + s3.create_bucket(Bucket=bucket_name) + + # With 1 tag: + resp = s3.put_bucket_tagging(Bucket=bucket_name, + Tagging={ + "TagSet": [ + { + "Key": "TagOne", + "Value": "ValueOne" + } + ] + }) + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + # With multiple tags: + resp = s3.put_bucket_tagging(Bucket=bucket_name, + Tagging={ + "TagSet": [ + { + "Key": "TagOne", + "Value": "ValueOne" + }, + { + "Key": "TagTwo", + "Value": "ValueTwo" + } + ] + }) + + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + # No tags is also OK: + resp = s3.put_bucket_tagging(Bucket=bucket_name, Tagging={ + "TagSet": [] + }) + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + +@mock_s3 +def test_boto3_get_bucket_tagging(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + s3.create_bucket(Bucket=bucket_name) + s3.put_bucket_tagging(Bucket=bucket_name, + Tagging={ + "TagSet": [ + { + "Key": "TagOne", + "Value": "ValueOne" + }, + { + "Key": "TagTwo", + "Value": "ValueTwo" + } + ] + }) + + # Get the tags for the bucket: + resp = s3.get_bucket_tagging(Bucket=bucket_name) + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + len(resp["TagSet"]).should.equal(2) + + # With no tags: + s3.put_bucket_tagging(Bucket=bucket_name, Tagging={ + "TagSet": [] + }) + + with assert_raises(ClientError) as err: + s3.get_bucket_tagging(Bucket=bucket_name) + + e = err.exception + e.response["Error"]["Code"].should.equal("NoSuchTagSet") + e.response["Error"]["Message"].should.equal("The TagSet does not exist") + + +@mock_s3 +def test_boto3_delete_bucket_tagging(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + s3.create_bucket(Bucket=bucket_name) + + s3.put_bucket_tagging(Bucket=bucket_name, + Tagging={ + "TagSet": [ + { + "Key": "TagOne", + "Value": "ValueOne" + }, + { + "Key": "TagTwo", + "Value": "ValueTwo" + } + ] + }) + + resp = s3.delete_bucket_tagging(Bucket=bucket_name) + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(204) + + with assert_raises(ClientError) as err: + s3.get_bucket_tagging(Bucket=bucket_name) + + e = err.exception + e.response["Error"]["Code"].should.equal("NoSuchTagSet") + e.response["Error"]["Message"].should.equal("The TagSet does not exist") + + +@mock_s3 +def test_boto3_put_bucket_cors(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + 
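# --- Editor's note (illustrative addition, not part of this patch): the
# multipart test above relies on the de-facto rule S3 uses for multipart
# ETags: the MD5 of the concatenated binary MD5 digests of the parts,
# suffixed with the part count. A minimal sketch of that rule:
import hashlib

def multipart_etag(parts):
    digests = b''.join(hashlib.md5(part).digest() for part in parts)
    return '"{}-{}"'.format(hashlib.md5(digests).hexdigest(), len(parts))

# e.g. EXPECTED_ETAG in the test corresponds to multipart_etag([part1, part2])
# ---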
s3.create_bucket(Bucket=bucket_name) + + resp = s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ + "CORSRules": [ + { + "AllowedOrigins": [ + "*" + ], + "AllowedMethods": [ + "GET", + "POST" + ], + "AllowedHeaders": [ + "Authorization" + ], + "ExposeHeaders": [ + "x-amz-request-id" + ], + "MaxAgeSeconds": 123 + }, + { + "AllowedOrigins": [ + "*" + ], + "AllowedMethods": [ + "PUT" + ], + "AllowedHeaders": [ + "Authorization" + ], + "ExposeHeaders": [ + "x-amz-request-id" + ], + "MaxAgeSeconds": 123 + } + ] + }) + + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + with assert_raises(ClientError) as err: + s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ + "CORSRules": [ + { + "AllowedOrigins": [ + "*" + ], + "AllowedMethods": [ + "NOTREAL", + "POST" + ] + } + ] + }) + e = err.exception + e.response["Error"]["Code"].should.equal("InvalidRequest") + e.response["Error"]["Message"].should.equal("Found unsupported HTTP method in CORS config. " + "Unsupported method is NOTREAL") + + with assert_raises(ClientError) as err: + s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ + "CORSRules": [] + }) + e = err.exception + e.response["Error"]["Code"].should.equal("MalformedXML") + + # And 101: + many_rules = [{"AllowedOrigins": ["*"], "AllowedMethods": ["GET"]}] * 101 + with assert_raises(ClientError) as err: + s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ + "CORSRules": many_rules + }) + e = err.exception + e.response["Error"]["Code"].should.equal("MalformedXML") + + +@mock_s3 +def test_boto3_get_bucket_cors(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + s3.create_bucket(Bucket=bucket_name) + + # Without CORS: + with assert_raises(ClientError) as err: + s3.get_bucket_cors(Bucket=bucket_name) + + e = err.exception + e.response["Error"]["Code"].should.equal("NoSuchCORSConfiguration") + e.response["Error"]["Message"].should.equal("The CORS configuration does not exist") + + s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ + "CORSRules": [ + { + "AllowedOrigins": [ + "*" + ], + "AllowedMethods": [ + "GET", + "POST" + ], + "AllowedHeaders": [ + "Authorization" + ], + "ExposeHeaders": [ + "x-amz-request-id" + ], + "MaxAgeSeconds": 123 + }, + { + "AllowedOrigins": [ + "*" + ], + "AllowedMethods": [ + "PUT" + ], + "AllowedHeaders": [ + "Authorization" + ], + "ExposeHeaders": [ + "x-amz-request-id" + ], + "MaxAgeSeconds": 123 + } + ] + }) + + resp = s3.get_bucket_cors(Bucket=bucket_name) + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + len(resp["CORSRules"]).should.equal(2) + + +@mock_s3 +def test_boto3_delete_bucket_cors(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + s3.create_bucket(Bucket=bucket_name) + s3.put_bucket_cors(Bucket=bucket_name, CORSConfiguration={ + "CORSRules": [ + { + "AllowedOrigins": [ + "*" + ], + "AllowedMethods": [ + "GET" + ] + } + ] + }) + + resp = s3.delete_bucket_cors(Bucket=bucket_name) + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(204) + + # Verify deletion: + with assert_raises(ClientError) as err: + s3.get_bucket_cors(Bucket=bucket_name) + + e = err.exception + e.response["Error"]["Code"].should.equal("NoSuchCORSConfiguration") + e.response["Error"]["Message"].should.equal("The CORS configuration does not exist") + + +@mock_s3 +def test_put_bucket_acl_body(): + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="bucket") + bucket_owner = s3.get_bucket_acl(Bucket="bucket")["Owner"] + 
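# --- Editor's note (illustrative addition, not part of this patch): the CORS
# tests above exercise two validation rules -- AllowedMethods must come from a
# fixed set, and a configuration must contain between 1 and 100 rules. A
# sketch of that validation (names are illustrative, not moto internals):
VALID_CORS_METHODS = {'GET', 'PUT', 'POST', 'DELETE', 'HEAD'}

def validate_cors_rules(rules):
    if not 1 <= len(rules) <= 100:
        raise ValueError('MalformedXML')
    for rule in rules:
        for method in rule['AllowedMethods']:
            if method not in VALID_CORS_METHODS:
                raise ValueError('Unsupported method is {}'.format(method))
# ---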
s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + }, + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "READ_ACP" + } + ], + "Owner": bucket_owner + }) + + result = s3.get_bucket_acl(Bucket="bucket") + assert len(result["Grants"]) == 2 + for g in result["Grants"]: + assert g["Grantee"]["URI"] == "http://acs.amazonaws.com/groups/s3/LogDelivery" + assert g["Grantee"]["Type"] == "Group" + assert g["Permission"] in ["WRITE", "READ_ACP"] + + # With one: + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + } + ], + "Owner": bucket_owner + }) + result = s3.get_bucket_acl(Bucket="bucket") + assert len(result["Grants"]) == 1 + + # With no owner: + with assert_raises(ClientError) as err: + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + } + ] + }) + assert err.exception.response["Error"]["Code"] == "MalformedACLError" + + # With incorrect permission: + with assert_raises(ClientError) as err: + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "lskjflkasdjflkdsjfalisdjflkdsjf" + } + ], + "Owner": bucket_owner + }) + assert err.exception.response["Error"]["Code"] == "MalformedACLError" + + # Clear the ACLs: + result = s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={"Grants": [], "Owner": bucket_owner}) + assert not result.get("Grants") + + +@mock_s3 +def test_put_bucket_notification(): + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="bucket") + + # With no configuration: + result = s3.get_bucket_notification(Bucket="bucket") + assert not result.get("TopicConfigurations") + assert not result.get("QueueConfigurations") + assert not result.get("LambdaFunctionConfigurations") + + # Place proper topic configuration: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "TopicConfigurations": [ + { + "TopicArn": "arn:aws:sns:us-east-1:012345678910:mytopic", + "Events": [ + "s3:ObjectCreated:*", + "s3:ObjectRemoved:*" + ] + }, + { + "TopicArn": "arn:aws:sns:us-east-1:012345678910:myothertopic", + "Events": [ + "s3:ObjectCreated:*" + ], + "Filter": { + "Key": { + "FilterRules": [ + { + "Name": "prefix", + "Value": "images/" + }, + { + "Name": "suffix", + "Value": "png" + } + ] + } + } + } + ] + }) + + # Verify to completion: + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert len(result["TopicConfigurations"]) == 2 + assert not result.get("QueueConfigurations") + assert not result.get("LambdaFunctionConfigurations") + assert result["TopicConfigurations"][0]["TopicArn"] == "arn:aws:sns:us-east-1:012345678910:mytopic" + assert result["TopicConfigurations"][1]["TopicArn"] == "arn:aws:sns:us-east-1:012345678910:myothertopic" + assert len(result["TopicConfigurations"][0]["Events"]) == 2 + assert len(result["TopicConfigurations"][1]["Events"]) == 1 + assert result["TopicConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" + assert 
result["TopicConfigurations"][0]["Events"][1] == "s3:ObjectRemoved:*" + assert result["TopicConfigurations"][1]["Events"][0] == "s3:ObjectCreated:*" + assert result["TopicConfigurations"][0]["Id"] + assert result["TopicConfigurations"][1]["Id"] + assert not result["TopicConfigurations"][0].get("Filter") + assert len(result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"]) == 2 + assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" + assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" + assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][1]["Name"] == "suffix" + assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][1]["Value"] == "png" + + # Place proper queue configuration: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "QueueConfigurations": [ + { + "Id": "SomeID", + "QueueArn": "arn:aws:sqs:us-east-1:012345678910:myQueue", + "Events": ["s3:ObjectCreated:*"], + "Filter": { + "Key": { + "FilterRules": [ + { + "Name": "prefix", + "Value": "images/" + } + ] + } + } + } + ] + }) + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert len(result["QueueConfigurations"]) == 1 + assert not result.get("TopicConfigurations") + assert not result.get("LambdaFunctionConfigurations") + assert result["QueueConfigurations"][0]["Id"] == "SomeID" + assert result["QueueConfigurations"][0]["QueueArn"] == "arn:aws:sqs:us-east-1:012345678910:myQueue" + assert result["QueueConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" + assert len(result["QueueConfigurations"][0]["Events"]) == 1 + assert len(result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"]) == 1 + assert result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" + assert result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" + + # Place proper Lambda configuration: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "LambdaFunctionConfigurations": [ + { + "LambdaFunctionArn": + "arn:aws:lambda:us-east-1:012345678910:function:lambda", + "Events": ["s3:ObjectCreated:*"], + "Filter": { + "Key": { + "FilterRules": [ + { + "Name": "prefix", + "Value": "images/" + } + ] + } + } + } + ] + }) + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert len(result["LambdaFunctionConfigurations"]) == 1 + assert not result.get("TopicConfigurations") + assert not result.get("QueueConfigurations") + assert result["LambdaFunctionConfigurations"][0]["Id"] + assert result["LambdaFunctionConfigurations"][0]["LambdaFunctionArn"] == \ + "arn:aws:lambda:us-east-1:012345678910:function:lambda" + assert result["LambdaFunctionConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" + assert len(result["LambdaFunctionConfigurations"][0]["Events"]) == 1 + assert len(result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"]) == 1 + assert result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" + assert result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" + + # And with all 3 set: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "TopicConfigurations": [ + { + "TopicArn": "arn:aws:sns:us-east-1:012345678910:mytopic", + "Events": [ + "s3:ObjectCreated:*", + "s3:ObjectRemoved:*" + ] + } + ], + 
"LambdaFunctionConfigurations": [ + { + "LambdaFunctionArn": + "arn:aws:lambda:us-east-1:012345678910:function:lambda", + "Events": ["s3:ObjectCreated:*"] + } + ], + "QueueConfigurations": [ + { + "QueueArn": "arn:aws:sqs:us-east-1:012345678910:myQueue", + "Events": ["s3:ObjectCreated:*"] + } + ] + }) + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert len(result["LambdaFunctionConfigurations"]) == 1 + assert len(result["TopicConfigurations"]) == 1 + assert len(result["QueueConfigurations"]) == 1 + + # And clear it out: + s3.put_bucket_notification_configuration(Bucket="bucket", NotificationConfiguration={}) + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert not result.get("TopicConfigurations") + assert not result.get("QueueConfigurations") + assert not result.get("LambdaFunctionConfigurations") + + +@mock_s3 +def test_put_bucket_notification_errors(): + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="bucket") + + # With incorrect ARNs: + for tech, arn in [("Queue", "sqs"), ("Topic", "sns"), ("LambdaFunction", "lambda")]: + with assert_raises(ClientError) as err: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "{}Configurations".format(tech): [ + { + "{}Arn".format(tech): + "arn:aws:{}:us-east-1:012345678910:lksajdfkldskfj", + "Events": ["s3:ObjectCreated:*"] + } + ] + }) + + assert err.exception.response["Error"]["Code"] == "InvalidArgument" + assert err.exception.response["Error"]["Message"] == "The ARN is not well formed" + + # Region not the same as the bucket: + with assert_raises(ClientError) as err: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "QueueConfigurations": [ + { + "QueueArn": + "arn:aws:sqs:us-west-2:012345678910:lksajdfkldskfj", + "Events": ["s3:ObjectCreated:*"] + } + ] + }) + + assert err.exception.response["Error"]["Code"] == "InvalidArgument" + assert err.exception.response["Error"]["Message"] == \ + "The notification destination service region is not valid for the bucket location constraint" + + # Invalid event name: + with assert_raises(ClientError) as err: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "QueueConfigurations": [ + { + "QueueArn": + "arn:aws:sqs:us-east-1:012345678910:lksajdfkldskfj", + "Events": ["notarealeventname"] + } + ] + }) + assert err.exception.response["Error"]["Code"] == "InvalidArgument" + assert err.exception.response["Error"]["Message"] == "The event is not supported for notifications" + + +@mock_s3 +def test_boto3_put_bucket_logging(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + log_bucket = "logbucket" + wrong_region_bucket = "wrongregionlogbucket" + s3.create_bucket(Bucket=bucket_name) + s3.create_bucket(Bucket=log_bucket) # Adding the ACL for log-delivery later... 
+ s3.create_bucket(Bucket=wrong_region_bucket, CreateBucketConfiguration={"LocationConstraint": "us-west-2"}) + + # No logging config: + result = s3.get_bucket_logging(Bucket=bucket_name) + assert not result.get("LoggingEnabled") + + # A log-bucket that doesn't exist: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": "IAMNOTREAL", + "TargetPrefix": "" + } + }) + assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging" + + # A log-bucket that's missing the proper ACLs for LogDelivery: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "" + } + }) + assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging" + assert "log-delivery" in err.exception.response["Error"]["Message"] + + # Add the proper "log-delivery" ACL to the log buckets: + bucket_owner = s3.get_bucket_acl(Bucket=log_bucket)["Owner"] + for bucket in [log_bucket, wrong_region_bucket]: + s3.put_bucket_acl(Bucket=bucket, AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + }, + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "READ_ACP" + }, + { + "Grantee": { + "Type": "CanonicalUser", + "ID": bucket_owner["ID"] + }, + "Permission": "FULL_CONTROL" + } + ], + "Owner": bucket_owner + }) + + # A log-bucket that's in the wrong region: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": wrong_region_bucket, + "TargetPrefix": "" + } + }) + assert err.exception.response["Error"]["Code"] == "CrossLocationLoggingProhibitted" + + # Correct logging: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name) + } + }) + result = s3.get_bucket_logging(Bucket=bucket_name) + assert result["LoggingEnabled"]["TargetBucket"] == log_bucket + assert result["LoggingEnabled"]["TargetPrefix"] == "{}/".format(bucket_name) + assert not result["LoggingEnabled"].get("TargetGrants") + + # And disabling: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={}) + assert not s3.get_bucket_logging(Bucket=bucket_name).get("LoggingEnabled") + + # And enabling with multiple target grants: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name), + "TargetGrants": [ + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "READ" + }, + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "WRITE" + } + ] + } + }) + + result = s3.get_bucket_logging(Bucket=bucket_name) + assert len(result["LoggingEnabled"]["TargetGrants"]) == 2 + assert result["LoggingEnabled"]["TargetGrants"][0]["Grantee"]["ID"] == \ + "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274" + + # Test with just 1 grant: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name), 
+ "TargetGrants": [ + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "READ" + } + ] + } + }) + result = s3.get_bucket_logging(Bucket=bucket_name) + assert len(result["LoggingEnabled"]["TargetGrants"]) == 1 + + # With an invalid grant: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name), + "TargetGrants": [ + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "NOTAREALPERM" + } + ] + } + }) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + +@mock_s3 +def test_boto3_put_object_tagging(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-tags' + s3.create_bucket(Bucket=bucket_name) + + with assert_raises(ClientError) as err: + s3.put_object_tagging( + Bucket=bucket_name, + Key=key, + Tagging={'TagSet': [ + {'Key': 'item1', 'Value': 'foo'}, + {'Key': 'item2', 'Value': 'bar'}, + ]} + ) + + e = err.exception + e.response['Error'].should.equal({ + 'Code': 'NoSuchKey', + 'Message': 'The specified key does not exist.', + 'RequestID': '7a62c49f-347e-4fc4-9331-6e8eEXAMPLE', + }) + + s3.put_object( + Bucket=bucket_name, + Key=key, + Body='test' + ) + + resp = s3.put_object_tagging( + Bucket=bucket_name, + Key=key, + Tagging={'TagSet': [ + {'Key': 'item1', 'Value': 'foo'}, + {'Key': 'item2', 'Value': 'bar'}, + ]} + ) + + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + +@mock_s3 +def test_boto3_put_object_tagging_with_single_tag(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-tags' + s3.create_bucket(Bucket=bucket_name) + + s3.put_object( + Bucket=bucket_name, + Key=key, + Body='test' + ) + + resp = s3.put_object_tagging( + Bucket=bucket_name, + Key=key, + Tagging={'TagSet': [ + {'Key': 'item1', 'Value': 'foo'} + ]} + ) + + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + +@mock_s3 +def test_boto3_get_object_tagging(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-tags' + s3.create_bucket(Bucket=bucket_name) + + s3.put_object( + Bucket=bucket_name, + Key=key, + Body='test' + ) + + resp = s3.get_object_tagging(Bucket=bucket_name, Key=key) + resp['TagSet'].should.have.length_of(0) + + resp = s3.put_object_tagging( + Bucket=bucket_name, + Key=key, + Tagging={'TagSet': [ + {'Key': 'item1', 'Value': 'foo'}, + {'Key': 'item2', 'Value': 'bar'}, + ]} + ) + resp = s3.get_object_tagging(Bucket=bucket_name, Key=key) + + resp['TagSet'].should.have.length_of(2) + resp['TagSet'].should.contain({'Key': 'item1', 'Value': 'foo'}) + resp['TagSet'].should.contain({'Key': 'item2', 'Value': 'bar'}) + + +@mock_s3 +def test_boto3_list_object_versions(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-versions' + s3.create_bucket(Bucket=bucket_name) + s3.put_bucket_versioning( + Bucket=bucket_name, + VersioningConfiguration={ + 'Status': 'Enabled' + } + ) + items = (six.b('v1'), six.b('v2')) + for body in items: + s3.put_object( + Bucket=bucket_name, + Key=key, + Body=body + ) + response = s3.list_object_versions( + Bucket=bucket_name + ) + # Two object versions should be returned + len(response['Versions']).should.equal(2) + keys = set([item['Key'] for item 
in response['Versions']]) + keys.should.equal({key}) + # Test latest object version is returned + response = s3.get_object(Bucket=bucket_name, Key=key) + response['Body'].read().should.equal(items[-1]) + + +@mock_s3 +def test_boto3_bad_prefix_list_object_versions(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-versions' + bad_prefix = 'key-that-does-not-exist' + s3.create_bucket(Bucket=bucket_name) + s3.put_bucket_versioning( + Bucket=bucket_name, + VersioningConfiguration={ + 'Status': 'Enabled' + } + ) + items = (six.b('v1'), six.b('v2')) + for body in items: + s3.put_object( + Bucket=bucket_name, + Key=key, + Body=body + ) + response = s3.list_object_versions( + Bucket=bucket_name, + Prefix=bad_prefix, + ) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + response.should_not.contain('Versions') + response.should_not.contain('DeleteMarkers') + + +@mock_s3 +def test_boto3_delete_markers(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = u'key-with-versions-and-unicode-ó' + s3.create_bucket(Bucket=bucket_name) + s3.put_bucket_versioning( + Bucket=bucket_name, + VersioningConfiguration={ + 'Status': 'Enabled' + } + ) + items = (six.b('v1'), six.b('v2')) + for body in items: + s3.put_object( + Bucket=bucket_name, + Key=key, + Body=body + ) + + s3.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': key}]}) + + with assert_raises(ClientError) as e: + s3.get_object( + Bucket=bucket_name, + Key=key + ) + e.response['Error']['Code'].should.equal('404') + + s3.delete_object( + Bucket=bucket_name, + Key=key, + VersionId='2' + ) + response = s3.get_object( + Bucket=bucket_name, + Key=key + ) + response['Body'].read().should.equal(items[-1]) + response = s3.list_object_versions( + Bucket=bucket_name + ) + response['Versions'].should.have.length_of(2) + + # We've asserted there is only 2 records so one is newest, one is oldest + latest = list(filter(lambda item: item['IsLatest'], response['Versions']))[0] + oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0] + + # Double check ordering of version ID's + latest['VersionId'].should.equal('1') + oldest['VersionId'].should.equal('0') + + # Double check the name is still unicode + latest['Key'].should.equal('key-with-versions-and-unicode-ó') + oldest['Key'].should.equal('key-with-versions-and-unicode-ó') + + +@mock_s3 +def test_boto3_multiple_delete_markers(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = u'key-with-versions-and-unicode-ó' + s3.create_bucket(Bucket=bucket_name) + s3.put_bucket_versioning( + Bucket=bucket_name, + VersioningConfiguration={ + 'Status': 'Enabled' + } + ) + items = (six.b('v1'), six.b('v2')) + for body in items: + s3.put_object( + Bucket=bucket_name, + Key=key, + Body=body + ) + + # Delete the object twice to add multiple delete markers + s3.delete_object(Bucket=bucket_name, Key=key) + s3.delete_object(Bucket=bucket_name, Key=key) + + response = s3.list_object_versions(Bucket=bucket_name) + response['DeleteMarkers'].should.have.length_of(2) + + with assert_raises(ClientError) as e: + s3.get_object( + Bucket=bucket_name, + Key=key + ) + e.response['Error']['Code'].should.equal('404') + + # Remove both delete markers to restore the object + s3.delete_object( + Bucket=bucket_name, + Key=key, + VersionId='2' + ) + s3.delete_object( + Bucket=bucket_name, + Key=key, + VersionId='3' + ) + + response = s3.get_object( + Bucket=bucket_name, + Key=key + 
)
+    response['Body'].read().should.equal(items[-1])
+    response = s3.list_object_versions(Bucket=bucket_name)
+    response['Versions'].should.have.length_of(2)
+
+    # We've asserted there is only 2 records so one is newest, one is oldest
+    latest = list(filter(lambda item: item['IsLatest'], response['Versions']))[0]
+    oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0]
+
+    # Double check ordering of version ID's
+    latest['VersionId'].should.equal('1')
+    oldest['VersionId'].should.equal('0')
+
+    # Double check the name is still unicode
+    latest['Key'].should.equal('key-with-versions-and-unicode-ó')
+    oldest['Key'].should.equal('key-with-versions-and-unicode-ó')
+
+@mock_s3
+def test_get_stream_gzipped():
+    payload = b"this is some stuff here"
+
+    s3_client = boto3.client("s3", region_name='us-east-1')
+    s3_client.create_bucket(Bucket='moto-tests')
+    buffer_ = BytesIO()
+    with GzipFile(fileobj=buffer_, mode='w') as f:
+        f.write(payload)
+    payload_gz = buffer_.getvalue()
+
+    s3_client.put_object(
+        Bucket='moto-tests',
+        Key='keyname',
+        Body=payload_gz,
+        ContentEncoding='gzip',
+    )
+
+    obj = s3_client.get_object(
+        Bucket='moto-tests',
+        Key='keyname',
+    )
+    res = zlib.decompress(obj['Body'].read(), 16 + zlib.MAX_WBITS)
+    assert res == payload
+
+
+TEST_XML = """\
+<?xml version="1.0" encoding="UTF-8"?>
+<ns0:WebsiteConfiguration xmlns:ns0="http://s3.amazonaws.com/doc/2006-03-01/">
+    <ns0:IndexDocument>
+        <ns0:Suffix>index.html</ns0:Suffix>
+    </ns0:IndexDocument>
+    <ns0:RoutingRules>
+        <ns0:RoutingRule>
+            <ns0:Condition>
+                <ns0:KeyPrefixEquals>test/testing</ns0:KeyPrefixEquals>
+            </ns0:Condition>
+            <ns0:Redirect>
+                <ns0:ReplaceKeyWith>test.txt</ns0:ReplaceKeyWith>
+            </ns0:Redirect>
+        </ns0:RoutingRule>
+    </ns0:RoutingRules>
+</ns0:WebsiteConfiguration>
+"""
diff --git a/tests/test_s3/test_s3_lifecycle.py b/tests/test_s3/test_s3_lifecycle.py
index 3d533a641..6cb43e96f 100644
--- a/tests/test_s3/test_s3_lifecycle.py
+++ b/tests/test_s3/test_s3_lifecycle.py
@@ -1,387 +1,387 @@
-from __future__ import unicode_literals
-
-import boto
-import boto3
-from boto.exception import S3ResponseError
-from boto.s3.lifecycle import Lifecycle, Transition, Expiration, Rule
-
-import sure  # noqa
-from botocore.exceptions import ClientError
-from datetime import datetime
-from nose.tools import assert_raises
-
-from moto import mock_s3_deprecated, mock_s3
-
-
-@mock_s3_deprecated
-def test_lifecycle_create():
-    conn = boto.s3.connect_to_region("us-west-1")
-    bucket = conn.create_bucket("foobar")
-
-    lifecycle = Lifecycle()
-    lifecycle.add_rule('myid', '', 'Enabled', 30)
-    bucket.configure_lifecycle(lifecycle)
-    response = bucket.get_lifecycle_config()
-    len(response).should.equal(1)
-    lifecycle = response[0]
-    lifecycle.id.should.equal('myid')
-    lifecycle.prefix.should.equal('')
-    lifecycle.status.should.equal('Enabled')
-    list(lifecycle.transition).should.equal([])
-
-
-@mock_s3
-def test_lifecycle_with_filters():
-    client = boto3.client("s3")
-    client.create_bucket(Bucket="bucket")
-
-    # Create a lifecycle rule with a Filter (no tags):
-    lfc = {
-        "Rules": [
-            {
-                "Expiration": {
-                    "Days": 7
-                },
-                "ID": "wholebucket",
-                "Filter": {
-                    "Prefix": ""
-                },
-                "Status": "Enabled"
-            }
-        ]
-    }
-    client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc)
-    result = client.get_bucket_lifecycle_configuration(Bucket="bucket")
-    assert len(result["Rules"]) == 1
-    assert result["Rules"][0]["Filter"]["Prefix"] == ''
-    assert not result["Rules"][0]["Filter"].get("And")
-    assert not result["Rules"][0]["Filter"].get("Tag")
-    with assert_raises(KeyError):
-        assert result["Rules"][0]["Prefix"]
-
-    # With a tag:
-    lfc["Rules"][0]["Filter"]["Tag"] = {
-        "Key": "mytag",
-        "Value": "mytagvalue"
-    }
-    client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc)
-    result = client.get_bucket_lifecycle_configuration(Bucket="bucket")
-    assert len(result["Rules"]) == 1
-    assert
result["Rules"][0]["Filter"]["Prefix"] == '' - assert not result["Rules"][0]["Filter"].get("And") - assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" - assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" - with assert_raises(KeyError): - assert result["Rules"][0]["Prefix"] - - # With And (single tag): - lfc["Rules"][0]["Filter"]["And"] = { - "Prefix": "some/prefix", - "Tags": [ - { - "Key": "mytag", - "Value": "mytagvalue" - } - ] - } - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["Filter"]["Prefix"] == "" - assert result["Rules"][0]["Filter"]["And"]["Prefix"] == "some/prefix" - assert len(result["Rules"][0]["Filter"]["And"]["Tags"]) == 1 - assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Key"] == "mytag" - assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Value"] == "mytagvalue" - assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" - assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" - with assert_raises(KeyError): - assert result["Rules"][0]["Prefix"] - - # With multiple And tags: - lfc["Rules"][0]["Filter"]["And"] = { - "Prefix": "some/prefix", - "Tags": [ - { - "Key": "mytag", - "Value": "mytagvalue" - }, - { - "Key": "mytag2", - "Value": "mytagvalue2" - } - ] - } - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["Filter"]["Prefix"] == "" - assert result["Rules"][0]["Filter"]["And"]["Prefix"] == "some/prefix" - assert len(result["Rules"][0]["Filter"]["And"]["Tags"]) == 2 - assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Key"] == "mytag" - assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Value"] == "mytagvalue" - assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" - assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" - assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Key"] == "mytag2" - assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Value"] == "mytagvalue2" - assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" - assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" - with assert_raises(KeyError): - assert result["Rules"][0]["Prefix"] - - # Can't have both filter and prefix: - lfc["Rules"][0]["Prefix"] = '' - with assert_raises(ClientError) as err: - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - assert err.exception.response["Error"]["Code"] == "MalformedXML" - - lfc["Rules"][0]["Prefix"] = 'some/path' - with assert_raises(ClientError) as err: - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - assert err.exception.response["Error"]["Code"] == "MalformedXML" - - # No filters -- just a prefix: - del lfc["Rules"][0]["Filter"] - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert not result["Rules"][0].get("Filter") - assert result["Rules"][0]["Prefix"] == "some/path" - - -@mock_s3 -def test_lifecycle_with_eodm(): - client = boto3.client("s3") - client.create_bucket(Bucket="bucket") - - lfc = { - "Rules": [ - { - "Expiration": { - "ExpiredObjectDeleteMarker": True - }, - "ID": "wholebucket", - "Filter": { - "Prefix": "" - 
}, - "Status": "Enabled" - } - ] - } - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"] - - # Set to False: - lfc["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"] = False - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert not result["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"] - - # With failure: - lfc["Rules"][0]["Expiration"]["Days"] = 7 - with assert_raises(ClientError) as err: - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - assert err.exception.response["Error"]["Code"] == "MalformedXML" - del lfc["Rules"][0]["Expiration"]["Days"] - - lfc["Rules"][0]["Expiration"]["Date"] = datetime(2015, 1, 1) - with assert_raises(ClientError) as err: - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - assert err.exception.response["Error"]["Code"] == "MalformedXML" - - -@mock_s3 -def test_lifecycle_with_nve(): - client = boto3.client("s3") - client.create_bucket(Bucket="bucket") - - lfc = { - "Rules": [ - { - "NoncurrentVersionExpiration": { - "NoncurrentDays": 30 - }, - "ID": "wholebucket", - "Filter": { - "Prefix": "" - }, - "Status": "Enabled" - } - ] - } - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] == 30 - - # Change NoncurrentDays: - lfc["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] = 10 - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] == 10 - - # TODO: Add test for failures due to missing children - - -@mock_s3 -def test_lifecycle_with_nvt(): - client = boto3.client("s3") - client.create_bucket(Bucket="bucket") - - lfc = { - "Rules": [ - { - "NoncurrentVersionTransitions": [{ - "NoncurrentDays": 30, - "StorageClass": "ONEZONE_IA" - }], - "ID": "wholebucket", - "Filter": { - "Prefix": "" - }, - "Status": "Enabled" - } - ] - } - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] == 30 - assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] == "ONEZONE_IA" - - # Change NoncurrentDays: - lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] = 10 - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] == 10 - - # Change StorageClass: - lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] = "GLACIER" - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = 
client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] == "GLACIER" - - # With failures for missing children: - del lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] - with assert_raises(ClientError) as err: - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - assert err.exception.response["Error"]["Code"] == "MalformedXML" - lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] = 30 - - del lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] - with assert_raises(ClientError) as err: - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - assert err.exception.response["Error"]["Code"] == "MalformedXML" - - -@mock_s3 -def test_lifecycle_with_aimu(): - client = boto3.client("s3") - client.create_bucket(Bucket="bucket") - - lfc = { - "Rules": [ - { - "AbortIncompleteMultipartUpload": { - "DaysAfterInitiation": 7 - }, - "ID": "wholebucket", - "Filter": { - "Prefix": "" - }, - "Status": "Enabled" - } - ] - } - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] == 7 - - # Change DaysAfterInitiation: - lfc["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] = 30 - client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) - result = client.get_bucket_lifecycle_configuration(Bucket="bucket") - assert len(result["Rules"]) == 1 - assert result["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] == 30 - - # TODO: Add test for failures due to missing children - - -@mock_s3_deprecated -def test_lifecycle_with_glacier_transition(): - conn = boto.s3.connect_to_region("us-west-1") - bucket = conn.create_bucket("foobar") - - lifecycle = Lifecycle() - transition = Transition(days=30, storage_class='GLACIER') - rule = Rule('myid', prefix='', status='Enabled', expiration=None, - transition=transition) - lifecycle.append(rule) - bucket.configure_lifecycle(lifecycle) - response = bucket.get_lifecycle_config() - transition = response[0].transition - transition.days.should.equal(30) - transition.storage_class.should.equal('GLACIER') - transition.date.should.equal(None) - - -@mock_s3_deprecated -def test_lifecycle_multi(): - conn = boto.s3.connect_to_region("us-west-1") - bucket = conn.create_bucket("foobar") - - date = '2022-10-12T00:00:00.000Z' - sc = 'GLACIER' - lifecycle = Lifecycle() - lifecycle.add_rule("1", "1/", "Enabled", 1) - lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2)) - lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date)) - lifecycle.add_rule("4", "4/", "Enabled", None, - Transition(days=4, storage_class=sc)) - lifecycle.add_rule("5", "5/", "Enabled", None, - Transition(date=date, storage_class=sc)) - - bucket.configure_lifecycle(lifecycle) - # read the lifecycle back - rules = bucket.get_lifecycle_config() - - for rule in rules: - if rule.id == "1": - rule.prefix.should.equal("1/") - rule.expiration.days.should.equal(1) - elif rule.id == "2": - rule.prefix.should.equal("2/") - rule.expiration.days.should.equal(2) - elif rule.id == "3": - rule.prefix.should.equal("3/") - rule.expiration.date.should.equal(date) - elif rule.id == "4": - 
rule.prefix.should.equal("4/") - rule.transition.days.should.equal(4) - rule.transition.storage_class.should.equal(sc) - elif rule.id == "5": - rule.prefix.should.equal("5/") - rule.transition.date.should.equal(date) - rule.transition.storage_class.should.equal(sc) - else: - assert False, "Invalid rule id" - - -@mock_s3_deprecated -def test_lifecycle_delete(): - conn = boto.s3.connect_to_region("us-west-1") - bucket = conn.create_bucket("foobar") - - lifecycle = Lifecycle() - lifecycle.add_rule(expiration=30) - bucket.configure_lifecycle(lifecycle) - response = bucket.get_lifecycle_config() - response.should.have.length_of(1) - - bucket.delete_lifecycle_configuration() - bucket.get_lifecycle_config.when.called_with().should.throw(S3ResponseError) +from __future__ import unicode_literals + +import boto +import boto3 +from boto.exception import S3ResponseError +from boto.s3.lifecycle import Lifecycle, Transition, Expiration, Rule + +import sure # noqa +from botocore.exceptions import ClientError +from datetime import datetime +from nose.tools import assert_raises + +from moto import mock_s3_deprecated, mock_s3 + + +@mock_s3_deprecated +def test_lifecycle_create(): + conn = boto.s3.connect_to_region("us-west-1") + bucket = conn.create_bucket("foobar") + + lifecycle = Lifecycle() + lifecycle.add_rule('myid', '', 'Enabled', 30) + bucket.configure_lifecycle(lifecycle) + response = bucket.get_lifecycle_config() + len(response).should.equal(1) + lifecycle = response[0] + lifecycle.id.should.equal('myid') + lifecycle.prefix.should.equal('') + lifecycle.status.should.equal('Enabled') + list(lifecycle.transition).should.equal([]) + + +@mock_s3 +def test_lifecycle_with_filters(): + client = boto3.client("s3") + client.create_bucket(Bucket="bucket") + + # Create a lifecycle rule with a Filter (no tags): + lfc = { + "Rules": [ + { + "Expiration": { + "Days": 7 + }, + "ID": "wholebucket", + "Filter": { + "Prefix": "" + }, + "Status": "Enabled" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Filter"]["Prefix"] == '' + assert not result["Rules"][0]["Filter"].get("And") + assert not result["Rules"][0]["Filter"].get("Tag") + with assert_raises(KeyError): + assert result["Rules"][0]["Prefix"] + + # With a tag: + lfc["Rules"][0]["Filter"]["Tag"] = { + "Key": "mytag", + "Value": "mytagvalue" + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Filter"]["Prefix"] == '' + assert not result["Rules"][0]["Filter"].get("And") + assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" + with assert_raises(KeyError): + assert result["Rules"][0]["Prefix"] + + # With And (single tag): + lfc["Rules"][0]["Filter"]["And"] = { + "Prefix": "some/prefix", + "Tags": [ + { + "Key": "mytag", + "Value": "mytagvalue" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Filter"]["Prefix"] == "" + assert result["Rules"][0]["Filter"]["And"]["Prefix"] == "some/prefix" + assert len(result["Rules"][0]["Filter"]["And"]["Tags"]) == 1 
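# --- Editor's note (illustrative addition, not part of this patch): a
# lifecycle rule carries either a legacy top-level Prefix or a Filter, never
# both (supplying both is rejected as MalformedXML below), and a Filter holds
# exactly one of Prefix, Tag, or And (a Prefix plus a list of Tags). A small
# sketch of that classification:
def filter_shape(rule):
    if 'Filter' not in rule:
        return 'legacy-prefix'
    f = rule['Filter']
    if 'And' in f:
        return 'and'
    return 'tag' if 'Tag' in f else 'prefix'
# ---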
+ assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Value"] == "mytagvalue" + assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" + with assert_raises(KeyError): + assert result["Rules"][0]["Prefix"] + + # With multiple And tags: + lfc["Rules"][0]["Filter"]["And"] = { + "Prefix": "some/prefix", + "Tags": [ + { + "Key": "mytag", + "Value": "mytagvalue" + }, + { + "Key": "mytag2", + "Value": "mytagvalue2" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Filter"]["Prefix"] == "" + assert result["Rules"][0]["Filter"]["And"]["Prefix"] == "some/prefix" + assert len(result["Rules"][0]["Filter"]["And"]["Tags"]) == 2 + assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Value"] == "mytagvalue" + assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" + assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Key"] == "mytag2" + assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Value"] == "mytagvalue2" + assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" + with assert_raises(KeyError): + assert result["Rules"][0]["Prefix"] + + # Can't have both filter and prefix: + lfc["Rules"][0]["Prefix"] = '' + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + lfc["Rules"][0]["Prefix"] = 'some/path' + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + # No filters -- just a prefix: + del lfc["Rules"][0]["Filter"] + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert not result["Rules"][0].get("Filter") + assert result["Rules"][0]["Prefix"] == "some/path" + + +@mock_s3 +def test_lifecycle_with_eodm(): + client = boto3.client("s3") + client.create_bucket(Bucket="bucket") + + lfc = { + "Rules": [ + { + "Expiration": { + "ExpiredObjectDeleteMarker": True + }, + "ID": "wholebucket", + "Filter": { + "Prefix": "" + }, + "Status": "Enabled" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"] + + # Set to False: + lfc["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"] = False + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert not result["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"] + + # With failure: + lfc["Rules"][0]["Expiration"]["Days"] = 7 + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", 
LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + del lfc["Rules"][0]["Expiration"]["Days"] + + lfc["Rules"][0]["Expiration"]["Date"] = datetime(2015, 1, 1) + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + +@mock_s3 +def test_lifecycle_with_nve(): + client = boto3.client("s3") + client.create_bucket(Bucket="bucket") + + lfc = { + "Rules": [ + { + "NoncurrentVersionExpiration": { + "NoncurrentDays": 30 + }, + "ID": "wholebucket", + "Filter": { + "Prefix": "" + }, + "Status": "Enabled" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] == 30 + + # Change NoncurrentDays: + lfc["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] = 10 + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["NoncurrentVersionExpiration"]["NoncurrentDays"] == 10 + + # TODO: Add test for failures due to missing children + + +@mock_s3 +def test_lifecycle_with_nvt(): + client = boto3.client("s3") + client.create_bucket(Bucket="bucket") + + lfc = { + "Rules": [ + { + "NoncurrentVersionTransitions": [{ + "NoncurrentDays": 30, + "StorageClass": "ONEZONE_IA" + }], + "ID": "wholebucket", + "Filter": { + "Prefix": "" + }, + "Status": "Enabled" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] == 30 + assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] == "ONEZONE_IA" + + # Change NoncurrentDays: + lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] = 10 + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] == 10 + + # Change StorageClass: + lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] = "GLACIER" + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] == "GLACIER" + + # With failures for missing children: + del lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["NoncurrentDays"] = 30 + + del lfc["Rules"][0]["NoncurrentVersionTransitions"][0]["StorageClass"] + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == 
"MalformedXML" + + +@mock_s3 +def test_lifecycle_with_aimu(): + client = boto3.client("s3") + client.create_bucket(Bucket="bucket") + + lfc = { + "Rules": [ + { + "AbortIncompleteMultipartUpload": { + "DaysAfterInitiation": 7 + }, + "ID": "wholebucket", + "Filter": { + "Prefix": "" + }, + "Status": "Enabled" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] == 7 + + # Change DaysAfterInitiation: + lfc["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] = 30 + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["AbortIncompleteMultipartUpload"]["DaysAfterInitiation"] == 30 + + # TODO: Add test for failures due to missing children + + +@mock_s3_deprecated +def test_lifecycle_with_glacier_transition(): + conn = boto.s3.connect_to_region("us-west-1") + bucket = conn.create_bucket("foobar") + + lifecycle = Lifecycle() + transition = Transition(days=30, storage_class='GLACIER') + rule = Rule('myid', prefix='', status='Enabled', expiration=None, + transition=transition) + lifecycle.append(rule) + bucket.configure_lifecycle(lifecycle) + response = bucket.get_lifecycle_config() + transition = response[0].transition + transition.days.should.equal(30) + transition.storage_class.should.equal('GLACIER') + transition.date.should.equal(None) + + +@mock_s3_deprecated +def test_lifecycle_multi(): + conn = boto.s3.connect_to_region("us-west-1") + bucket = conn.create_bucket("foobar") + + date = '2022-10-12T00:00:00.000Z' + sc = 'GLACIER' + lifecycle = Lifecycle() + lifecycle.add_rule("1", "1/", "Enabled", 1) + lifecycle.add_rule("2", "2/", "Enabled", Expiration(days=2)) + lifecycle.add_rule("3", "3/", "Enabled", Expiration(date=date)) + lifecycle.add_rule("4", "4/", "Enabled", None, + Transition(days=4, storage_class=sc)) + lifecycle.add_rule("5", "5/", "Enabled", None, + Transition(date=date, storage_class=sc)) + + bucket.configure_lifecycle(lifecycle) + # read the lifecycle back + rules = bucket.get_lifecycle_config() + + for rule in rules: + if rule.id == "1": + rule.prefix.should.equal("1/") + rule.expiration.days.should.equal(1) + elif rule.id == "2": + rule.prefix.should.equal("2/") + rule.expiration.days.should.equal(2) + elif rule.id == "3": + rule.prefix.should.equal("3/") + rule.expiration.date.should.equal(date) + elif rule.id == "4": + rule.prefix.should.equal("4/") + rule.transition.days.should.equal(4) + rule.transition.storage_class.should.equal(sc) + elif rule.id == "5": + rule.prefix.should.equal("5/") + rule.transition.date.should.equal(date) + rule.transition.storage_class.should.equal(sc) + else: + assert False, "Invalid rule id" + + +@mock_s3_deprecated +def test_lifecycle_delete(): + conn = boto.s3.connect_to_region("us-west-1") + bucket = conn.create_bucket("foobar") + + lifecycle = Lifecycle() + lifecycle.add_rule(expiration=30) + bucket.configure_lifecycle(lifecycle) + response = bucket.get_lifecycle_config() + response.should.have.length_of(1) + + bucket.delete_lifecycle_configuration() + bucket.get_lifecycle_config.when.called_with().should.throw(S3ResponseError) diff --git a/tests/test_s3/test_s3_storageclass.py b/tests/test_s3/test_s3_storageclass.py index 
99908c501..982376e23 100644 --- a/tests/test_s3/test_s3_storageclass.py +++ b/tests/test_s3/test_s3_storageclass.py @@ -1,106 +1,106 @@ -from __future__ import unicode_literals - -import boto -import boto3 -from boto.exception import S3CreateError, S3ResponseError -from boto.s3.lifecycle import Lifecycle, Transition, Expiration, Rule - -import sure # noqa -from botocore.exceptions import ClientError -from datetime import datetime -from nose.tools import assert_raises - -from moto import mock_s3_deprecated, mock_s3 - - -@mock_s3 -def test_s3_storage_class_standard(): - s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") - - # add an object to the bucket with standard storage - - s3.put_object(Bucket="Bucket", Key="my_key", Body="my_value") - - list_of_objects = s3.list_objects(Bucket="Bucket") - - list_of_objects['Contents'][0]["StorageClass"].should.equal("STANDARD") - - -@mock_s3 -def test_s3_storage_class_infrequent_access(): - s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") - - # add an object to the bucket with standard storage - - s3.put_object(Bucket="Bucket", Key="my_key_infrequent", Body="my_value_infrequent", StorageClass="STANDARD_IA") - - D = s3.list_objects(Bucket="Bucket") - - D['Contents'][0]["StorageClass"].should.equal("STANDARD_IA") - - -@mock_s3 -def test_s3_storage_class_copy(): - s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") - s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARD") - - s3.create_bucket(Bucket="Bucket2") - # second object is originally of storage class REDUCED_REDUNDANCY - s3.put_object(Bucket="Bucket2", Key="Second_Object", Body="Body2") - - s3.copy_object(CopySource = {"Bucket": "Bucket", "Key": "First_Object"}, Bucket="Bucket2", Key="Second_Object", StorageClass="ONEZONE_IA") - - list_of_copied_objects = s3.list_objects(Bucket="Bucket2") - - # checks that a copied object can be properly copied - list_of_copied_objects["Contents"][0]["StorageClass"].should.equal("ONEZONE_IA") - - -@mock_s3 -def test_s3_invalid_copied_storage_class(): - s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") - s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARD") - - s3.create_bucket(Bucket="Bucket2") - s3.put_object(Bucket="Bucket2", Key="Second_Object", Body="Body2", StorageClass="REDUCED_REDUNDANCY") - - # Try to copy an object with an invalid storage class - with assert_raises(ClientError) as err: - s3.copy_object(CopySource = {"Bucket": "Bucket", "Key": "First_Object"}, Bucket="Bucket2", Key="Second_Object", StorageClass="STANDARD2") - - e = err.exception - e.response["Error"]["Code"].should.equal("InvalidStorageClass") - e.response["Error"]["Message"].should.equal("The storage class you specified is not valid") - - -@mock_s3 -def test_s3_invalid_storage_class(): - s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") - - # Try to add an object with an invalid storage class - with assert_raises(ClientError) as err: - s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARDD") - - e = err.exception - e.response["Error"]["Code"].should.equal("InvalidStorageClass") - e.response["Error"]["Message"].should.equal("The storage class you specified is not valid") - -@mock_s3 -def test_s3_default_storage_class(): - s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") - - s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body") - - list_of_objects = s3.list_objects(Bucket="Bucket") - - # tests that the default storage 
class is still STANDARD - list_of_objects["Contents"][0]["StorageClass"].should.equal("STANDARD") - - - +from __future__ import unicode_literals + +import boto +import boto3 +from boto.exception import S3CreateError, S3ResponseError +from boto.s3.lifecycle import Lifecycle, Transition, Expiration, Rule + +import sure # noqa +from botocore.exceptions import ClientError +from datetime import datetime +from nose.tools import assert_raises + +from moto import mock_s3_deprecated, mock_s3 + + +@mock_s3 +def test_s3_storage_class_standard(): + s3 = boto3.client("s3") + s3.create_bucket(Bucket="Bucket") + + # add an object to the bucket with standard storage + + s3.put_object(Bucket="Bucket", Key="my_key", Body="my_value") + + list_of_objects = s3.list_objects(Bucket="Bucket") + + list_of_objects['Contents'][0]["StorageClass"].should.equal("STANDARD") + + +@mock_s3 +def test_s3_storage_class_infrequent_access(): + s3 = boto3.client("s3") + s3.create_bucket(Bucket="Bucket") + + # add an object to the bucket with standard storage + + s3.put_object(Bucket="Bucket", Key="my_key_infrequent", Body="my_value_infrequent", StorageClass="STANDARD_IA") + + D = s3.list_objects(Bucket="Bucket") + + D['Contents'][0]["StorageClass"].should.equal("STANDARD_IA") + + +@mock_s3 +def test_s3_storage_class_copy(): + s3 = boto3.client("s3") + s3.create_bucket(Bucket="Bucket") + s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARD") + + s3.create_bucket(Bucket="Bucket2") + # second object is originally of storage class REDUCED_REDUNDANCY + s3.put_object(Bucket="Bucket2", Key="Second_Object", Body="Body2") + + s3.copy_object(CopySource = {"Bucket": "Bucket", "Key": "First_Object"}, Bucket="Bucket2", Key="Second_Object", StorageClass="ONEZONE_IA") + + list_of_copied_objects = s3.list_objects(Bucket="Bucket2") + + # checks that a copied object can be properly copied + list_of_copied_objects["Contents"][0]["StorageClass"].should.equal("ONEZONE_IA") + + +@mock_s3 +def test_s3_invalid_copied_storage_class(): + s3 = boto3.client("s3") + s3.create_bucket(Bucket="Bucket") + s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARD") + + s3.create_bucket(Bucket="Bucket2") + s3.put_object(Bucket="Bucket2", Key="Second_Object", Body="Body2", StorageClass="REDUCED_REDUNDANCY") + + # Try to copy an object with an invalid storage class + with assert_raises(ClientError) as err: + s3.copy_object(CopySource = {"Bucket": "Bucket", "Key": "First_Object"}, Bucket="Bucket2", Key="Second_Object", StorageClass="STANDARD2") + + e = err.exception + e.response["Error"]["Code"].should.equal("InvalidStorageClass") + e.response["Error"]["Message"].should.equal("The storage class you specified is not valid") + + +@mock_s3 +def test_s3_invalid_storage_class(): + s3 = boto3.client("s3") + s3.create_bucket(Bucket="Bucket") + + # Try to add an object with an invalid storage class + with assert_raises(ClientError) as err: + s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARDD") + + e = err.exception + e.response["Error"]["Code"].should.equal("InvalidStorageClass") + e.response["Error"]["Message"].should.equal("The storage class you specified is not valid") + +@mock_s3 +def test_s3_default_storage_class(): + s3 = boto3.client("s3") + s3.create_bucket(Bucket="Bucket") + + s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body") + + list_of_objects = s3.list_objects(Bucket="Bucket") + + # tests that the default storage class is still STANDARD + 
list_of_objects["Contents"][0]["StorageClass"].should.equal("STANDARD") + + + diff --git a/tests/test_s3/test_s3_utils.py b/tests/test_s3/test_s3_utils.py index ce9f54c75..501137910 100644 --- a/tests/test_s3/test_s3_utils.py +++ b/tests/test_s3/test_s3_utils.py @@ -1,80 +1,80 @@ -from __future__ import unicode_literals -import os -from sure import expect -from moto.s3.utils import bucket_name_from_url, _VersionedKeyStore, parse_region_from_url - - -def test_base_url(): - expect(bucket_name_from_url('https://s3.amazonaws.com/')).should.equal(None) - - -def test_localhost_bucket(): - expect(bucket_name_from_url('https://wfoobar.localhost:5000/abc') - ).should.equal("wfoobar") - - -def test_localhost_without_bucket(): - expect(bucket_name_from_url( - 'https://www.localhost:5000/def')).should.equal(None) - -def test_force_ignore_subdomain_for_bucketnames(): - os.environ['S3_IGNORE_SUBDOMAIN_BUCKETNAME'] = '1' - expect(bucket_name_from_url('https://subdomain.localhost:5000/abc/resource')).should.equal(None) - del(os.environ['S3_IGNORE_SUBDOMAIN_BUCKETNAME']) - - - -def test_versioned_key_store(): - d = _VersionedKeyStore() - - d.should.have.length_of(0) - - d['key'] = [1] - - d.should.have.length_of(1) - - d['key'] = 2 - d.should.have.length_of(1) - - d.should.have.key('key').being.equal(2) - - d.get.when.called_with('key').should.return_value(2) - d.get.when.called_with('badkey').should.return_value(None) - d.get.when.called_with('badkey', 'HELLO').should.return_value('HELLO') - - # Tests key[ - d.shouldnt.have.key('badkey') - d.__getitem__.when.called_with('badkey').should.throw(KeyError) - - d.getlist('key').should.have.length_of(2) - d.getlist('key').should.be.equal([[1], 2]) - d.getlist('badkey').should.be.none - - d.setlist('key', 1) - d.getlist('key').should.be.equal([1]) - - d.setlist('key', (1, 2)) - d.getlist('key').shouldnt.be.equal((1, 2)) - d.getlist('key').should.be.equal([1, 2]) - - d.setlist('key', [[1], [2]]) - d['key'].should.have.length_of(1) - d.getlist('key').should.be.equal([[1], [2]]) - - -def test_parse_region_from_url(): - expected = 'us-west-2' - for url in ['http://s3-us-west-2.amazonaws.com/bucket', - 'http://s3.us-west-2.amazonaws.com/bucket', - 'http://bucket.s3-us-west-2.amazonaws.com', - 'https://s3-us-west-2.amazonaws.com/bucket', - 'https://s3.us-west-2.amazonaws.com/bucket', - 'https://bucket.s3-us-west-2.amazonaws.com']: - parse_region_from_url(url).should.equal(expected) - - expected = 'us-east-1' - for url in ['http://s3.amazonaws.com/bucket', - 'http://bucket.s3.amazonaws.com', - 'https://s3.amazonaws.com/bucket', - 'https://bucket.s3.amazonaws.com']: - parse_region_from_url(url).should.equal(expected) +from __future__ import unicode_literals +import os +from sure import expect +from moto.s3.utils import bucket_name_from_url, _VersionedKeyStore, parse_region_from_url + + +def test_base_url(): + expect(bucket_name_from_url('https://s3.amazonaws.com/')).should.equal(None) + + +def test_localhost_bucket(): + expect(bucket_name_from_url('https://wfoobar.localhost:5000/abc') + ).should.equal("wfoobar") + + +def test_localhost_without_bucket(): + expect(bucket_name_from_url( + 'https://www.localhost:5000/def')).should.equal(None) + +def test_force_ignore_subdomain_for_bucketnames(): + os.environ['S3_IGNORE_SUBDOMAIN_BUCKETNAME'] = '1' + expect(bucket_name_from_url('https://subdomain.localhost:5000/abc/resource')).should.equal(None) + del(os.environ['S3_IGNORE_SUBDOMAIN_BUCKETNAME']) + + + +def test_versioned_key_store(): + d = _VersionedKeyStore() + + 
d.should.have.length_of(0)
+
+ d['key'] = [1]
+
+ d.should.have.length_of(1)
+
+ d['key'] = 2
+ d.should.have.length_of(1)
+
+ d.should.have.key('key').being.equal(2)
+
+ d.get.when.called_with('key').should.return_value(2)
+ d.get.when.called_with('badkey').should.return_value(None)
+ d.get.when.called_with('badkey', 'HELLO').should.return_value('HELLO')
+
+ # Tests key[
+ d.shouldnt.have.key('badkey')
+ d.__getitem__.when.called_with('badkey').should.throw(KeyError)
+
+ d.getlist('key').should.have.length_of(2)
+ d.getlist('key').should.be.equal([[1], 2])
+ d.getlist('badkey').should.be.none
+
+ d.setlist('key', 1)
+ d.getlist('key').should.be.equal([1])
+
+ d.setlist('key', (1, 2))
+ d.getlist('key').shouldnt.be.equal((1, 2))
+ d.getlist('key').should.be.equal([1, 2])
+
+ d.setlist('key', [[1], [2]])
+ d['key'].should.have.length_of(1)
+ d.getlist('key').should.be.equal([[1], [2]])
+
+
+def test_parse_region_from_url():
+ expected = 'us-west-2'
+ for url in ['http://s3-us-west-2.amazonaws.com/bucket',
+ 'http://s3.us-west-2.amazonaws.com/bucket',
+ 'http://bucket.s3-us-west-2.amazonaws.com',
+ 'https://s3-us-west-2.amazonaws.com/bucket',
+ 'https://s3.us-west-2.amazonaws.com/bucket',
+ 'https://bucket.s3-us-west-2.amazonaws.com']:
+ parse_region_from_url(url).should.equal(expected)
+
+ expected = 'us-east-1'
+ for url in ['http://s3.amazonaws.com/bucket',
+ 'http://bucket.s3.amazonaws.com',
+ 'https://s3.amazonaws.com/bucket',
+ 'https://bucket.s3.amazonaws.com']:
+ parse_region_from_url(url).should.equal(expected)
diff --git a/tests/test_s3/test_server.py b/tests/test_s3/test_server.py
index 9c8252a04..efa05b862 100644
--- a/tests/test_s3/test_server.py
+++ b/tests/test_s3/test_server.py
@@ -1,105 +1,105 @@
-# coding=utf-8
-
-from __future__ import unicode_literals
-import sure # noqa
-
-from flask.testing import FlaskClient
-import moto.server as server
-
-'''
-Test the different server responses
-'''
-
-
-class AuthenticatedClient(FlaskClient):
- def open(self, *args, **kwargs):
- kwargs['headers'] = kwargs.get('headers', {})
- kwargs['headers']['Authorization'] = "Any authorization header"
- return super(AuthenticatedClient, self).open(*args, **kwargs)
-
-
-def authenticated_client():
- backend = server.create_backend_app("s3")
- backend.test_client_class = AuthenticatedClient
- return backend.test_client()
-
-
-def test_s3_server_get():
- test_client = authenticated_client()
- res = test_client.get('/')
-
- res.data.should.contain(b'ListAllMyBucketsResult')
-
-
-def test_s3_server_bucket_create():
- test_client = authenticated_client()
-
- res = test_client.put('/', 'http://foobaz.localhost:5000/')
- res.status_code.should.equal(200)
-
- res = test_client.get('/')
- res.data.should.contain(b'foobaz')
-
- res = test_client.get('/', 'http://foobaz.localhost:5000/')
- res.status_code.should.equal(200)
- res.data.should.contain(b"ListBucketResult")
-
- res = test_client.put(
- '/bar', 'http://foobaz.localhost:5000/', data='test value')
- res.status_code.should.equal(200)
- assert 'ETag' in dict(res.headers)
-
- res = test_client.get('/bar', 'http://foobaz.localhost:5000/')
- res.status_code.should.equal(200)
- res.data.should.equal(b"test value")
-
-
-def test_s3_server_bucket_versioning():
- test_client = authenticated_client()
-
- # Just enough XML to enable versioning
- body = '<Status>Enabled</Status>'
- res = test_client.put(
- '/?versioning', 'http://foobaz.localhost:5000', data=body)
- res.status_code.should.equal(200)
-
-
-def test_s3_server_post_to_bucket():
- test_client =
authenticated_client()
-
- res = test_client.put('/', 'http://tester.localhost:5000/')
- res.status_code.should.equal(200)
-
- test_client.post('/', "https://tester.localhost:5000/", data={
- 'key': 'the-key',
- 'file': 'nothing'
- })
-
- res = test_client.get('/the-key', 'http://tester.localhost:5000/')
- res.status_code.should.equal(200)
- res.data.should.equal(b"nothing")
-
-
-def test_s3_server_post_without_content_length():
- test_client = authenticated_client()
-
- res = test_client.put('/', 'http://tester.localhost:5000/', environ_overrides={'CONTENT_LENGTH': ''})
- res.status_code.should.equal(411)
-
- res = test_client.post('/', "https://tester.localhost:5000/", environ_overrides={'CONTENT_LENGTH': ''})
- res.status_code.should.equal(411)
-
-
-def test_s3_server_post_unicode_bucket_key():
- # Make sure that we can deal with non-ascii characters in request URLs (e.g., S3 object names)
- dispatcher = server.DomainDispatcherApplication(server.create_backend_app)
- backend_app = dispatcher.get_application({
- 'HTTP_HOST': 's3.amazonaws.com',
- 'PATH_INFO': '/test-bucket/test-object-てすと'
- })
- assert backend_app
- backend_app = dispatcher.get_application({
- 'HTTP_HOST': 's3.amazonaws.com',
- 'PATH_INFO': '/test-bucket/test-object-てすと'.encode('utf-8')
- })
- assert backend_app
+# coding=utf-8
+
+from __future__ import unicode_literals
+import sure # noqa
+
+from flask.testing import FlaskClient
+import moto.server as server
+
+'''
+Test the different server responses
+'''
+
+
+class AuthenticatedClient(FlaskClient):
+ def open(self, *args, **kwargs):
+ kwargs['headers'] = kwargs.get('headers', {})
+ kwargs['headers']['Authorization'] = "Any authorization header"
+ return super(AuthenticatedClient, self).open(*args, **kwargs)
+
+
+def authenticated_client():
+ backend = server.create_backend_app("s3")
+ backend.test_client_class = AuthenticatedClient
+ return backend.test_client()
+
+
+def test_s3_server_get():
+ test_client = authenticated_client()
+ res = test_client.get('/')
+
+ res.data.should.contain(b'ListAllMyBucketsResult')
+
+
+def test_s3_server_bucket_create():
+ test_client = authenticated_client()
+
+ res = test_client.put('/', 'http://foobaz.localhost:5000/')
+ res.status_code.should.equal(200)
+
+ res = test_client.get('/')
+ res.data.should.contain(b'foobaz')
+
+ res = test_client.get('/', 'http://foobaz.localhost:5000/')
+ res.status_code.should.equal(200)
+ res.data.should.contain(b"ListBucketResult")
+
+ res = test_client.put(
+ '/bar', 'http://foobaz.localhost:5000/', data='test value')
+ res.status_code.should.equal(200)
+ assert 'ETag' in dict(res.headers)
+
+ res = test_client.get('/bar', 'http://foobaz.localhost:5000/')
+ res.status_code.should.equal(200)
+ res.data.should.equal(b"test value")
+
+
+def test_s3_server_bucket_versioning():
+ test_client = authenticated_client()
+
+ # Just enough XML to enable versioning
+ body = '<Status>Enabled</Status>'
+ res = test_client.put(
+ '/?versioning', 'http://foobaz.localhost:5000', data=body)
+ res.status_code.should.equal(200)
+
+
+def test_s3_server_post_to_bucket():
+ test_client = authenticated_client()
+
+ res = test_client.put('/', 'http://tester.localhost:5000/')
+ res.status_code.should.equal(200)
+
+ test_client.post('/', "https://tester.localhost:5000/", data={
+ 'key': 'the-key',
+ 'file': 'nothing'
+ })
+
+ res = test_client.get('/the-key', 'http://tester.localhost:5000/')
+ res.status_code.should.equal(200)
+ res.data.should.equal(b"nothing")
+
+
+def test_s3_server_post_without_content_length():
+ test_client =
authenticated_client() + + res = test_client.put('/', 'http://tester.localhost:5000/', environ_overrides={'CONTENT_LENGTH': ''}) + res.status_code.should.equal(411) + + res = test_client.post('/', "https://tester.localhost:5000/", environ_overrides={'CONTENT_LENGTH': ''}) + res.status_code.should.equal(411) + + +def test_s3_server_post_unicode_bucket_key(): + # Make sure that we can deal with non-ascii characters in request URLs (e.g., S3 object names) + dispatcher = server.DomainDispatcherApplication(server.create_backend_app) + backend_app = dispatcher.get_application({ + 'HTTP_HOST': 's3.amazonaws.com', + 'PATH_INFO': '/test-bucket/test-object-てすと' + }) + assert backend_app + backend_app = dispatcher.get_application({ + 'HTTP_HOST': 's3.amazonaws.com', + 'PATH_INFO': '/test-bucket/test-object-てすと'.encode('utf-8') + }) + assert backend_app diff --git a/tests/test_s3bucket_path/test_bucket_path_server.py b/tests/test_s3bucket_path/test_bucket_path_server.py index 434110e87..0fd73c3b9 100644 --- a/tests/test_s3bucket_path/test_bucket_path_server.py +++ b/tests/test_s3bucket_path/test_bucket_path_server.py @@ -1,113 +1,113 @@ -from __future__ import unicode_literals -import sure # noqa - -from flask.testing import FlaskClient -import moto.server as server - -''' -Test the different server responses -''' - - -class AuthenticatedClient(FlaskClient): - def open(self, *args, **kwargs): - kwargs['headers'] = kwargs.get('headers', {}) - kwargs['headers']['Authorization'] = "Any authorization header" - return super(AuthenticatedClient, self).open(*args, **kwargs) - - -def authenticated_client(): - backend = server.create_backend_app("s3bucket_path") - backend.test_client_class = AuthenticatedClient - return backend.test_client() - - -def test_s3_server_get(): - test_client = authenticated_client() - - res = test_client.get('/') - - res.data.should.contain(b'ListAllMyBucketsResult') - - -def test_s3_server_bucket_create(): - test_client = authenticated_client() - - res = test_client.put('/foobar', 'http://localhost:5000') - res.status_code.should.equal(200) - - res = test_client.get('/') - res.data.should.contain(b'foobar') - - res = test_client.get('/foobar', 'http://localhost:5000') - res.status_code.should.equal(200) - res.data.should.contain(b"ListBucketResult") - - res = test_client.put('/foobar2/', 'http://localhost:5000') - res.status_code.should.equal(200) - - res = test_client.get('/') - res.data.should.contain(b'foobar2') - - res = test_client.get('/foobar2/', 'http://localhost:5000') - res.status_code.should.equal(200) - res.data.should.contain(b"ListBucketResult") - - res = test_client.get('/missing-bucket', 'http://localhost:5000') - res.status_code.should.equal(404) - - res = test_client.put( - '/foobar/bar', 'http://localhost:5000', data='test value') - res.status_code.should.equal(200) - - res = test_client.get('/foobar/bar', 'http://localhost:5000') - res.status_code.should.equal(200) - res.data.should.equal(b"test value") - - -def test_s3_server_post_to_bucket(): - test_client = authenticated_client() - - res = test_client.put('/foobar2', 'http://localhost:5000/') - res.status_code.should.equal(200) - - test_client.post('/foobar2', "https://localhost:5000/", data={ - 'key': 'the-key', - 'file': 'nothing' - }) - - res = test_client.get('/foobar2/the-key', 'http://localhost:5000/') - res.status_code.should.equal(200) - res.data.should.equal(b"nothing") - - -def test_s3_server_put_ipv6(): - test_client = authenticated_client() - - res = test_client.put('/foobar2', 
'http://[::]:5000/') - res.status_code.should.equal(200) - - test_client.post('/foobar2', "https://[::]:5000/", data={ - 'key': 'the-key', - 'file': 'nothing' - }) - - res = test_client.get('/foobar2/the-key', 'http://[::]:5000/') - res.status_code.should.equal(200) - res.data.should.equal(b"nothing") - - -def test_s3_server_put_ipv4(): - test_client = authenticated_client() - - res = test_client.put('/foobar2', 'http://127.0.0.1:5000/') - res.status_code.should.equal(200) - - test_client.post('/foobar2', "https://127.0.0.1:5000/", data={ - 'key': 'the-key', - 'file': 'nothing' - }) - - res = test_client.get('/foobar2/the-key', 'http://127.0.0.1:5000/') - res.status_code.should.equal(200) - res.data.should.equal(b"nothing") +from __future__ import unicode_literals +import sure # noqa + +from flask.testing import FlaskClient +import moto.server as server + +''' +Test the different server responses +''' + + +class AuthenticatedClient(FlaskClient): + def open(self, *args, **kwargs): + kwargs['headers'] = kwargs.get('headers', {}) + kwargs['headers']['Authorization'] = "Any authorization header" + return super(AuthenticatedClient, self).open(*args, **kwargs) + + +def authenticated_client(): + backend = server.create_backend_app("s3bucket_path") + backend.test_client_class = AuthenticatedClient + return backend.test_client() + + +def test_s3_server_get(): + test_client = authenticated_client() + + res = test_client.get('/') + + res.data.should.contain(b'ListAllMyBucketsResult') + + +def test_s3_server_bucket_create(): + test_client = authenticated_client() + + res = test_client.put('/foobar', 'http://localhost:5000') + res.status_code.should.equal(200) + + res = test_client.get('/') + res.data.should.contain(b'foobar') + + res = test_client.get('/foobar', 'http://localhost:5000') + res.status_code.should.equal(200) + res.data.should.contain(b"ListBucketResult") + + res = test_client.put('/foobar2/', 'http://localhost:5000') + res.status_code.should.equal(200) + + res = test_client.get('/') + res.data.should.contain(b'foobar2') + + res = test_client.get('/foobar2/', 'http://localhost:5000') + res.status_code.should.equal(200) + res.data.should.contain(b"ListBucketResult") + + res = test_client.get('/missing-bucket', 'http://localhost:5000') + res.status_code.should.equal(404) + + res = test_client.put( + '/foobar/bar', 'http://localhost:5000', data='test value') + res.status_code.should.equal(200) + + res = test_client.get('/foobar/bar', 'http://localhost:5000') + res.status_code.should.equal(200) + res.data.should.equal(b"test value") + + +def test_s3_server_post_to_bucket(): + test_client = authenticated_client() + + res = test_client.put('/foobar2', 'http://localhost:5000/') + res.status_code.should.equal(200) + + test_client.post('/foobar2', "https://localhost:5000/", data={ + 'key': 'the-key', + 'file': 'nothing' + }) + + res = test_client.get('/foobar2/the-key', 'http://localhost:5000/') + res.status_code.should.equal(200) + res.data.should.equal(b"nothing") + + +def test_s3_server_put_ipv6(): + test_client = authenticated_client() + + res = test_client.put('/foobar2', 'http://[::]:5000/') + res.status_code.should.equal(200) + + test_client.post('/foobar2', "https://[::]:5000/", data={ + 'key': 'the-key', + 'file': 'nothing' + }) + + res = test_client.get('/foobar2/the-key', 'http://[::]:5000/') + res.status_code.should.equal(200) + res.data.should.equal(b"nothing") + + +def test_s3_server_put_ipv4(): + test_client = authenticated_client() + + res = test_client.put('/foobar2', 
'http://127.0.0.1:5000/') + res.status_code.should.equal(200) + + test_client.post('/foobar2', "https://127.0.0.1:5000/", data={ + 'key': 'the-key', + 'file': 'nothing' + }) + + res = test_client.get('/foobar2/the-key', 'http://127.0.0.1:5000/') + res.status_code.should.equal(200) + res.data.should.equal(b"nothing") diff --git a/tests/test_s3bucket_path/test_s3bucket_path.py b/tests/test_s3bucket_path/test_s3bucket_path.py index 21d786c61..2ec5e8f30 100644 --- a/tests/test_s3bucket_path/test_s3bucket_path.py +++ b/tests/test_s3bucket_path/test_s3bucket_path.py @@ -1,321 +1,321 @@ -from __future__ import unicode_literals -from six.moves.urllib.request import urlopen -from six.moves.urllib.error import HTTPError - -import boto -from boto.exception import S3ResponseError -from boto.s3.key import Key -from boto.s3.connection import OrdinaryCallingFormat - -from freezegun import freeze_time -import requests - -import sure # noqa - -from moto import mock_s3, mock_s3_deprecated - - -def create_connection(key=None, secret=None): - return boto.connect_s3(key, secret, calling_format=OrdinaryCallingFormat()) - - -class MyModel(object): - - def __init__(self, name, value): - self.name = name - self.value = value - - def save(self): - conn = create_connection('the_key', 'the_secret') - bucket = conn.get_bucket('mybucket') - k = Key(bucket) - k.key = self.name - k.set_contents_from_string(self.value) - - -@mock_s3_deprecated -def test_my_model_save(): - # Create Bucket so that test can run - conn = create_connection('the_key', 'the_secret') - conn.create_bucket('mybucket') - #################################### - - model_instance = MyModel('steve', 'is awesome') - model_instance.save() - - conn.get_bucket('mybucket').get_key( - 'steve').get_contents_as_string().should.equal(b'is awesome') - - -@mock_s3_deprecated -def test_missing_key(): - conn = create_connection('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - bucket.get_key("the-key").should.equal(None) - - -@mock_s3_deprecated -def test_missing_key_urllib2(): - conn = create_connection('the_key', 'the_secret') - conn.create_bucket("foobar") - - urlopen.when.called_with( - "http://s3.amazonaws.com/foobar/the-key").should.throw(HTTPError) - - -@mock_s3_deprecated -def test_empty_key(): - conn = create_connection('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("") - - bucket.get_key("the-key").get_contents_as_string().should.equal(b'') - - -@mock_s3_deprecated -def test_empty_key_set_on_existing_key(): - conn = create_connection('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("foobar") - - bucket.get_key("the-key").get_contents_as_string().should.equal(b'foobar') - - key.set_contents_from_string("") - bucket.get_key("the-key").get_contents_as_string().should.equal(b'') - - -@mock_s3_deprecated -def test_large_key_save(): - conn = create_connection('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("foobar" * 100000) - - bucket.get_key( - "the-key").get_contents_as_string().should.equal(b'foobar' * 100000) - - -@mock_s3_deprecated -def test_copy_key(): - conn = create_connection('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("some value") - - bucket.copy_key('new-key', 'foobar', 
'the-key') - - bucket.get_key( - "the-key").get_contents_as_string().should.equal(b"some value") - bucket.get_key( - "new-key").get_contents_as_string().should.equal(b"some value") - - -@mock_s3_deprecated -def test_set_metadata(): - conn = create_connection('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = 'the-key' - key.set_metadata('md', 'Metadatastring') - key.set_contents_from_string("Testval") - - bucket.get_key('the-key').get_metadata('md').should.equal('Metadatastring') - - -@freeze_time("2012-01-01 12:00:00") -@mock_s3_deprecated -def test_last_modified(): - # See https://github.com/boto/boto/issues/466 - conn = create_connection() - bucket = conn.create_bucket("foobar") - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("some value") - - rs = bucket.get_all_keys() - rs[0].last_modified.should.equal('2012-01-01T12:00:00.000Z') - - bucket.get_key( - "the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT') - - -@mock_s3_deprecated -def test_missing_bucket(): - conn = create_connection('the_key', 'the_secret') - conn.get_bucket.when.called_with('mybucket').should.throw(S3ResponseError) - - -@mock_s3_deprecated -def test_bucket_with_dash(): - conn = create_connection('the_key', 'the_secret') - conn.get_bucket.when.called_with( - 'mybucket-test').should.throw(S3ResponseError) - - -@mock_s3_deprecated -def test_bucket_deletion(): - conn = create_connection('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - - key = Key(bucket) - key.key = "the-key" - key.set_contents_from_string("some value") - - # Try to delete a bucket that still has keys - conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError) - - bucket.delete_key("the-key") - conn.delete_bucket("foobar") - - # Get non-existing bucket - conn.get_bucket.when.called_with("foobar").should.throw(S3ResponseError) - - # Delete non-existant bucket - conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError) - - -@mock_s3_deprecated -def test_get_all_buckets(): - conn = create_connection('the_key', 'the_secret') - conn.create_bucket("foobar") - conn.create_bucket("foobar2") - buckets = conn.get_all_buckets() - - buckets.should.have.length_of(2) - - -@mock_s3 -@mock_s3_deprecated -def test_post_to_bucket(): - conn = create_connection('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - - requests.post("https://s3.amazonaws.com/foobar", { - 'key': 'the-key', - 'file': 'nothing' - }) - - bucket.get_key('the-key').get_contents_as_string().should.equal(b'nothing') - - -@mock_s3 -@mock_s3_deprecated -def test_post_with_metadata_to_bucket(): - conn = create_connection('the_key', 'the_secret') - bucket = conn.create_bucket("foobar") - - requests.post("https://s3.amazonaws.com/foobar", { - 'key': 'the-key', - 'file': 'nothing', - 'x-amz-meta-test': 'metadata' - }) - - bucket.get_key('the-key').get_metadata('test').should.equal('metadata') - - -@mock_s3_deprecated -def test_bucket_name_with_dot(): - conn = create_connection() - bucket = conn.create_bucket('firstname.lastname') - - k = Key(bucket, 'somekey') - k.set_contents_from_string('somedata') - - -@mock_s3_deprecated -def test_key_with_special_characters(): - conn = create_connection() - bucket = conn.create_bucket('test_bucket_name') - - key = Key(bucket, 'test_list_keys_2/*x+?^@~!y') - key.set_contents_from_string('value1') - - key_list = bucket.list('test_list_keys_2/', '/') - keys = [x for x in key_list] - 
keys[0].name.should.equal("test_list_keys_2/*x+?^@~!y") - - -@mock_s3_deprecated -def test_bucket_key_listing_order(): - conn = create_connection() - bucket = conn.create_bucket('test_bucket') - prefix = 'toplevel/' - - def store(name): - k = Key(bucket, prefix + name) - k.set_contents_from_string('somedata') - - names = ['x/key', 'y.key1', 'y.key2', 'y.key3', 'x/y/key', 'x/y/z/key'] - - for name in names: - store(name) - - delimiter = None - keys = [x.name for x in bucket.list(prefix, delimiter)] - keys.should.equal([ - 'toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key', - 'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3' - ]) - - delimiter = '/' - keys = [x.name for x in bucket.list(prefix, delimiter)] - keys.should.equal([ - 'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3', 'toplevel/x/' - ]) - - # Test delimiter with no prefix - delimiter = '/' - keys = [x.name for x in bucket.list(prefix=None, delimiter=delimiter)] - keys.should.equal(['toplevel/']) - - delimiter = None - keys = [x.name for x in bucket.list(prefix + 'x', delimiter)] - keys.should.equal( - ['toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key']) - - delimiter = '/' - keys = [x.name for x in bucket.list(prefix + 'x', delimiter)] - keys.should.equal(['toplevel/x/']) - - -@mock_s3_deprecated -def test_delete_keys(): - conn = create_connection() - bucket = conn.create_bucket('foobar') - - Key(bucket=bucket, name='file1').set_contents_from_string('abc') - Key(bucket=bucket, name='file2').set_contents_from_string('abc') - Key(bucket=bucket, name='file3').set_contents_from_string('abc') - Key(bucket=bucket, name='file4').set_contents_from_string('abc') - - result = bucket.delete_keys(['file2', 'file3']) - result.deleted.should.have.length_of(2) - result.errors.should.have.length_of(0) - keys = bucket.get_all_keys() - keys.should.have.length_of(2) - keys[0].name.should.equal('file1') - - -@mock_s3_deprecated -def test_delete_keys_with_invalid(): - conn = create_connection() - bucket = conn.create_bucket('foobar') - - Key(bucket=bucket, name='file1').set_contents_from_string('abc') - Key(bucket=bucket, name='file2').set_contents_from_string('abc') - Key(bucket=bucket, name='file3').set_contents_from_string('abc') - Key(bucket=bucket, name='file4').set_contents_from_string('abc') - - result = bucket.delete_keys(['abc', 'file3']) - - result.deleted.should.have.length_of(1) - result.errors.should.have.length_of(1) - keys = bucket.get_all_keys() - keys.should.have.length_of(3) - keys[0].name.should.equal('file1') +from __future__ import unicode_literals +from six.moves.urllib.request import urlopen +from six.moves.urllib.error import HTTPError + +import boto +from boto.exception import S3ResponseError +from boto.s3.key import Key +from boto.s3.connection import OrdinaryCallingFormat + +from freezegun import freeze_time +import requests + +import sure # noqa + +from moto import mock_s3, mock_s3_deprecated + + +def create_connection(key=None, secret=None): + return boto.connect_s3(key, secret, calling_format=OrdinaryCallingFormat()) + + +class MyModel(object): + + def __init__(self, name, value): + self.name = name + self.value = value + + def save(self): + conn = create_connection('the_key', 'the_secret') + bucket = conn.get_bucket('mybucket') + k = Key(bucket) + k.key = self.name + k.set_contents_from_string(self.value) + + +@mock_s3_deprecated +def test_my_model_save(): + # Create Bucket so that test can run + conn = create_connection('the_key', 'the_secret') + conn.create_bucket('mybucket') + 
#################################### + + model_instance = MyModel('steve', 'is awesome') + model_instance.save() + + conn.get_bucket('mybucket').get_key( + 'steve').get_contents_as_string().should.equal(b'is awesome') + + +@mock_s3_deprecated +def test_missing_key(): + conn = create_connection('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + bucket.get_key("the-key").should.equal(None) + + +@mock_s3_deprecated +def test_missing_key_urllib2(): + conn = create_connection('the_key', 'the_secret') + conn.create_bucket("foobar") + + urlopen.when.called_with( + "http://s3.amazonaws.com/foobar/the-key").should.throw(HTTPError) + + +@mock_s3_deprecated +def test_empty_key(): + conn = create_connection('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("") + + bucket.get_key("the-key").get_contents_as_string().should.equal(b'') + + +@mock_s3_deprecated +def test_empty_key_set_on_existing_key(): + conn = create_connection('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("foobar") + + bucket.get_key("the-key").get_contents_as_string().should.equal(b'foobar') + + key.set_contents_from_string("") + bucket.get_key("the-key").get_contents_as_string().should.equal(b'') + + +@mock_s3_deprecated +def test_large_key_save(): + conn = create_connection('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("foobar" * 100000) + + bucket.get_key( + "the-key").get_contents_as_string().should.equal(b'foobar' * 100000) + + +@mock_s3_deprecated +def test_copy_key(): + conn = create_connection('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("some value") + + bucket.copy_key('new-key', 'foobar', 'the-key') + + bucket.get_key( + "the-key").get_contents_as_string().should.equal(b"some value") + bucket.get_key( + "new-key").get_contents_as_string().should.equal(b"some value") + + +@mock_s3_deprecated +def test_set_metadata(): + conn = create_connection('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = 'the-key' + key.set_metadata('md', 'Metadatastring') + key.set_contents_from_string("Testval") + + bucket.get_key('the-key').get_metadata('md').should.equal('Metadatastring') + + +@freeze_time("2012-01-01 12:00:00") +@mock_s3_deprecated +def test_last_modified(): + # See https://github.com/boto/boto/issues/466 + conn = create_connection() + bucket = conn.create_bucket("foobar") + key = Key(bucket) + key.key = "the-key" + key.set_contents_from_string("some value") + + rs = bucket.get_all_keys() + rs[0].last_modified.should.equal('2012-01-01T12:00:00.000Z') + + bucket.get_key( + "the-key").last_modified.should.equal('Sun, 01 Jan 2012 12:00:00 GMT') + + +@mock_s3_deprecated +def test_missing_bucket(): + conn = create_connection('the_key', 'the_secret') + conn.get_bucket.when.called_with('mybucket').should.throw(S3ResponseError) + + +@mock_s3_deprecated +def test_bucket_with_dash(): + conn = create_connection('the_key', 'the_secret') + conn.get_bucket.when.called_with( + 'mybucket-test').should.throw(S3ResponseError) + + +@mock_s3_deprecated +def test_bucket_deletion(): + conn = create_connection('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + + key = Key(bucket) + key.key = "the-key" + 
key.set_contents_from_string("some value") + + # Try to delete a bucket that still has keys + conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError) + + bucket.delete_key("the-key") + conn.delete_bucket("foobar") + + # Get non-existing bucket + conn.get_bucket.when.called_with("foobar").should.throw(S3ResponseError) + + # Delete non-existant bucket + conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError) + + +@mock_s3_deprecated +def test_get_all_buckets(): + conn = create_connection('the_key', 'the_secret') + conn.create_bucket("foobar") + conn.create_bucket("foobar2") + buckets = conn.get_all_buckets() + + buckets.should.have.length_of(2) + + +@mock_s3 +@mock_s3_deprecated +def test_post_to_bucket(): + conn = create_connection('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + + requests.post("https://s3.amazonaws.com/foobar", { + 'key': 'the-key', + 'file': 'nothing' + }) + + bucket.get_key('the-key').get_contents_as_string().should.equal(b'nothing') + + +@mock_s3 +@mock_s3_deprecated +def test_post_with_metadata_to_bucket(): + conn = create_connection('the_key', 'the_secret') + bucket = conn.create_bucket("foobar") + + requests.post("https://s3.amazonaws.com/foobar", { + 'key': 'the-key', + 'file': 'nothing', + 'x-amz-meta-test': 'metadata' + }) + + bucket.get_key('the-key').get_metadata('test').should.equal('metadata') + + +@mock_s3_deprecated +def test_bucket_name_with_dot(): + conn = create_connection() + bucket = conn.create_bucket('firstname.lastname') + + k = Key(bucket, 'somekey') + k.set_contents_from_string('somedata') + + +@mock_s3_deprecated +def test_key_with_special_characters(): + conn = create_connection() + bucket = conn.create_bucket('test_bucket_name') + + key = Key(bucket, 'test_list_keys_2/*x+?^@~!y') + key.set_contents_from_string('value1') + + key_list = bucket.list('test_list_keys_2/', '/') + keys = [x for x in key_list] + keys[0].name.should.equal("test_list_keys_2/*x+?^@~!y") + + +@mock_s3_deprecated +def test_bucket_key_listing_order(): + conn = create_connection() + bucket = conn.create_bucket('test_bucket') + prefix = 'toplevel/' + + def store(name): + k = Key(bucket, prefix + name) + k.set_contents_from_string('somedata') + + names = ['x/key', 'y.key1', 'y.key2', 'y.key3', 'x/y/key', 'x/y/z/key'] + + for name in names: + store(name) + + delimiter = None + keys = [x.name for x in bucket.list(prefix, delimiter)] + keys.should.equal([ + 'toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key', + 'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3' + ]) + + delimiter = '/' + keys = [x.name for x in bucket.list(prefix, delimiter)] + keys.should.equal([ + 'toplevel/y.key1', 'toplevel/y.key2', 'toplevel/y.key3', 'toplevel/x/' + ]) + + # Test delimiter with no prefix + delimiter = '/' + keys = [x.name for x in bucket.list(prefix=None, delimiter=delimiter)] + keys.should.equal(['toplevel/']) + + delimiter = None + keys = [x.name for x in bucket.list(prefix + 'x', delimiter)] + keys.should.equal( + ['toplevel/x/key', 'toplevel/x/y/key', 'toplevel/x/y/z/key']) + + delimiter = '/' + keys = [x.name for x in bucket.list(prefix + 'x', delimiter)] + keys.should.equal(['toplevel/x/']) + + +@mock_s3_deprecated +def test_delete_keys(): + conn = create_connection() + bucket = conn.create_bucket('foobar') + + Key(bucket=bucket, name='file1').set_contents_from_string('abc') + Key(bucket=bucket, name='file2').set_contents_from_string('abc') + Key(bucket=bucket, name='file3').set_contents_from_string('abc') + 
Key(bucket=bucket, name='file4').set_contents_from_string('abc') + + result = bucket.delete_keys(['file2', 'file3']) + result.deleted.should.have.length_of(2) + result.errors.should.have.length_of(0) + keys = bucket.get_all_keys() + keys.should.have.length_of(2) + keys[0].name.should.equal('file1') + + +@mock_s3_deprecated +def test_delete_keys_with_invalid(): + conn = create_connection() + bucket = conn.create_bucket('foobar') + + Key(bucket=bucket, name='file1').set_contents_from_string('abc') + Key(bucket=bucket, name='file2').set_contents_from_string('abc') + Key(bucket=bucket, name='file3').set_contents_from_string('abc') + Key(bucket=bucket, name='file4').set_contents_from_string('abc') + + result = bucket.delete_keys(['abc', 'file3']) + + result.deleted.should.have.length_of(1) + result.errors.should.have.length_of(1) + keys = bucket.get_all_keys() + keys.should.have.length_of(3) + keys[0].name.should.equal('file1') diff --git a/tests/test_s3bucket_path/test_s3bucket_path_combo.py b/tests/test_s3bucket_path/test_s3bucket_path_combo.py index e1b1075ee..60dd58e85 100644 --- a/tests/test_s3bucket_path/test_s3bucket_path_combo.py +++ b/tests/test_s3bucket_path/test_s3bucket_path_combo.py @@ -1,25 +1,25 @@ -from __future__ import unicode_literals - -import boto -from boto.s3.connection import OrdinaryCallingFormat - -from moto import mock_s3_deprecated - - -def create_connection(key=None, secret=None): - return boto.connect_s3(key, secret, calling_format=OrdinaryCallingFormat()) - - -def test_bucketpath_combo_serial(): - @mock_s3_deprecated - def make_bucket_path(): - conn = create_connection() - conn.create_bucket('mybucketpath') - - @mock_s3_deprecated - def make_bucket(): - conn = boto.connect_s3('the_key', 'the_secret') - conn.create_bucket('mybucket') - - make_bucket() - make_bucket_path() +from __future__ import unicode_literals + +import boto +from boto.s3.connection import OrdinaryCallingFormat + +from moto import mock_s3_deprecated + + +def create_connection(key=None, secret=None): + return boto.connect_s3(key, secret, calling_format=OrdinaryCallingFormat()) + + +def test_bucketpath_combo_serial(): + @mock_s3_deprecated + def make_bucket_path(): + conn = create_connection() + conn.create_bucket('mybucketpath') + + @mock_s3_deprecated + def make_bucket(): + conn = boto.connect_s3('the_key', 'the_secret') + conn.create_bucket('mybucket') + + make_bucket() + make_bucket_path() diff --git a/tests/test_s3bucket_path/test_s3bucket_path_utils.py b/tests/test_s3bucket_path/test_s3bucket_path_utils.py index c607ea2ec..0bcc5cbe0 100644 --- a/tests/test_s3bucket_path/test_s3bucket_path_utils.py +++ b/tests/test_s3bucket_path/test_s3bucket_path_utils.py @@ -1,16 +1,16 @@ -from __future__ import unicode_literals -from sure import expect -from moto.s3bucket_path.utils import bucket_name_from_url - - -def test_base_url(): - expect(bucket_name_from_url('https://s3.amazonaws.com/')).should.equal(None) - - -def test_localhost_bucket(): - expect(bucket_name_from_url('https://localhost:5000/wfoobar/abc') - ).should.equal("wfoobar") - - -def test_localhost_without_bucket(): - expect(bucket_name_from_url('https://www.localhost:5000')).should.equal(None) +from __future__ import unicode_literals +from sure import expect +from moto.s3bucket_path.utils import bucket_name_from_url + + +def test_base_url(): + expect(bucket_name_from_url('https://s3.amazonaws.com/')).should.equal(None) + + +def test_localhost_bucket(): + expect(bucket_name_from_url('https://localhost:5000/wfoobar/abc') + 
).should.equal("wfoobar") + + +def test_localhost_without_bucket(): + expect(bucket_name_from_url('https://www.localhost:5000')).should.equal(None) diff --git a/tests/test_secretsmanager/test_secretsmanager.py b/tests/test_secretsmanager/test_secretsmanager.py index ec384a660..9d496704c 100644 --- a/tests/test_secretsmanager/test_secretsmanager.py +++ b/tests/test_secretsmanager/test_secretsmanager.py @@ -1,286 +1,286 @@ -from __future__ import unicode_literals - -import boto3 - -from moto import mock_secretsmanager -from botocore.exceptions import ClientError -import sure # noqa -import string -import unittest -from nose.tools import assert_raises - -@mock_secretsmanager -def test_get_secret_value(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - create_secret = conn.create_secret(Name='java-util-test-password', - SecretString="foosecret") - result = conn.get_secret_value(SecretId='java-util-test-password') - assert result['SecretString'] == 'foosecret' - -@mock_secretsmanager -def test_get_secret_that_does_not_exist(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - with assert_raises(ClientError): - result = conn.get_secret_value(SecretId='i-dont-exist') - -@mock_secretsmanager -def test_get_secret_that_does_not_match(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - create_secret = conn.create_secret(Name='java-util-test-password', - SecretString="foosecret") - - with assert_raises(ClientError): - result = conn.get_secret_value(SecretId='i-dont-match') - -@mock_secretsmanager -def test_create_secret(): - conn = boto3.client('secretsmanager', region_name='us-east-1') - - result = conn.create_secret(Name='test-secret', SecretString="foosecret") - assert result['ARN'] == ( - 'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad') - assert result['Name'] == 'test-secret' - secret = conn.get_secret_value(SecretId='test-secret') - assert secret['SecretString'] == 'foosecret' - -@mock_secretsmanager -def test_get_random_password_default_length(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = conn.get_random_password() - assert len(random_password['RandomPassword']) == 32 - -@mock_secretsmanager -def test_get_random_password_default_requirements(): - # When require_each_included_type, default true - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = conn.get_random_password() - # Should contain lowercase, upppercase, digit, special character - assert any(c.islower() for c in random_password['RandomPassword']) - assert any(c.isupper() for c in random_password['RandomPassword']) - assert any(c.isdigit() for c in random_password['RandomPassword']) - assert any(c in string.punctuation - for c in random_password['RandomPassword']) - -@mock_secretsmanager -def test_get_random_password_custom_length(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = conn.get_random_password(PasswordLength=50) - assert len(random_password['RandomPassword']) == 50 - -@mock_secretsmanager -def test_get_random_exclude_lowercase(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = conn.get_random_password(PasswordLength=55, - ExcludeLowercase=True) - assert any(c.islower() for c in random_password['RandomPassword']) == False - -@mock_secretsmanager -def test_get_random_exclude_uppercase(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = 
conn.get_random_password(PasswordLength=55, - ExcludeUppercase=True) - assert any(c.isupper() for c in random_password['RandomPassword']) == False - -@mock_secretsmanager -def test_get_random_exclude_characters_and_symbols(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = conn.get_random_password(PasswordLength=20, - ExcludeCharacters='xyzDje@?!.') - assert any(c in 'xyzDje@?!.' for c in random_password['RandomPassword']) == False - -@mock_secretsmanager -def test_get_random_exclude_numbers(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = conn.get_random_password(PasswordLength=100, - ExcludeNumbers=True) - assert any(c.isdigit() for c in random_password['RandomPassword']) == False - -@mock_secretsmanager -def test_get_random_exclude_punctuation(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = conn.get_random_password(PasswordLength=100, - ExcludePunctuation=True) - assert any(c in string.punctuation - for c in random_password['RandomPassword']) == False - -@mock_secretsmanager -def test_get_random_include_space_false(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = conn.get_random_password(PasswordLength=300) - assert any(c.isspace() for c in random_password['RandomPassword']) == False - -@mock_secretsmanager -def test_get_random_include_space_true(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = conn.get_random_password(PasswordLength=4, - IncludeSpace=True) - assert any(c.isspace() for c in random_password['RandomPassword']) == True - -@mock_secretsmanager -def test_get_random_require_each_included_type(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - random_password = conn.get_random_password(PasswordLength=4, - RequireEachIncludedType=True) - assert any(c in string.punctuation for c in random_password['RandomPassword']) == True - assert any(c in string.ascii_lowercase for c in random_password['RandomPassword']) == True - assert any(c in string.ascii_uppercase for c in random_password['RandomPassword']) == True - assert any(c in string.digits for c in random_password['RandomPassword']) == True - -@mock_secretsmanager -def test_get_random_too_short_password(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - with assert_raises(ClientError): - random_password = conn.get_random_password(PasswordLength=3) - -@mock_secretsmanager -def test_get_random_too_long_password(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - with assert_raises(Exception): - random_password = conn.get_random_password(PasswordLength=5555) - -@mock_secretsmanager -def test_describe_secret(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name='test-secret', - SecretString='foosecret') - - secret_description = conn.describe_secret(SecretId='test-secret') - assert secret_description # Returned dict is not empty - assert secret_description['ARN'] == ( - 'arn:aws:secretsmanager:us-west-2:1234567890:secret:test-secret-rIjad') - -@mock_secretsmanager -def test_describe_secret_that_does_not_exist(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - - with assert_raises(ClientError): - result = conn.get_secret_value(SecretId='i-dont-exist') - -@mock_secretsmanager -def test_describe_secret_that_does_not_match(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - 
conn.create_secret(Name='test-secret', - SecretString='foosecret') - - with assert_raises(ClientError): - result = conn.get_secret_value(SecretId='i-dont-match') - -@mock_secretsmanager -def test_rotate_secret(): - secret_name = 'test-secret' - conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, - SecretString='foosecret') - - rotated_secret = conn.rotate_secret(SecretId=secret_name) - - assert rotated_secret - assert rotated_secret['ARN'] == ( - 'arn:aws:secretsmanager:us-west-2:1234567890:secret:test-secret-rIjad' - ) - assert rotated_secret['Name'] == secret_name - assert rotated_secret['VersionId'] != '' - -@mock_secretsmanager -def test_rotate_secret_enable_rotation(): - secret_name = 'test-secret' - conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, - SecretString='foosecret') - - initial_description = conn.describe_secret(SecretId=secret_name) - assert initial_description - assert initial_description['RotationEnabled'] is False - assert initial_description['RotationRules']['AutomaticallyAfterDays'] == 0 - - conn.rotate_secret(SecretId=secret_name, - RotationRules={'AutomaticallyAfterDays': 42}) - - rotated_description = conn.describe_secret(SecretId=secret_name) - assert rotated_description - assert rotated_description['RotationEnabled'] is True - assert rotated_description['RotationRules']['AutomaticallyAfterDays'] == 42 - -@mock_secretsmanager -def test_rotate_secret_that_does_not_exist(): - conn = boto3.client('secretsmanager', 'us-west-2') - - with assert_raises(ClientError): - result = conn.rotate_secret(SecretId='i-dont-exist') - -@mock_secretsmanager -def test_rotate_secret_that_does_not_match(): - conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name='test-secret', - SecretString='foosecret') - - with assert_raises(ClientError): - result = conn.rotate_secret(SecretId='i-dont-match') - -@mock_secretsmanager -def test_rotate_secret_client_request_token_too_short(): - # Test is intentionally empty. Boto3 catches too short ClientRequestToken - # and raises ParamValidationError before Moto can see it. - # test_server actually handles this error. - assert True - -@mock_secretsmanager -def test_rotate_secret_client_request_token_too_long(): - secret_name = 'test-secret' - conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, - SecretString='foosecret') - - client_request_token = ( - 'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C-' - 'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C' - ) - with assert_raises(ClientError): - result = conn.rotate_secret(SecretId=secret_name, - ClientRequestToken=client_request_token) - -@mock_secretsmanager -def test_rotate_secret_rotation_lambda_arn_too_long(): - secret_name = 'test-secret' - conn = boto3.client('secretsmanager', region_name='us-west-2') - conn.create_secret(Name=secret_name, - SecretString='foosecret') - - rotation_lambda_arn = '85B7-446A-B7E4' * 147 # == 2058 characters - with assert_raises(ClientError): - result = conn.rotate_secret(SecretId=secret_name, - RotationLambdaARN=rotation_lambda_arn) - -@mock_secretsmanager -def test_rotate_secret_rotation_period_zero(): - # Test is intentionally empty. Boto3 catches zero day rotation period - # and raises ParamValidationError before Moto can see it. - # test_server actually handles this error. 
-    assert True
-
-@mock_secretsmanager
-def test_rotate_secret_rotation_period_too_long():
-    secret_name = 'test-secret'
-    conn = boto3.client('secretsmanager', region_name='us-west-2')
-    conn.create_secret(Name=secret_name,
-                       SecretString='foosecret')
-
-    rotation_rules = {'AutomaticallyAfterDays': 1001}
-    with assert_raises(ClientError):
-        result = conn.rotate_secret(SecretId=secret_name,
-                                    RotationRules=rotation_rules)
+from __future__ import unicode_literals
+
+import boto3
+
+from moto import mock_secretsmanager
+from botocore.exceptions import ClientError
+import sure  # noqa
+import string
+import unittest
+from nose.tools import assert_raises
+
+@mock_secretsmanager
+def test_get_secret_value():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    create_secret = conn.create_secret(Name='java-util-test-password',
+                                       SecretString="foosecret")
+    result = conn.get_secret_value(SecretId='java-util-test-password')
+    assert result['SecretString'] == 'foosecret'
+
+@mock_secretsmanager
+def test_get_secret_that_does_not_exist():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    with assert_raises(ClientError):
+        result = conn.get_secret_value(SecretId='i-dont-exist')
+
+@mock_secretsmanager
+def test_get_secret_that_does_not_match():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    create_secret = conn.create_secret(Name='java-util-test-password',
+                                       SecretString="foosecret")
+
+    with assert_raises(ClientError):
+        result = conn.get_secret_value(SecretId='i-dont-match')
+
+@mock_secretsmanager
+def test_create_secret():
+    conn = boto3.client('secretsmanager', region_name='us-east-1')
+
+    result = conn.create_secret(Name='test-secret', SecretString="foosecret")
+    assert result['ARN'] == (
+        'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad')
+    assert result['Name'] == 'test-secret'
+    secret = conn.get_secret_value(SecretId='test-secret')
+    assert secret['SecretString'] == 'foosecret'
+
+@mock_secretsmanager
+def test_get_random_password_default_length():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password()
+    assert len(random_password['RandomPassword']) == 32
+
+@mock_secretsmanager
+def test_get_random_password_default_requirements():
+    # RequireEachIncludedType defaults to True
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password()
+    # Should contain lowercase, uppercase, digit, special character
+    assert any(c.islower() for c in random_password['RandomPassword'])
+    assert any(c.isupper() for c in random_password['RandomPassword'])
+    assert any(c.isdigit() for c in random_password['RandomPassword'])
+    assert any(c in string.punctuation
+               for c in random_password['RandomPassword'])
+
+@mock_secretsmanager
+def test_get_random_password_custom_length():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=50)
+    assert len(random_password['RandomPassword']) == 50
+
+@mock_secretsmanager
+def test_get_random_exclude_lowercase():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=55,
+                                               ExcludeLowercase=True)
+    assert any(c.islower() for c in random_password['RandomPassword']) == False
+
+@mock_secretsmanager
+def test_get_random_exclude_uppercase():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=55,
+                                               ExcludeUppercase=True)
+    assert any(c.isupper() for c in random_password['RandomPassword']) == False
+
+@mock_secretsmanager
+def test_get_random_exclude_characters_and_symbols():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=20,
+                                               ExcludeCharacters='xyzDje@?!.')
+    assert any(c in 'xyzDje@?!.' for c in random_password['RandomPassword']) == False
+
+@mock_secretsmanager
+def test_get_random_exclude_numbers():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=100,
+                                               ExcludeNumbers=True)
+    assert any(c.isdigit() for c in random_password['RandomPassword']) == False
+
+@mock_secretsmanager
+def test_get_random_exclude_punctuation():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=100,
+                                               ExcludePunctuation=True)
+    assert any(c in string.punctuation
+               for c in random_password['RandomPassword']) == False
+
+@mock_secretsmanager
+def test_get_random_include_space_false():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=300)
+    assert any(c.isspace() for c in random_password['RandomPassword']) == False
+
+@mock_secretsmanager
+def test_get_random_include_space_true():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=4,
+                                               IncludeSpace=True)
+    assert any(c.isspace() for c in random_password['RandomPassword']) == True
+
+@mock_secretsmanager
+def test_get_random_require_each_included_type():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    random_password = conn.get_random_password(PasswordLength=4,
+                                               RequireEachIncludedType=True)
+    assert any(c in string.punctuation for c in random_password['RandomPassword']) == True
+    assert any(c in string.ascii_lowercase for c in random_password['RandomPassword']) == True
+    assert any(c in string.ascii_uppercase for c in random_password['RandomPassword']) == True
+    assert any(c in string.digits for c in random_password['RandomPassword']) == True
+
+@mock_secretsmanager
+def test_get_random_too_short_password():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    with assert_raises(ClientError):
+        random_password = conn.get_random_password(PasswordLength=3)
+
+@mock_secretsmanager
+def test_get_random_too_long_password():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    with assert_raises(Exception):
+        random_password = conn.get_random_password(PasswordLength=5555)
+
+@mock_secretsmanager
+def test_describe_secret():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name='test-secret',
+                       SecretString='foosecret')
+
+    secret_description = conn.describe_secret(SecretId='test-secret')
+    assert secret_description  # Returned dict is not empty
+    assert secret_description['ARN'] == (
+        'arn:aws:secretsmanager:us-west-2:1234567890:secret:test-secret-rIjad')
+
+@mock_secretsmanager
+def test_describe_secret_that_does_not_exist():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    with assert_raises(ClientError):
+        result = conn.describe_secret(SecretId='i-dont-exist')
+
+@mock_secretsmanager
+def test_describe_secret_that_does_not_match():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name='test-secret',
+                       SecretString='foosecret')
+
+    with assert_raises(ClientError):
+        result = conn.describe_secret(SecretId='i-dont-match')
+
+@mock_secretsmanager
+def test_rotate_secret():
+    secret_name = 'test-secret'
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name=secret_name,
+                       SecretString='foosecret')
+
+    rotated_secret = conn.rotate_secret(SecretId=secret_name)
+
+    assert rotated_secret
+    assert rotated_secret['ARN'] == (
+        'arn:aws:secretsmanager:us-west-2:1234567890:secret:test-secret-rIjad'
+    )
+    assert rotated_secret['Name'] == secret_name
+    assert rotated_secret['VersionId'] != ''
+
+@mock_secretsmanager
+def test_rotate_secret_enable_rotation():
+    secret_name = 'test-secret'
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name=secret_name,
+                       SecretString='foosecret')
+
+    initial_description = conn.describe_secret(SecretId=secret_name)
+    assert initial_description
+    assert initial_description['RotationEnabled'] is False
+    assert initial_description['RotationRules']['AutomaticallyAfterDays'] == 0
+
+    conn.rotate_secret(SecretId=secret_name,
+                       RotationRules={'AutomaticallyAfterDays': 42})
+
+    rotated_description = conn.describe_secret(SecretId=secret_name)
+    assert rotated_description
+    assert rotated_description['RotationEnabled'] is True
+    assert rotated_description['RotationRules']['AutomaticallyAfterDays'] == 42
+
+@mock_secretsmanager
+def test_rotate_secret_that_does_not_exist():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+
+    with assert_raises(ClientError):
+        result = conn.rotate_secret(SecretId='i-dont-exist')
+
+@mock_secretsmanager
+def test_rotate_secret_that_does_not_match():
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name='test-secret',
+                       SecretString='foosecret')
+
+    with assert_raises(ClientError):
+        result = conn.rotate_secret(SecretId='i-dont-match')
+
+@mock_secretsmanager
+def test_rotate_secret_client_request_token_too_short():
+    # Test is intentionally empty. Boto3 catches too short ClientRequestToken
+    # and raises ParamValidationError before Moto can see it.
+    # test_server actually handles this error.
+    assert True
+
+@mock_secretsmanager
+def test_rotate_secret_client_request_token_too_long():
+    secret_name = 'test-secret'
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name=secret_name,
+                       SecretString='foosecret')
+
+    client_request_token = (
+        'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C-'
+        'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C'
+    )
+    with assert_raises(ClientError):
+        result = conn.rotate_secret(SecretId=secret_name,
+                                    ClientRequestToken=client_request_token)
+
+@mock_secretsmanager
+def test_rotate_secret_rotation_lambda_arn_too_long():
+    secret_name = 'test-secret'
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name=secret_name,
+                       SecretString='foosecret')
+
+    rotation_lambda_arn = '85B7-446A-B7E4' * 147  # == 2058 characters
+    with assert_raises(ClientError):
+        result = conn.rotate_secret(SecretId=secret_name,
+                                    RotationLambdaARN=rotation_lambda_arn)
+
+@mock_secretsmanager
+def test_rotate_secret_rotation_period_zero():
+    # Test is intentionally empty. Boto3 catches zero day rotation period
+    # and raises ParamValidationError before Moto can see it.
+    # test_server actually handles this error.
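+    #
+    # For illustration only, a sketch of the client-side check described
+    # above (not executed here; the same pattern applies to the
+    # ClientRequestToken length checks further up):
+    #
+    #     from botocore.exceptions import ParamValidationError
+    #     try:
+    #         conn.rotate_secret(SecretId='test-secret',
+    #                            RotationRules={'AutomaticallyAfterDays': 0})
+    #     except ParamValidationError:
+    #         pass  # raised locally by botocore; no request reaches moto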
+    assert True
+
+@mock_secretsmanager
+def test_rotate_secret_rotation_period_too_long():
+    secret_name = 'test-secret'
+    conn = boto3.client('secretsmanager', region_name='us-west-2')
+    conn.create_secret(Name=secret_name,
+                       SecretString='foosecret')
+
+    rotation_rules = {'AutomaticallyAfterDays': 1001}
+    with assert_raises(ClientError):
+        result = conn.rotate_secret(SecretId=secret_name,
+                                    RotationRules=rotation_rules)
diff --git a/tests/test_secretsmanager/test_server.py b/tests/test_secretsmanager/test_server.py
index e573f9b67..3365fe4de 100644
--- a/tests/test_secretsmanager/test_server.py
+++ b/tests/test_secretsmanager/test_server.py
@@ -1,421 +1,421 @@
-from __future__ import unicode_literals
-
-import json
-import sure  # noqa
-
-import moto.server as server
-from moto import mock_secretsmanager
-
-'''
-Test the different server responses for secretsmanager
-'''
-
-
-@mock_secretsmanager
-def test_get_secret_value():
-
-    backend = server.create_backend_app("secretsmanager")
-    test_client = backend.test_client()
-
-    create_secret = test_client.post('/',
-                                     data={"Name": "test-secret",
-                                           "SecretString": "foo-secret"},
-                                     headers={
-                                         "X-Amz-Target": "secretsmanager.CreateSecret"},
-                                     )
-    get_secret = test_client.post('/',
-                                  data={"SecretId": "test-secret",
-                                        "VersionStage": "AWSCURRENT"},
-                                  headers={
-                                      "X-Amz-Target": "secretsmanager.GetSecretValue"},
-                                  )
-
-    json_data = json.loads(get_secret.data.decode("utf-8"))
-    assert json_data['SecretString'] == 'foo-secret'
-
-@mock_secretsmanager
-def test_get_secret_that_does_not_exist():
-
-    backend = server.create_backend_app("secretsmanager")
-    test_client = backend.test_client()
-
-    get_secret = test_client.post('/',
-                                  data={"SecretId": "i-dont-exist",
-                                        "VersionStage": "AWSCURRENT"},
-                                  headers={
-                                      "X-Amz-Target": "secretsmanager.GetSecretValue"},
-                                  )
-    json_data = json.loads(get_secret.data.decode("utf-8"))
-    assert json_data['message'] == "Secrets Manager can't find the specified secret"
-    assert json_data['__type'] == 'ResourceNotFoundException'
-
-@mock_secretsmanager
-def test_get_secret_that_does_not_match():
-    backend = server.create_backend_app("secretsmanager")
-    test_client = backend.test_client()
-
-    create_secret = test_client.post('/',
-                                     data={"Name": "test-secret",
-                                           "SecretString": "foo-secret"},
-                                     headers={
-                                         "X-Amz-Target": "secretsmanager.CreateSecret"},
-                                     )
-    get_secret = test_client.post('/',
-                                  data={"SecretId": "i-dont-match",
-                                        "VersionStage": "AWSCURRENT"},
-                                  headers={
-                                      "X-Amz-Target": "secretsmanager.GetSecretValue"},
-                                  )
-    json_data = json.loads(get_secret.data.decode("utf-8"))
-    assert json_data['message'] == "Secrets Manager can't find the specified secret"
-    assert json_data['__type'] == 'ResourceNotFoundException'
-
-@mock_secretsmanager
-def test_create_secret():
-
-    backend = server.create_backend_app("secretsmanager")
-    test_client = backend.test_client()
-
-    res = test_client.post('/',
-                           data={"Name": "test-secret",
-                                 "SecretString": "foo-secret"},
-                           headers={
-                               "X-Amz-Target": "secretsmanager.CreateSecret"},
-                           )
-
-    json_data = json.loads(res.data.decode("utf-8"))
-    assert json_data['ARN'] == (
-        'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad')
-    assert json_data['Name'] == 'test-secret'
-
-@mock_secretsmanager
-def test_describe_secret():
-
-    backend = server.create_backend_app('secretsmanager')
-    test_client = backend.test_client()
-
-    create_secret = test_client.post('/',
-                                     data={"Name": "test-secret",
-                                           "SecretString": "foosecret"},
-                                     headers={
-                                         "X-Amz-Target": "secretsmanager.CreateSecret"
"secretsmanager.CreateSecret" - }, - ) - describe_secret = test_client.post('/', - data={"SecretId": "test-secret"}, - headers={ - "X-Amz-Target": "secretsmanager.DescribeSecret" - }, - ) - - json_data = json.loads(describe_secret.data.decode("utf-8")) - assert json_data # Returned dict is not empty - assert json_data['ARN'] == ( - 'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad' - ) - -@mock_secretsmanager -def test_describe_secret_that_does_not_exist(): - - backend = server.create_backend_app('secretsmanager') - test_client = backend.test_client() - - describe_secret = test_client.post('/', - data={"SecretId": "i-dont-exist"}, - headers={ - "X-Amz-Target": "secretsmanager.DescribeSecret" - }, - ) - - json_data = json.loads(describe_secret.data.decode("utf-8")) - assert json_data['message'] == "Secrets Manager can't find the specified secret" - assert json_data['__type'] == 'ResourceNotFoundException' - -@mock_secretsmanager -def test_describe_secret_that_does_not_match(): - - backend = server.create_backend_app('secretsmanager') - test_client = backend.test_client() - - create_secret = test_client.post('/', - data={"Name": "test-secret", - "SecretString": "foosecret"}, - headers={ - "X-Amz-Target": "secretsmanager.CreateSecret" - }, - ) - describe_secret = test_client.post('/', - data={"SecretId": "i-dont-match"}, - headers={ - "X-Amz-Target": "secretsmanager.DescribeSecret" - }, - ) - - json_data = json.loads(describe_secret.data.decode("utf-8")) - assert json_data['message'] == "Secrets Manager can't find the specified secret" - assert json_data['__type'] == 'ResourceNotFoundException' - -@mock_secretsmanager -def test_rotate_secret(): - backend = server.create_backend_app('secretsmanager') - test_client = backend.test_client() - - create_secret = test_client.post('/', - data={"Name": "test-secret", - "SecretString": "foosecret"}, - headers={ - "X-Amz-Target": "secretsmanager.CreateSecret" - }, - ) - - client_request_token = "EXAMPLE2-90ab-cdef-fedc-ba987SECRET2" - rotate_secret = test_client.post('/', - data={"SecretId": "test-secret", - "ClientRequestToken": client_request_token}, - headers={ - "X-Amz-Target": "secretsmanager.RotateSecret" - }, - ) - - json_data = json.loads(rotate_secret.data.decode("utf-8")) - assert json_data # Returned dict is not empty - assert json_data['ARN'] == ( - 'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad' - ) - assert json_data['Name'] == 'test-secret' - assert json_data['VersionId'] == client_request_token - -# @mock_secretsmanager -# def test_rotate_secret_enable_rotation(): -# backend = server.create_backend_app('secretsmanager') -# test_client = backend.test_client() - -# create_secret = test_client.post( -# '/', -# data={ -# "Name": "test-secret", -# "SecretString": "foosecret" -# }, -# headers={ -# "X-Amz-Target": "secretsmanager.CreateSecret" -# }, -# ) - -# initial_description = test_client.post( -# '/', -# data={ -# "SecretId": "test-secret" -# }, -# headers={ -# "X-Amz-Target": "secretsmanager.DescribeSecret" -# }, -# ) - -# json_data = json.loads(initial_description.data.decode("utf-8")) -# assert json_data # Returned dict is not empty -# assert json_data['RotationEnabled'] is False -# assert json_data['RotationRules']['AutomaticallyAfterDays'] == 0 - -# rotate_secret = test_client.post( -# '/', -# data={ -# "SecretId": "test-secret", -# "RotationRules": {"AutomaticallyAfterDays": 42} -# }, -# headers={ -# "X-Amz-Target": "secretsmanager.RotateSecret" -# }, -# ) - -# rotated_description = 
-#         '/',
-#         data={
-#             "SecretId": "test-secret"
-#         },
-#         headers={
-#             "X-Amz-Target": "secretsmanager.DescribeSecret"
-#         },
-#     )

-#     json_data = json.loads(rotated_description.data.decode("utf-8"))
-#     assert json_data  # Returned dict is not empty
-#     assert json_data['RotationEnabled'] is True
-#     assert json_data['RotationRules']['AutomaticallyAfterDays'] == 42
-
-@mock_secretsmanager
-def test_rotate_secret_that_does_not_exist():
-    backend = server.create_backend_app('secretsmanager')
-    test_client = backend.test_client()
-
-    rotate_secret = test_client.post('/',
-                                     data={"SecretId": "i-dont-exist"},
-                                     headers={
-                                         "X-Amz-Target": "secretsmanager.RotateSecret"
-                                     },
-                                     )
-
-    json_data = json.loads(rotate_secret.data.decode("utf-8"))
-    assert json_data['message'] == "Secrets Manager can't find the specified secret"
-    assert json_data['__type'] == 'ResourceNotFoundException'
-
-@mock_secretsmanager
-def test_rotate_secret_that_does_not_match():
-    backend = server.create_backend_app('secretsmanager')
-    test_client = backend.test_client()
-
-    create_secret = test_client.post('/',
-                                     data={"Name": "test-secret",
-                                           "SecretString": "foosecret"},
-                                     headers={
-                                         "X-Amz-Target": "secretsmanager.CreateSecret"
-                                     },
-                                     )
-
-    rotate_secret = test_client.post('/',
-                                     data={"SecretId": "i-dont-match"},
-                                     headers={
-                                         "X-Amz-Target": "secretsmanager.RotateSecret"
-                                     },
-                                     )
-
-    json_data = json.loads(rotate_secret.data.decode("utf-8"))
-    assert json_data['message'] == "Secrets Manager can't find the specified secret"
-    assert json_data['__type'] == 'ResourceNotFoundException'
-
-@mock_secretsmanager
-def test_rotate_secret_client_request_token_too_short():
-    backend = server.create_backend_app('secretsmanager')
-    test_client = backend.test_client()
-
-    create_secret = test_client.post('/',
-                                     data={"Name": "test-secret",
-                                           "SecretString": "foosecret"},
-                                     headers={
-                                         "X-Amz-Target": "secretsmanager.CreateSecret"
-                                     },
-                                     )
-
-    client_request_token = "ED9F8B6C-85B7-B7E4-38F2A3BEB13C"
-    rotate_secret = test_client.post('/',
-                                     data={"SecretId": "test-secret",
-                                           "ClientRequestToken": client_request_token},
-                                     headers={
-                                         "X-Amz-Target": "secretsmanager.RotateSecret"
-                                     },
-                                     )
-
-    json_data = json.loads(rotate_secret.data.decode("utf-8"))
-    assert json_data['message'] == "ClientRequestToken must be 32-64 characters long."
-    assert json_data['__type'] == 'InvalidParameterException'
-
-@mock_secretsmanager
-def test_rotate_secret_client_request_token_too_long():
-    backend = server.create_backend_app('secretsmanager')
-    test_client = backend.test_client()
-
-    create_secret = test_client.post('/',
-                                     data={"Name": "test-secret",
-                                           "SecretString": "foosecret"},
-                                     headers={
-                                         "X-Amz-Target": "secretsmanager.CreateSecret"
-                                     },
-                                     )
-
-    client_request_token = (
-        'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C-'
-        'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C'
-    )
-    rotate_secret = test_client.post('/',
-                                     data={"SecretId": "test-secret",
-                                           "ClientRequestToken": client_request_token},
-                                     headers={
-                                         "X-Amz-Target": "secretsmanager.RotateSecret"
-                                     },
-                                     )
-
-    json_data = json.loads(rotate_secret.data.decode("utf-8"))
-    assert json_data['message'] == "ClientRequestToken must be 32-64 characters long."
-    assert json_data['__type'] == 'InvalidParameterException'
-
-@mock_secretsmanager
-def test_rotate_secret_rotation_lambda_arn_too_long():
-    backend = server.create_backend_app('secretsmanager')
-    test_client = backend.test_client()
-
-    create_secret = test_client.post('/',
-                                     data={"Name": "test-secret",
-                                           "SecretString": "foosecret"},
-                                     headers={
-                                         "X-Amz-Target": "secretsmanager.CreateSecret"
-                                     },
-                                     )
-
-    rotation_lambda_arn = '85B7-446A-B7E4' * 147  # == 2058 characters
-    rotate_secret = test_client.post('/',
-                                     data={"SecretId": "test-secret",
-                                           "RotationLambdaARN": rotation_lambda_arn},
-                                     headers={
-                                         "X-Amz-Target": "secretsmanager.RotateSecret"
-                                     },
-                                     )
-
-    json_data = json.loads(rotate_secret.data.decode("utf-8"))
-    assert json_data['message'] == "RotationLambdaARN must <= 2048 characters long."
-    assert json_data['__type'] == 'InvalidParameterException'
-
-
-#
-# The following tests should work, but fail on the embedded dict in
-# RotationRules. The error message suggests a problem deeper in the code, which
-# needs further investigation.
-#
-
-# @mock_secretsmanager
-# def test_rotate_secret_rotation_period_zero():
-#     backend = server.create_backend_app('secretsmanager')
-#     test_client = backend.test_client()

-#     create_secret = test_client.post('/',
-#                                      data={"Name": "test-secret",
-#                                            "SecretString": "foosecret"},
-#                                      headers={
-#                                          "X-Amz-Target": "secretsmanager.CreateSecret"
-#                                      },
-#                                      )

-#     rotate_secret = test_client.post('/',
-#                                      data={"SecretId": "test-secret",
-#                                            "RotationRules": {"AutomaticallyAfterDays": 0}},
-#                                      headers={
-#                                          "X-Amz-Target": "secretsmanager.RotateSecret"
-#                                      },
-#                                      )

-#     json_data = json.loads(rotate_secret.data.decode("utf-8"))
-#     assert json_data['message'] == "RotationRules.AutomaticallyAfterDays must be within 1-1000."
-#     assert json_data['__type'] == 'InvalidParameterException'

-# @mock_secretsmanager
-# def test_rotate_secret_rotation_period_too_long():
-#     backend = server.create_backend_app('secretsmanager')
-#     test_client = backend.test_client()

-#     create_secret = test_client.post('/',
-#                                      data={"Name": "test-secret",
-#                                            "SecretString": "foosecret"},
-#                                      headers={
-#                                          "X-Amz-Target": "secretsmanager.CreateSecret"
-#                                      },
-#                                      )

-#     rotate_secret = test_client.post('/',
-#                                      data={"SecretId": "test-secret",
-#                                            "RotationRules": {"AutomaticallyAfterDays": 1001}},
-#                                      headers={
-#                                          "X-Amz-Target": "secretsmanager.RotateSecret"
-#                                      },
-#                                      )

-#     json_data = json.loads(rotate_secret.data.decode("utf-8"))
-#     assert json_data['message'] == "RotationRules.AutomaticallyAfterDays must be within 1-1000."
-#     assert json_data['__type'] == 'InvalidParameterException'
+from __future__ import unicode_literals
+
+import json
+import sure  # noqa
+
+import moto.server as server
+from moto import mock_secretsmanager
+
+'''
+Test the different server responses for secretsmanager
+'''
+
+
+@mock_secretsmanager
+def test_get_secret_value():
+
+    backend = server.create_backend_app("secretsmanager")
+    test_client = backend.test_client()
+
+    create_secret = test_client.post('/',
+                                     data={"Name": "test-secret",
+                                           "SecretString": "foo-secret"},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.CreateSecret"},
+                                     )
+    get_secret = test_client.post('/',
+                                  data={"SecretId": "test-secret",
+                                        "VersionStage": "AWSCURRENT"},
+                                  headers={
+                                      "X-Amz-Target": "secretsmanager.GetSecretValue"},
+                                  )
+
+    json_data = json.loads(get_secret.data.decode("utf-8"))
+    assert json_data['SecretString'] == 'foo-secret'
+
+@mock_secretsmanager
+def test_get_secret_that_does_not_exist():
+
+    backend = server.create_backend_app("secretsmanager")
+    test_client = backend.test_client()
+
+    get_secret = test_client.post('/',
+                                  data={"SecretId": "i-dont-exist",
+                                        "VersionStage": "AWSCURRENT"},
+                                  headers={
+                                      "X-Amz-Target": "secretsmanager.GetSecretValue"},
+                                  )
+    json_data = json.loads(get_secret.data.decode("utf-8"))
+    assert json_data['message'] == "Secrets Manager can't find the specified secret"
+    assert json_data['__type'] == 'ResourceNotFoundException'
+
+@mock_secretsmanager
+def test_get_secret_that_does_not_match():
+    backend = server.create_backend_app("secretsmanager")
+    test_client = backend.test_client()
+
+    create_secret = test_client.post('/',
+                                     data={"Name": "test-secret",
+                                           "SecretString": "foo-secret"},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.CreateSecret"},
+                                     )
+    get_secret = test_client.post('/',
+                                  data={"SecretId": "i-dont-match",
+                                        "VersionStage": "AWSCURRENT"},
+                                  headers={
+                                      "X-Amz-Target": "secretsmanager.GetSecretValue"},
+                                  )
+    json_data = json.loads(get_secret.data.decode("utf-8"))
+    assert json_data['message'] == "Secrets Manager can't find the specified secret"
+    assert json_data['__type'] == 'ResourceNotFoundException'
+
+@mock_secretsmanager
+def test_create_secret():
+
+    backend = server.create_backend_app("secretsmanager")
+    test_client = backend.test_client()
+
+    res = test_client.post('/',
+                           data={"Name": "test-secret",
+                                 "SecretString": "foo-secret"},
+                           headers={
+                               "X-Amz-Target": "secretsmanager.CreateSecret"},
+                           )
+
+    json_data = json.loads(res.data.decode("utf-8"))
+    assert json_data['ARN'] == (
+        'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad')
+    assert json_data['Name'] == 'test-secret'
+
+@mock_secretsmanager
+def test_describe_secret():
+
+    backend = server.create_backend_app('secretsmanager')
+    test_client = backend.test_client()
+
+    create_secret = test_client.post('/',
+                                     data={"Name": "test-secret",
+                                           "SecretString": "foosecret"},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.CreateSecret"
+                                     },
+                                     )
+    describe_secret = test_client.post('/',
+                                       data={"SecretId": "test-secret"},
+                                       headers={
+                                           "X-Amz-Target": "secretsmanager.DescribeSecret"
+                                       },
+                                       )
+
+    json_data = json.loads(describe_secret.data.decode("utf-8"))
+    assert json_data  # Returned dict is not empty
+    assert json_data['ARN'] == (
+        'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad'
+    )
+
+@mock_secretsmanager
+def test_describe_secret_that_does_not_exist():
+
+    backend = server.create_backend_app('secretsmanager')
+    test_client = backend.test_client()
+
+    describe_secret = test_client.post('/',
data={"SecretId": "i-dont-exist"}, + headers={ + "X-Amz-Target": "secretsmanager.DescribeSecret" + }, + ) + + json_data = json.loads(describe_secret.data.decode("utf-8")) + assert json_data['message'] == "Secrets Manager can't find the specified secret" + assert json_data['__type'] == 'ResourceNotFoundException' + +@mock_secretsmanager +def test_describe_secret_that_does_not_match(): + + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": "test-secret", + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + describe_secret = test_client.post('/', + data={"SecretId": "i-dont-match"}, + headers={ + "X-Amz-Target": "secretsmanager.DescribeSecret" + }, + ) + + json_data = json.loads(describe_secret.data.decode("utf-8")) + assert json_data['message'] == "Secrets Manager can't find the specified secret" + assert json_data['__type'] == 'ResourceNotFoundException' + +@mock_secretsmanager +def test_rotate_secret(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + create_secret = test_client.post('/', + data={"Name": "test-secret", + "SecretString": "foosecret"}, + headers={ + "X-Amz-Target": "secretsmanager.CreateSecret" + }, + ) + + client_request_token = "EXAMPLE2-90ab-cdef-fedc-ba987SECRET2" + rotate_secret = test_client.post('/', + data={"SecretId": "test-secret", + "ClientRequestToken": client_request_token}, + headers={ + "X-Amz-Target": "secretsmanager.RotateSecret" + }, + ) + + json_data = json.loads(rotate_secret.data.decode("utf-8")) + assert json_data # Returned dict is not empty + assert json_data['ARN'] == ( + 'arn:aws:secretsmanager:us-east-1:1234567890:secret:test-secret-rIjad' + ) + assert json_data['Name'] == 'test-secret' + assert json_data['VersionId'] == client_request_token + +# @mock_secretsmanager +# def test_rotate_secret_enable_rotation(): +# backend = server.create_backend_app('secretsmanager') +# test_client = backend.test_client() + +# create_secret = test_client.post( +# '/', +# data={ +# "Name": "test-secret", +# "SecretString": "foosecret" +# }, +# headers={ +# "X-Amz-Target": "secretsmanager.CreateSecret" +# }, +# ) + +# initial_description = test_client.post( +# '/', +# data={ +# "SecretId": "test-secret" +# }, +# headers={ +# "X-Amz-Target": "secretsmanager.DescribeSecret" +# }, +# ) + +# json_data = json.loads(initial_description.data.decode("utf-8")) +# assert json_data # Returned dict is not empty +# assert json_data['RotationEnabled'] is False +# assert json_data['RotationRules']['AutomaticallyAfterDays'] == 0 + +# rotate_secret = test_client.post( +# '/', +# data={ +# "SecretId": "test-secret", +# "RotationRules": {"AutomaticallyAfterDays": 42} +# }, +# headers={ +# "X-Amz-Target": "secretsmanager.RotateSecret" +# }, +# ) + +# rotated_description = test_client.post( +# '/', +# data={ +# "SecretId": "test-secret" +# }, +# headers={ +# "X-Amz-Target": "secretsmanager.DescribeSecret" +# }, +# ) + +# json_data = json.loads(rotated_description.data.decode("utf-8")) +# assert json_data # Returned dict is not empty +# assert json_data['RotationEnabled'] is True +# assert json_data['RotationRules']['AutomaticallyAfterDays'] == 42 + +@mock_secretsmanager +def test_rotate_secret_that_does_not_exist(): + backend = server.create_backend_app('secretsmanager') + test_client = backend.test_client() + + rotate_secret = test_client.post('/', + data={"SecretId": "i-dont-exist"}, + 
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.RotateSecret"
+                                     },
+                                     )
+
+    json_data = json.loads(rotate_secret.data.decode("utf-8"))
+    assert json_data['message'] == "Secrets Manager can't find the specified secret"
+    assert json_data['__type'] == 'ResourceNotFoundException'
+
+@mock_secretsmanager
+def test_rotate_secret_that_does_not_match():
+    backend = server.create_backend_app('secretsmanager')
+    test_client = backend.test_client()
+
+    create_secret = test_client.post('/',
+                                     data={"Name": "test-secret",
+                                           "SecretString": "foosecret"},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.CreateSecret"
+                                     },
+                                     )
+
+    rotate_secret = test_client.post('/',
+                                     data={"SecretId": "i-dont-match"},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.RotateSecret"
+                                     },
+                                     )
+
+    json_data = json.loads(rotate_secret.data.decode("utf-8"))
+    assert json_data['message'] == "Secrets Manager can't find the specified secret"
+    assert json_data['__type'] == 'ResourceNotFoundException'
+
+@mock_secretsmanager
+def test_rotate_secret_client_request_token_too_short():
+    backend = server.create_backend_app('secretsmanager')
+    test_client = backend.test_client()
+
+    create_secret = test_client.post('/',
+                                     data={"Name": "test-secret",
+                                           "SecretString": "foosecret"},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.CreateSecret"
+                                     },
+                                     )
+
+    client_request_token = "ED9F8B6C-85B7-B7E4-38F2A3BEB13C"
+    rotate_secret = test_client.post('/',
+                                     data={"SecretId": "test-secret",
+                                           "ClientRequestToken": client_request_token},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.RotateSecret"
+                                     },
+                                     )
+
+    json_data = json.loads(rotate_secret.data.decode("utf-8"))
+    assert json_data['message'] == "ClientRequestToken must be 32-64 characters long."
+    assert json_data['__type'] == 'InvalidParameterException'
+
+@mock_secretsmanager
+def test_rotate_secret_client_request_token_too_long():
+    backend = server.create_backend_app('secretsmanager')
+    test_client = backend.test_client()
+
+    create_secret = test_client.post('/',
+                                     data={"Name": "test-secret",
+                                           "SecretString": "foosecret"},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.CreateSecret"
+                                     },
+                                     )
+
+    client_request_token = (
+        'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C-'
+        'ED9F8B6C-85B7-446A-B7E4-38F2A3BEB13C'
+    )
+    rotate_secret = test_client.post('/',
+                                     data={"SecretId": "test-secret",
+                                           "ClientRequestToken": client_request_token},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.RotateSecret"
+                                     },
+                                     )
+
+    json_data = json.loads(rotate_secret.data.decode("utf-8"))
+    assert json_data['message'] == "ClientRequestToken must be 32-64 characters long."
+    assert json_data['__type'] == 'InvalidParameterException'
+
+@mock_secretsmanager
+def test_rotate_secret_rotation_lambda_arn_too_long():
+    backend = server.create_backend_app('secretsmanager')
+    test_client = backend.test_client()
+
+    create_secret = test_client.post('/',
+                                     data={"Name": "test-secret",
+                                           "SecretString": "foosecret"},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.CreateSecret"
+                                     },
+                                     )
+
+    rotation_lambda_arn = '85B7-446A-B7E4' * 147  # == 2058 characters
+    rotate_secret = test_client.post('/',
+                                     data={"SecretId": "test-secret",
+                                           "RotationLambdaARN": rotation_lambda_arn},
+                                     headers={
+                                         "X-Amz-Target": "secretsmanager.RotateSecret"
+                                     },
+                                     )
+
+    json_data = json.loads(rotate_secret.data.decode("utf-8"))
+    assert json_data['message'] == "RotationLambdaARN must <= 2048 characters long."
+    assert json_data['__type'] == 'InvalidParameterException'
+
+
+#
+# The following tests should work, but fail on the embedded dict in
+# RotationRules. The error message suggests a problem deeper in the code, which
+# needs further investigation.
+#
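+# A possible workaround (an untested sketch, not part of this change):
+# werkzeug's test client form-encodes a dict passed as `data`, which flattens
+# the nested RotationRules dict. Posting the payload as a JSON body should
+# preserve the nesting, e.g.:
+#
+#     rotate_secret = test_client.post(
+#         '/',
+#         data=json.dumps({"SecretId": "test-secret",
+#                          "RotationRules": {"AutomaticallyAfterDays": 0}}),
+#         headers={"X-Amz-Target": "secretsmanager.RotateSecret"},
+#     )
+#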
+
+# @mock_secretsmanager
+# def test_rotate_secret_rotation_period_zero():
+#     backend = server.create_backend_app('secretsmanager')
+#     test_client = backend.test_client()
+
+#     create_secret = test_client.post('/',
+#                                      data={"Name": "test-secret",
+#                                            "SecretString": "foosecret"},
+#                                      headers={
+#                                          "X-Amz-Target": "secretsmanager.CreateSecret"
+#                                      },
+#                                      )
+
+#     rotate_secret = test_client.post('/',
+#                                      data={"SecretId": "test-secret",
+#                                            "RotationRules": {"AutomaticallyAfterDays": 0}},
+#                                      headers={
+#                                          "X-Amz-Target": "secretsmanager.RotateSecret"
+#                                      },
+#                                      )
+
+#     json_data = json.loads(rotate_secret.data.decode("utf-8"))
+#     assert json_data['message'] == "RotationRules.AutomaticallyAfterDays must be within 1-1000."
+#     assert json_data['__type'] == 'InvalidParameterException'
+
+# @mock_secretsmanager
+# def test_rotate_secret_rotation_period_too_long():
+#     backend = server.create_backend_app('secretsmanager')
+#     test_client = backend.test_client()
+
+#     create_secret = test_client.post('/',
+#                                      data={"Name": "test-secret",
+#                                            "SecretString": "foosecret"},
+#                                      headers={
+#                                          "X-Amz-Target": "secretsmanager.CreateSecret"
+#                                      },
+#                                      )
+
+#     rotate_secret = test_client.post('/',
+#                                      data={"SecretId": "test-secret",
+#                                            "RotationRules": {"AutomaticallyAfterDays": 1001}},
+#                                      headers={
+#                                          "X-Amz-Target": "secretsmanager.RotateSecret"
+#                                      },
+#                                      )
+
+#     json_data = json.loads(rotate_secret.data.decode("utf-8"))
+#     assert json_data['message'] == "RotationRules.AutomaticallyAfterDays must be within 1-1000."
+#     assert json_data['__type'] == 'InvalidParameterException'
diff --git a/tests/test_ses/test_server.py b/tests/test_ses/test_server.py
index 6af656000..e679f06fb 100644
--- a/tests/test_ses/test_server.py
+++ b/tests/test_ses/test_server.py
@@ -1,16 +1,16 @@
-from __future__ import unicode_literals
-import sure  # noqa
-
-import moto.server as server
-
-'''
-Test the different server responses
-'''
-
-
-def test_ses_list_identities():
-    backend = server.create_backend_app("ses")
-    test_client = backend.test_client()
-
-    res = test_client.get('/?Action=ListIdentities')
-    res.data.should.contain(b"ListIdentitiesResponse")
+from __future__ import unicode_literals
+import sure  # noqa
+
+import moto.server as server
+
+'''
+Test the different server responses
+'''
+
+
+def test_ses_list_identities():
+    backend = server.create_backend_app("ses")
+    test_client = backend.test_client()
+
+    res = test_client.get('/?Action=ListIdentities')
+    res.data.should.contain(b"ListIdentitiesResponse")
diff --git a/tests/test_ses/test_ses.py b/tests/test_ses/test_ses.py
index 431d42e1d..4514267c3 100644
--- a/tests/test_ses/test_ses.py
+++ b/tests/test_ses/test_ses.py
@@ -1,116 +1,116 @@
-from __future__ import unicode_literals
-import email
-
-import boto
-from boto.exception import BotoServerError
-
-import sure  # noqa
-
-from moto import mock_ses_deprecated
-
-
-@mock_ses_deprecated
-def test_verify_email_identity():
-    conn = boto.connect_ses('the_key', 'the_secret')
-    conn.verify_email_identity("test@example.com")
-
-    identities = conn.list_identities()
-    address = identities['ListIdentitiesResponse'][
-        'ListIdentitiesResult']['Identities'][0]
-    address.should.equal('test@example.com')
-
-
-@mock_ses_deprecated
-def test_domain_verify():
-    conn = boto.connect_ses('the_key', 'the_secret')
-
-    conn.verify_domain_dkim("domain1.com")
-    conn.verify_domain_identity("domain2.com")
-
-    identities = conn.list_identities()
-    domains = list(identities['ListIdentitiesResponse'][
-        'ListIdentitiesResult']['Identities'])
-    domains.should.equal(['domain1.com', 'domain2.com'])
-
-
-@mock_ses_deprecated
-def test_delete_identity():
-    conn = boto.connect_ses('the_key', 'the_secret')
-    conn.verify_email_identity("test@example.com")
-
-    conn.list_identities()['ListIdentitiesResponse']['ListIdentitiesResult'][
-        'Identities'].should.have.length_of(1)
-    conn.delete_identity("test@example.com")
-    conn.list_identities()['ListIdentitiesResponse']['ListIdentitiesResult'][
-        'Identities'].should.have.length_of(0)
-
-
-@mock_ses_deprecated
-def test_send_email():
-    conn = boto.connect_ses('the_key', 'the_secret')
-
-    conn.send_email.when.called_with(
-        "test@example.com", "test subject",
-        "test body", "test_to@example.com").should.throw(BotoServerError)
-
-    conn.verify_email_identity("test@example.com")
-    conn.send_email("test@example.com", "test subject",
-                    "test body", "test_to@example.com")
-
-    send_quota = conn.get_send_quota()
-    sent_count = int(send_quota['GetSendQuotaResponse'][
-        'GetSendQuotaResult']['SentLast24Hours'])
-    sent_count.should.equal(1)
-
-
-@mock_ses_deprecated
-def test_send_html_email():
-    conn = boto.connect_ses('the_key', 'the_secret')
-
-    conn.send_email.when.called_with(
-        "test@example.com", "test subject",
-        "test body", "test_to@example.com", format="html").should.throw(BotoServerError)
-
-    conn.verify_email_identity("test@example.com")
-    conn.send_email("test@example.com", "test subject",
-                    "test body", "test_to@example.com", format="html")
-
-    send_quota = conn.get_send_quota()
-    sent_count = int(send_quota['GetSendQuotaResponse'][
-        'GetSendQuotaResult']['SentLast24Hours'])
-    sent_count.should.equal(1)
-
-
-@mock_ses_deprecated
-def test_send_raw_email():
-    conn = boto.connect_ses('the_key', 'the_secret')
-
-    message = email.mime.multipart.MIMEMultipart()
-    message['Subject'] = 'Test'
-    message['From'] = 'test@example.com'
-    message['To'] = 'to@example.com'
-
-    # Message body
-    part = email.mime.text.MIMEText('test file attached')
-    message.attach(part)
-
-    # Attachment
-    part = email.mime.text.MIMEText('contents of test file here')
-    part.add_header('Content-Disposition', 'attachment; filename=test.txt')
-    message.attach(part)
-
-    conn.send_raw_email.when.called_with(
-        source=message['From'],
-        raw_message=message.as_string(),
-    ).should.throw(BotoServerError)
-
-    conn.verify_email_identity("test@example.com")
-    conn.send_raw_email(
-        source=message['From'],
-        raw_message=message.as_string(),
-    )
-
-    send_quota = conn.get_send_quota()
-    sent_count = int(send_quota['GetSendQuotaResponse'][
-        'GetSendQuotaResult']['SentLast24Hours'])
-    sent_count.should.equal(1)
+from __future__ import unicode_literals
+import email
+
+import boto
+from boto.exception import BotoServerError
+
+import sure  # noqa
+
+from moto import mock_ses_deprecated
+
+
+@mock_ses_deprecated
+def test_verify_email_identity():
+    conn = boto.connect_ses('the_key', 'the_secret')
+    conn.verify_email_identity("test@example.com")
+
+    identities = conn.list_identities()
+    address = identities['ListIdentitiesResponse'][
+        'ListIdentitiesResult']['Identities'][0]
+    address.should.equal('test@example.com')
+
+
+@mock_ses_deprecated
+def test_domain_verify():
+    conn = boto.connect_ses('the_key', 'the_secret')
+
+    conn.verify_domain_dkim("domain1.com")
+    conn.verify_domain_identity("domain2.com")
+
+    identities = conn.list_identities()
+    domains = list(identities['ListIdentitiesResponse'][
+        'ListIdentitiesResult']['Identities'])
+    domains.should.equal(['domain1.com', 'domain2.com'])
+
+
+@mock_ses_deprecated
+def test_delete_identity():
+    conn = boto.connect_ses('the_key', 'the_secret')
+    conn.verify_email_identity("test@example.com")
+
+    conn.list_identities()['ListIdentitiesResponse']['ListIdentitiesResult'][
+        'Identities'].should.have.length_of(1)
+    conn.delete_identity("test@example.com")
+    conn.list_identities()['ListIdentitiesResponse']['ListIdentitiesResult'][
+        'Identities'].should.have.length_of(0)
+
+
+@mock_ses_deprecated
+def test_send_email():
+    conn = boto.connect_ses('the_key', 'the_secret')
+
+    conn.send_email.when.called_with(
+        "test@example.com", "test subject",
+        "test body", "test_to@example.com").should.throw(BotoServerError)
+
+    conn.verify_email_identity("test@example.com")
+    conn.send_email("test@example.com", "test subject",
+                    "test body", "test_to@example.com")
+
+    send_quota = conn.get_send_quota()
+    sent_count = int(send_quota['GetSendQuotaResponse'][
+        'GetSendQuotaResult']['SentLast24Hours'])
+    sent_count.should.equal(1)
+
+
+@mock_ses_deprecated
+def test_send_html_email():
+    conn = boto.connect_ses('the_key', 'the_secret')
+
+    conn.send_email.when.called_with(
+        "test@example.com", "test subject",
+        "test body", "test_to@example.com", format="html").should.throw(BotoServerError)
+
+    conn.verify_email_identity("test@example.com")
+    conn.send_email("test@example.com", "test subject",
+                    "test body", "test_to@example.com", format="html")
+
+    send_quota = conn.get_send_quota()
+    sent_count = int(send_quota['GetSendQuotaResponse'][
+        'GetSendQuotaResult']['SentLast24Hours'])
+    sent_count.should.equal(1)
+
+
+@mock_ses_deprecated
+def test_send_raw_email():
+    conn = boto.connect_ses('the_key', 'the_secret')
+
+    message = email.mime.multipart.MIMEMultipart()
+    message['Subject'] = 'Test'
+    message['From'] = 'test@example.com'
+    message['To'] = 'to@example.com'
+
+    # Message body
+    part = email.mime.text.MIMEText('test file attached')
+    message.attach(part)
+
+    # Attachment
+    part = email.mime.text.MIMEText('contents of test file here')
+    part.add_header('Content-Disposition', 'attachment; filename=test.txt')
+    message.attach(part)
+
+    conn.send_raw_email.when.called_with(
+        source=message['From'],
+        raw_message=message.as_string(),
+    ).should.throw(BotoServerError)
+
+    conn.verify_email_identity("test@example.com")
+    conn.send_raw_email(
+        source=message['From'],
+        raw_message=message.as_string(),
+    )
+
+    send_quota = conn.get_send_quota()
+    sent_count = int(send_quota['GetSendQuotaResponse'][
+        'GetSendQuotaResult']['SentLast24Hours'])
+    sent_count.should.equal(1)
diff --git a/tests/test_ses/test_ses_boto3.py b/tests/test_ses/test_ses_boto3.py
index e800b8035..00d44bffa 100644
--- a/tests/test_ses/test_ses_boto3.py
+++ b/tests/test_ses/test_ses_boto3.py
@@ -1,194 +1,194 @@
-from __future__ import unicode_literals
-
-import boto3
-from botocore.exceptions import ClientError
-from six.moves.email_mime_multipart import MIMEMultipart
-from six.moves.email_mime_text import MIMEText
-
-import sure  # noqa
-
-from moto import mock_ses
-
-
-@mock_ses
-def test_verify_email_identity():
-    conn = boto3.client('ses', region_name='us-east-1')
-    conn.verify_email_identity(EmailAddress="test@example.com")
-
-    identities = conn.list_identities()
-    address = identities['Identities'][0]
-    address.should.equal('test@example.com')
-
-@mock_ses
-def test_verify_email_address():
-    conn = boto3.client('ses', region_name='us-east-1')
-    conn.verify_email_address(EmailAddress="test@example.com")
-    email_addresses = conn.list_verified_email_addresses()
-    email = email_addresses['VerifiedEmailAddresses'][0]
-    email.should.equal('test@example.com')
-
-@mock_ses
-def test_domain_verify():
-    conn = boto3.client('ses', region_name='us-east-1')
-
-    conn.verify_domain_dkim(Domain="domain1.com")
-    conn.verify_domain_identity(Domain="domain2.com")
-
-    identities = conn.list_identities()
-    domains = list(identities['Identities'])
-    domains.should.equal(['domain1.com', 'domain2.com'])
-
-
-@mock_ses
-def test_delete_identity():
-    conn = boto3.client('ses', region_name='us-east-1')
-    conn.verify_email_identity(EmailAddress="test@example.com")
-
-    conn.list_identities()['Identities'].should.have.length_of(1)
-    conn.delete_identity(Identity="test@example.com")
-    conn.list_identities()['Identities'].should.have.length_of(0)
-
-
-@mock_ses
-def test_send_email():
-    conn = boto3.client('ses', region_name='us-east-1')
-
-    kwargs = dict(
-        Source="test@example.com",
-        Destination={
-            "ToAddresses": ["test_to@example.com"],
-            "CcAddresses": ["test_cc@example.com"],
-            "BccAddresses": ["test_bcc@example.com"],
-        },
-        Message={
-            "Subject": {"Data": "test subject"},
-            "Body": {"Text": {"Data": "test body"}}
-        }
-    )
-    conn.send_email.when.called_with(**kwargs).should.throw(ClientError)
-
-    conn.verify_domain_identity(Domain='example.com')
-    conn.send_email(**kwargs)
-
-    too_many_addresses = list('to%s@example.com' % i for i in range(51))
-    conn.send_email.when.called_with(
-        **dict(kwargs, Destination={'ToAddresses': too_many_addresses})
-    ).should.throw(ClientError)
-
-    send_quota = conn.get_send_quota()
-    sent_count = int(send_quota['SentLast24Hours'])
-    sent_count.should.equal(3)
-
-
-@mock_ses
-def test_send_html_email():
-    conn = boto3.client('ses', region_name='us-east-1')
-
-    kwargs = dict(
-        Source="test@example.com",
-        Destination={
-            "ToAddresses": ["test_to@example.com"]
-        },
-        Message={
-            "Subject": {"Data": "test subject"},
-            "Body": {"Html": {"Data": "test body"}}
-        }
-    )
-
-    conn.send_email.when.called_with(**kwargs).should.throw(ClientError)
-
-    conn.verify_email_identity(EmailAddress="test@example.com")
-    conn.send_email(**kwargs)
-
-    send_quota = conn.get_send_quota()
-    sent_count = int(send_quota['SentLast24Hours'])
-    sent_count.should.equal(1)
-
-
-@mock_ses
-def test_send_raw_email():
-    conn = boto3.client('ses', region_name='us-east-1')
-
-    message = MIMEMultipart()
-    message['Subject'] = 'Test'
-    message['From'] = 'test@example.com'
-    message['To'] = 'to@example.com, foo@example.com'
-
-    # Message body
-    part = MIMEText('test file attached')
-    message.attach(part)
-
-    # Attachment
-    part = MIMEText('contents of test file here')
-    part.add_header('Content-Disposition', 'attachment; filename=test.txt')
-    message.attach(part)
-
-    kwargs = dict(
-        Source=message['From'],
-        RawMessage={'Data': message.as_string()},
-    )
-
-    conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError)
-
-    conn.verify_email_identity(EmailAddress="test@example.com")
-    conn.send_raw_email(**kwargs)
-
-    send_quota = conn.get_send_quota()
-    sent_count = int(send_quota['SentLast24Hours'])
-    sent_count.should.equal(2)
-
-
-@mock_ses
-def test_send_raw_email_without_source():
-    conn = boto3.client('ses', region_name='us-east-1')
-
-    message = MIMEMultipart()
-    message['Subject'] = 'Test'
-    message['From'] = 'test@example.com'
-    message['To'] = 'to@example.com, foo@example.com'
-
-    # Message body
-    part = MIMEText('test file attached')
-    message.attach(part)
-
-    # Attachment
-    part = MIMEText('contents of test file here')
-    part.add_header('Content-Disposition', 'attachment; filename=test.txt')
-    message.attach(part)
-
-    kwargs = dict(
-        RawMessage={'Data': message.as_string()},
-    )
-
-    conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError)
-
-    conn.verify_email_identity(EmailAddress="test@example.com")
-    conn.send_raw_email(**kwargs)
-
-    send_quota = conn.get_send_quota()
-    sent_count = int(send_quota['SentLast24Hours'])
-    sent_count.should.equal(2)
-
-
-@mock_ses
-def test_send_raw_email_without_source_or_from():
-    conn = boto3.client('ses', region_name='us-east-1')
-
-    message = MIMEMultipart()
-    message['Subject'] = 'Test'
-    message['To'] = 'to@example.com, foo@example.com'
-
-    # Message body
-    part = MIMEText('test file attached')
-    message.attach(part)
-    # Attachment
-    part = MIMEText('contents of test file here')
-    part.add_header('Content-Disposition', 'attachment; filename=test.txt')
-    message.attach(part)
-
-    kwargs = dict(
-        RawMessage={'Data': message.as_string()},
-    )
-
-    conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError)
-
+from __future__ import unicode_literals
+
+import boto3
+from botocore.exceptions import ClientError
+from six.moves.email_mime_multipart import MIMEMultipart
+from six.moves.email_mime_text import MIMEText
+
+import sure  # noqa
+
+from moto import mock_ses
+
+
+@mock_ses
+def test_verify_email_identity():
+    conn = boto3.client('ses', region_name='us-east-1')
+    conn.verify_email_identity(EmailAddress="test@example.com")
+
+    identities = conn.list_identities()
+    address = identities['Identities'][0]
+    address.should.equal('test@example.com')
+
+@mock_ses
+def test_verify_email_address():
+    conn = boto3.client('ses', region_name='us-east-1')
+    conn.verify_email_address(EmailAddress="test@example.com")
+    email_addresses = conn.list_verified_email_addresses()
+    email = email_addresses['VerifiedEmailAddresses'][0]
+    email.should.equal('test@example.com')
+
+@mock_ses
+def test_domain_verify():
+    conn = boto3.client('ses', region_name='us-east-1')
+
+    conn.verify_domain_dkim(Domain="domain1.com")
+    conn.verify_domain_identity(Domain="domain2.com")
+
+    identities = conn.list_identities()
+    domains = list(identities['Identities'])
+    domains.should.equal(['domain1.com', 'domain2.com'])
+
+
+@mock_ses
+def test_delete_identity():
+    conn = boto3.client('ses', region_name='us-east-1')
+    conn.verify_email_identity(EmailAddress="test@example.com")
+
+    conn.list_identities()['Identities'].should.have.length_of(1)
+    conn.delete_identity(Identity="test@example.com")
+    conn.list_identities()['Identities'].should.have.length_of(0)
+
+
+@mock_ses
+def test_send_email():
+    conn = boto3.client('ses', region_name='us-east-1')
+
+    kwargs = dict(
+        Source="test@example.com",
+        Destination={
+            "ToAddresses": ["test_to@example.com"],
+            "CcAddresses": ["test_cc@example.com"],
+            "BccAddresses": ["test_bcc@example.com"],
+        },
+        Message={
+            "Subject": {"Data": "test subject"},
+            "Body": {"Text": {"Data": "test body"}}
+        }
+    )
+    conn.send_email.when.called_with(**kwargs).should.throw(ClientError)
+
+    conn.verify_domain_identity(Domain='example.com')
+    conn.send_email(**kwargs)
+
+    too_many_addresses = list('to%s@example.com' % i for i in range(51))
+    conn.send_email.when.called_with(
+        **dict(kwargs, Destination={'ToAddresses': too_many_addresses})
+    ).should.throw(ClientError)
+
+    send_quota = conn.get_send_quota()
+    sent_count = int(send_quota['SentLast24Hours'])
+    sent_count.should.equal(3)
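+
+# Note on the count above: a single successful send_email call with one To,
+# one Cc and one Bcc address leaves SentLast24Hours at 3, i.e. the mocked
+# quota appears to count recipients rather than API calls (inferred from this
+# test itself, not from the SES documentation).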
+
+
+@mock_ses
+def test_send_html_email():
+    conn = boto3.client('ses', region_name='us-east-1')
+
+    kwargs = dict(
+        Source="test@example.com",
+        Destination={
+            "ToAddresses": ["test_to@example.com"]
+        },
+        Message={
+            "Subject": {"Data": "test subject"},
+            "Body": {"Html": {"Data": "test body"}}
+        }
+    )
+
+    conn.send_email.when.called_with(**kwargs).should.throw(ClientError)
+
+    conn.verify_email_identity(EmailAddress="test@example.com")
+    conn.send_email(**kwargs)
+
+    send_quota = conn.get_send_quota()
+    sent_count = int(send_quota['SentLast24Hours'])
+    sent_count.should.equal(1)
+
+
+@mock_ses
+def test_send_raw_email():
+    conn = boto3.client('ses', region_name='us-east-1')
+
+    message = MIMEMultipart()
+    message['Subject'] = 'Test'
+    message['From'] = 'test@example.com'
+    message['To'] = 'to@example.com, foo@example.com'
+
+    # Message body
+    part = MIMEText('test file attached')
+    message.attach(part)
+
+    # Attachment
+    part = MIMEText('contents of test file here')
+    part.add_header('Content-Disposition', 'attachment; filename=test.txt')
+    message.attach(part)
+
+    kwargs = dict(
+        Source=message['From'],
+        RawMessage={'Data': message.as_string()},
+    )
+
+    conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError)
+
+    conn.verify_email_identity(EmailAddress="test@example.com")
+    conn.send_raw_email(**kwargs)
+
+    send_quota = conn.get_send_quota()
+    sent_count = int(send_quota['SentLast24Hours'])
+    sent_count.should.equal(2)
+
+
+@mock_ses
+def test_send_raw_email_without_source():
+    conn = boto3.client('ses', region_name='us-east-1')
+
+    message = MIMEMultipart()
+    message['Subject'] = 'Test'
+    message['From'] = 'test@example.com'
+    message['To'] = 'to@example.com, foo@example.com'
+
+    # Message body
+    part = MIMEText('test file attached')
+    message.attach(part)
+
+    # Attachment
+    part = MIMEText('contents of test file here')
+    part.add_header('Content-Disposition', 'attachment; filename=test.txt')
+    message.attach(part)
+
+    kwargs = dict(
+        RawMessage={'Data': message.as_string()},
+    )
+
+    conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError)
+
+    conn.verify_email_identity(EmailAddress="test@example.com")
+    conn.send_raw_email(**kwargs)
+
+    send_quota = conn.get_send_quota()
+    sent_count = int(send_quota['SentLast24Hours'])
+    sent_count.should.equal(2)
+
+
+@mock_ses
+def test_send_raw_email_without_source_or_from():
+    conn = boto3.client('ses', region_name='us-east-1')
+
+    message = MIMEMultipart()
+    message['Subject'] = 'Test'
+    message['To'] = 'to@example.com, foo@example.com'
+
+    # Message body
+    part = MIMEText('test file attached')
+    message.attach(part)
+    # Attachment
+    part = MIMEText('contents of test file here')
+    part.add_header('Content-Disposition', 'attachment; filename=test.txt')
+    message.attach(part)
+
+    kwargs = dict(
+        RawMessage={'Data': message.as_string()},
+    )
+
+    conn.send_raw_email.when.called_with(**kwargs).should.throw(ClientError)
+
diff --git a/tests/test_sns/test_application.py b/tests/test_sns/test_application.py
index 319e4a6f8..e8b5838c0 100644
--- a/tests/test_sns/test_application.py
+++ b/tests/test_sns/test_application.py
@@ -1,308 +1,308 @@
-from __future__ import unicode_literals
-
-import boto
-from boto.exception import BotoServerError
-from moto import mock_sns_deprecated
-import sure  # noqa
-
-
-@mock_sns_deprecated
-def test_create_platform_application():
-    conn = boto.connect_sns()
-    platform_application = conn.create_platform_application(
-        name="my-application",
-        platform="APNS",
-        attributes={
-            "PlatformCredential": "platform_credential",
-            "PlatformPrincipal": "platform_principal",
"platform_principal", - }, - ) - application_arn = platform_application['CreatePlatformApplicationResponse'][ - 'CreatePlatformApplicationResult']['PlatformApplicationArn'] - application_arn.should.equal( - 'arn:aws:sns:us-east-1:123456789012:app/APNS/my-application') - - -@mock_sns_deprecated -def test_get_platform_application_attributes(): - conn = boto.connect_sns() - platform_application = conn.create_platform_application( - name="my-application", - platform="APNS", - attributes={ - "PlatformCredential": "platform_credential", - "PlatformPrincipal": "platform_principal", - }, - ) - arn = platform_application['CreatePlatformApplicationResponse'][ - 'CreatePlatformApplicationResult']['PlatformApplicationArn'] - attributes = conn.get_platform_application_attributes(arn)['GetPlatformApplicationAttributesResponse'][ - 'GetPlatformApplicationAttributesResult']['Attributes'] - attributes.should.equal({ - "PlatformCredential": "platform_credential", - "PlatformPrincipal": "platform_principal", - }) - - -@mock_sns_deprecated -def test_get_missing_platform_application_attributes(): - conn = boto.connect_sns() - conn.get_platform_application_attributes.when.called_with( - "a-fake-arn").should.throw(BotoServerError) - - -@mock_sns_deprecated -def test_set_platform_application_attributes(): - conn = boto.connect_sns() - platform_application = conn.create_platform_application( - name="my-application", - platform="APNS", - attributes={ - "PlatformCredential": "platform_credential", - "PlatformPrincipal": "platform_principal", - }, - ) - arn = platform_application['CreatePlatformApplicationResponse'][ - 'CreatePlatformApplicationResult']['PlatformApplicationArn'] - conn.set_platform_application_attributes(arn, - {"PlatformPrincipal": "other"} - ) - attributes = conn.get_platform_application_attributes(arn)['GetPlatformApplicationAttributesResponse'][ - 'GetPlatformApplicationAttributesResult']['Attributes'] - attributes.should.equal({ - "PlatformCredential": "platform_credential", - "PlatformPrincipal": "other", - }) - - -@mock_sns_deprecated -def test_list_platform_applications(): - conn = boto.connect_sns() - conn.create_platform_application( - name="application1", - platform="APNS", - ) - conn.create_platform_application( - name="application2", - platform="APNS", - ) - - applications_repsonse = conn.list_platform_applications() - applications = applications_repsonse['ListPlatformApplicationsResponse'][ - 'ListPlatformApplicationsResult']['PlatformApplications'] - applications.should.have.length_of(2) - - -@mock_sns_deprecated -def test_delete_platform_application(): - conn = boto.connect_sns() - conn.create_platform_application( - name="application1", - platform="APNS", - ) - conn.create_platform_application( - name="application2", - platform="APNS", - ) - - applications_repsonse = conn.list_platform_applications() - applications = applications_repsonse['ListPlatformApplicationsResponse'][ - 'ListPlatformApplicationsResult']['PlatformApplications'] - applications.should.have.length_of(2) - - application_arn = applications[0]['PlatformApplicationArn'] - conn.delete_platform_application(application_arn) - - applications_repsonse = conn.list_platform_applications() - applications = applications_repsonse['ListPlatformApplicationsResponse'][ - 'ListPlatformApplicationsResult']['PlatformApplications'] - applications.should.have.length_of(1) - - -@mock_sns_deprecated -def test_create_platform_endpoint(): - conn = boto.connect_sns() - platform_application = conn.create_platform_application( - 
name="my-application", - platform="APNS", - ) - application_arn = platform_application['CreatePlatformApplicationResponse'][ - 'CreatePlatformApplicationResult']['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - platform_application_arn=application_arn, - token="some_unique_id", - custom_user_data="some user data", - attributes={ - "Enabled": False, - }, - ) - - endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ - 'CreatePlatformEndpointResult']['EndpointArn'] - endpoint_arn.should.contain( - "arn:aws:sns:us-east-1:123456789012:endpoint/APNS/my-application/") - - -@mock_sns_deprecated -def test_get_list_endpoints_by_platform_application(): - conn = boto.connect_sns() - platform_application = conn.create_platform_application( - name="my-application", - platform="APNS", - ) - application_arn = platform_application['CreatePlatformApplicationResponse'][ - 'CreatePlatformApplicationResult']['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - platform_application_arn=application_arn, - token="some_unique_id", - custom_user_data="some user data", - attributes={ - "CustomUserData": "some data", - }, - ) - endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ - 'CreatePlatformEndpointResult']['EndpointArn'] - - endpoint_list = conn.list_endpoints_by_platform_application( - platform_application_arn=application_arn - )['ListEndpointsByPlatformApplicationResponse']['ListEndpointsByPlatformApplicationResult']['Endpoints'] - - endpoint_list.should.have.length_of(1) - endpoint_list[0]['Attributes']['CustomUserData'].should.equal('some data') - endpoint_list[0]['EndpointArn'].should.equal(endpoint_arn) - - -@mock_sns_deprecated -def test_get_endpoint_attributes(): - conn = boto.connect_sns() - platform_application = conn.create_platform_application( - name="my-application", - platform="APNS", - ) - application_arn = platform_application['CreatePlatformApplicationResponse'][ - 'CreatePlatformApplicationResult']['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - platform_application_arn=application_arn, - token="some_unique_id", - custom_user_data="some user data", - attributes={ - "Enabled": False, - "CustomUserData": "some data", - }, - ) - endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ - 'CreatePlatformEndpointResult']['EndpointArn'] - - attributes = conn.get_endpoint_attributes(endpoint_arn)['GetEndpointAttributesResponse'][ - 'GetEndpointAttributesResult']['Attributes'] - attributes.should.equal({ - "Token": "some_unique_id", - "Enabled": 'False', - "CustomUserData": "some data", - }) - - -@mock_sns_deprecated -def test_get_missing_endpoint_attributes(): - conn = boto.connect_sns() - conn.get_endpoint_attributes.when.called_with( - "a-fake-arn").should.throw(BotoServerError) - - -@mock_sns_deprecated -def test_set_endpoint_attributes(): - conn = boto.connect_sns() - platform_application = conn.create_platform_application( - name="my-application", - platform="APNS", - ) - application_arn = platform_application['CreatePlatformApplicationResponse'][ - 'CreatePlatformApplicationResult']['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - platform_application_arn=application_arn, - token="some_unique_id", - custom_user_data="some user data", - attributes={ - "Enabled": False, - "CustomUserData": "some data", - }, - ) - endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ - 'CreatePlatformEndpointResult']['EndpointArn'] - - conn.set_endpoint_attributes(endpoint_arn, - {"CustomUserData": 
"other data"} - ) - attributes = conn.get_endpoint_attributes(endpoint_arn)['GetEndpointAttributesResponse'][ - 'GetEndpointAttributesResult']['Attributes'] - attributes.should.equal({ - "Token": "some_unique_id", - "Enabled": 'False', - "CustomUserData": "other data", - }) - - -@mock_sns_deprecated -def test_delete_endpoint(): - conn = boto.connect_sns() - platform_application = conn.create_platform_application( - name="my-application", - platform="APNS", - ) - application_arn = platform_application['CreatePlatformApplicationResponse'][ - 'CreatePlatformApplicationResult']['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - platform_application_arn=application_arn, - token="some_unique_id", - custom_user_data="some user data", - attributes={ - "Enabled": False, - "CustomUserData": "some data", - }, - ) - endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ - 'CreatePlatformEndpointResult']['EndpointArn'] - - endpoint_list = conn.list_endpoints_by_platform_application( - platform_application_arn=application_arn - )['ListEndpointsByPlatformApplicationResponse']['ListEndpointsByPlatformApplicationResult']['Endpoints'] - - endpoint_list.should.have.length_of(1) - - conn.delete_endpoint(endpoint_arn) - - endpoint_list = conn.list_endpoints_by_platform_application( - platform_application_arn=application_arn - )['ListEndpointsByPlatformApplicationResponse']['ListEndpointsByPlatformApplicationResult']['Endpoints'] - endpoint_list.should.have.length_of(0) - - -@mock_sns_deprecated -def test_publish_to_platform_endpoint(): - conn = boto.connect_sns() - platform_application = conn.create_platform_application( - name="my-application", - platform="APNS", - ) - application_arn = platform_application['CreatePlatformApplicationResponse'][ - 'CreatePlatformApplicationResult']['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - platform_application_arn=application_arn, - token="some_unique_id", - custom_user_data="some user data", - attributes={ - "Enabled": True, - }, - ) - - endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ - 'CreatePlatformEndpointResult']['EndpointArn'] - - conn.publish(message="some message", message_structure="json", - target_arn=endpoint_arn) +from __future__ import unicode_literals + +import boto +from boto.exception import BotoServerError +from moto import mock_sns_deprecated +import sure # noqa + + +@mock_sns_deprecated +def test_create_platform_application(): + conn = boto.connect_sns() + platform_application = conn.create_platform_application( + name="my-application", + platform="APNS", + attributes={ + "PlatformCredential": "platform_credential", + "PlatformPrincipal": "platform_principal", + }, + ) + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] + application_arn.should.equal( + 'arn:aws:sns:us-east-1:123456789012:app/APNS/my-application') + + +@mock_sns_deprecated +def test_get_platform_application_attributes(): + conn = boto.connect_sns() + platform_application = conn.create_platform_application( + name="my-application", + platform="APNS", + attributes={ + "PlatformCredential": "platform_credential", + "PlatformPrincipal": "platform_principal", + }, + ) + arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] + attributes = conn.get_platform_application_attributes(arn)['GetPlatformApplicationAttributesResponse'][ + 
'GetPlatformApplicationAttributesResult']['Attributes'] + attributes.should.equal({ + "PlatformCredential": "platform_credential", + "PlatformPrincipal": "platform_principal", + }) + + +@mock_sns_deprecated +def test_get_missing_platform_application_attributes(): + conn = boto.connect_sns() + conn.get_platform_application_attributes.when.called_with( + "a-fake-arn").should.throw(BotoServerError) + + +@mock_sns_deprecated +def test_set_platform_application_attributes(): + conn = boto.connect_sns() + platform_application = conn.create_platform_application( + name="my-application", + platform="APNS", + attributes={ + "PlatformCredential": "platform_credential", + "PlatformPrincipal": "platform_principal", + }, + ) + arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] + conn.set_platform_application_attributes(arn, + {"PlatformPrincipal": "other"} + ) + attributes = conn.get_platform_application_attributes(arn)['GetPlatformApplicationAttributesResponse'][ + 'GetPlatformApplicationAttributesResult']['Attributes'] + attributes.should.equal({ + "PlatformCredential": "platform_credential", + "PlatformPrincipal": "other", + }) + + +@mock_sns_deprecated +def test_list_platform_applications(): + conn = boto.connect_sns() + conn.create_platform_application( + name="application1", + platform="APNS", + ) + conn.create_platform_application( + name="application2", + platform="APNS", + ) + + applications_repsonse = conn.list_platform_applications() + applications = applications_repsonse['ListPlatformApplicationsResponse'][ + 'ListPlatformApplicationsResult']['PlatformApplications'] + applications.should.have.length_of(2) + + +@mock_sns_deprecated +def test_delete_platform_application(): + conn = boto.connect_sns() + conn.create_platform_application( + name="application1", + platform="APNS", + ) + conn.create_platform_application( + name="application2", + platform="APNS", + ) + + applications_repsonse = conn.list_platform_applications() + applications = applications_repsonse['ListPlatformApplicationsResponse'][ + 'ListPlatformApplicationsResult']['PlatformApplications'] + applications.should.have.length_of(2) + + application_arn = applications[0]['PlatformApplicationArn'] + conn.delete_platform_application(application_arn) + + applications_repsonse = conn.list_platform_applications() + applications = applications_repsonse['ListPlatformApplicationsResponse'][ + 'ListPlatformApplicationsResult']['PlatformApplications'] + applications.should.have.length_of(1) + + +@mock_sns_deprecated +def test_create_platform_endpoint(): + conn = boto.connect_sns() + platform_application = conn.create_platform_application( + name="my-application", + platform="APNS", + ) + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + platform_application_arn=application_arn, + token="some_unique_id", + custom_user_data="some user data", + attributes={ + "Enabled": False, + }, + ) + + endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ + 'CreatePlatformEndpointResult']['EndpointArn'] + endpoint_arn.should.contain( + "arn:aws:sns:us-east-1:123456789012:endpoint/APNS/my-application/") + + +@mock_sns_deprecated +def test_get_list_endpoints_by_platform_application(): + conn = boto.connect_sns() + platform_application = conn.create_platform_application( + name="my-application", + platform="APNS", + ) + application_arn = 
platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + platform_application_arn=application_arn, + token="some_unique_id", + custom_user_data="some user data", + attributes={ + "CustomUserData": "some data", + }, + ) + endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ + 'CreatePlatformEndpointResult']['EndpointArn'] + + endpoint_list = conn.list_endpoints_by_platform_application( + platform_application_arn=application_arn + )['ListEndpointsByPlatformApplicationResponse']['ListEndpointsByPlatformApplicationResult']['Endpoints'] + + endpoint_list.should.have.length_of(1) + endpoint_list[0]['Attributes']['CustomUserData'].should.equal('some data') + endpoint_list[0]['EndpointArn'].should.equal(endpoint_arn) + + +@mock_sns_deprecated +def test_get_endpoint_attributes(): + conn = boto.connect_sns() + platform_application = conn.create_platform_application( + name="my-application", + platform="APNS", + ) + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + platform_application_arn=application_arn, + token="some_unique_id", + custom_user_data="some user data", + attributes={ + "Enabled": False, + "CustomUserData": "some data", + }, + ) + endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ + 'CreatePlatformEndpointResult']['EndpointArn'] + + attributes = conn.get_endpoint_attributes(endpoint_arn)['GetEndpointAttributesResponse'][ + 'GetEndpointAttributesResult']['Attributes'] + attributes.should.equal({ + "Token": "some_unique_id", + "Enabled": 'False', + "CustomUserData": "some data", + }) + + +@mock_sns_deprecated +def test_get_missing_endpoint_attributes(): + conn = boto.connect_sns() + conn.get_endpoint_attributes.when.called_with( + "a-fake-arn").should.throw(BotoServerError) + + +@mock_sns_deprecated +def test_set_endpoint_attributes(): + conn = boto.connect_sns() + platform_application = conn.create_platform_application( + name="my-application", + platform="APNS", + ) + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + platform_application_arn=application_arn, + token="some_unique_id", + custom_user_data="some user data", + attributes={ + "Enabled": False, + "CustomUserData": "some data", + }, + ) + endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ + 'CreatePlatformEndpointResult']['EndpointArn'] + + conn.set_endpoint_attributes(endpoint_arn, + {"CustomUserData": "other data"} + ) + attributes = conn.get_endpoint_attributes(endpoint_arn)['GetEndpointAttributesResponse'][ + 'GetEndpointAttributesResult']['Attributes'] + attributes.should.equal({ + "Token": "some_unique_id", + "Enabled": 'False', + "CustomUserData": "other data", + }) + + +@mock_sns_deprecated +def test_delete_endpoint(): + conn = boto.connect_sns() + platform_application = conn.create_platform_application( + name="my-application", + platform="APNS", + ) + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + platform_application_arn=application_arn, + token="some_unique_id", + custom_user_data="some user data", + attributes={ + "Enabled": False, + "CustomUserData": "some data", + }, + ) 
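
Editor's note: the boto2 assertions in this file unwrap the same two-level XML envelope by hand each time. A tiny helper, purely illustrative and not part of boto or moto, makes that response shape explicit:

def unwrap(response, action, key):
    # boto2 parses the SNS XML envelope, so every result sits under
    # '<Action>Response' -> '<Action>Result' before the actual payload.
    return response['%sResponse' % action]['%sResult' % action][key]

# Usage sketch against a mocked connection like the ones in these tests:
#   arn = unwrap(conn.create_platform_application(name="app", platform="APNS"),
#                'CreatePlatformApplication', 'PlatformApplicationArn')
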
+ endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ + 'CreatePlatformEndpointResult']['EndpointArn'] + + endpoint_list = conn.list_endpoints_by_platform_application( + platform_application_arn=application_arn + )['ListEndpointsByPlatformApplicationResponse']['ListEndpointsByPlatformApplicationResult']['Endpoints'] + + endpoint_list.should.have.length_of(1) + + conn.delete_endpoint(endpoint_arn) + + endpoint_list = conn.list_endpoints_by_platform_application( + platform_application_arn=application_arn + )['ListEndpointsByPlatformApplicationResponse']['ListEndpointsByPlatformApplicationResult']['Endpoints'] + endpoint_list.should.have.length_of(0) + + +@mock_sns_deprecated +def test_publish_to_platform_endpoint(): + conn = boto.connect_sns() + platform_application = conn.create_platform_application( + name="my-application", + platform="APNS", + ) + application_arn = platform_application['CreatePlatformApplicationResponse'][ + 'CreatePlatformApplicationResult']['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + platform_application_arn=application_arn, + token="some_unique_id", + custom_user_data="some user data", + attributes={ + "Enabled": True, + }, + ) + + endpoint_arn = endpoint['CreatePlatformEndpointResponse'][ + 'CreatePlatformEndpointResult']['EndpointArn'] + + conn.publish(message="some message", message_structure="json", + target_arn=endpoint_arn) diff --git a/tests/test_sns/test_application_boto3.py b/tests/test_sns/test_application_boto3.py index 1c9695fea..6ba2ed89d 100644 --- a/tests/test_sns/test_application_boto3.py +++ b/tests/test_sns/test_application_boto3.py @@ -1,350 +1,350 @@ -from __future__ import unicode_literals - -import boto3 -from botocore.exceptions import ClientError -from moto import mock_sns -import sure # noqa - - -@mock_sns -def test_create_platform_application(): - conn = boto3.client('sns', region_name='us-east-1') - response = conn.create_platform_application( - Name="my-application", - Platform="APNS", - Attributes={ - "PlatformCredential": "platform_credential", - "PlatformPrincipal": "platform_principal", - }, - ) - application_arn = response['PlatformApplicationArn'] - application_arn.should.equal( - 'arn:aws:sns:us-east-1:123456789012:app/APNS/my-application') - - -@mock_sns -def test_get_platform_application_attributes(): - conn = boto3.client('sns', region_name='us-east-1') - platform_application = conn.create_platform_application( - Name="my-application", - Platform="APNS", - Attributes={ - "PlatformCredential": "platform_credential", - "PlatformPrincipal": "platform_principal", - }, - ) - arn = platform_application['PlatformApplicationArn'] - attributes = conn.get_platform_application_attributes( - PlatformApplicationArn=arn)['Attributes'] - attributes.should.equal({ - "PlatformCredential": "platform_credential", - "PlatformPrincipal": "platform_principal", - }) - - -@mock_sns -def test_get_missing_platform_application_attributes(): - conn = boto3.client('sns', region_name='us-east-1') - conn.get_platform_application_attributes.when.called_with( - PlatformApplicationArn="a-fake-arn").should.throw(ClientError) - - -@mock_sns -def test_set_platform_application_attributes(): - conn = boto3.client('sns', region_name='us-east-1') - platform_application = conn.create_platform_application( - Name="my-application", - Platform="APNS", - Attributes={ - "PlatformCredential": "platform_credential", - "PlatformPrincipal": "platform_principal", - }, - ) - arn = platform_application['PlatformApplicationArn'] - 
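
Editor's note: the boto3 variants in test_application_boto3.py return flat dictionaries, so no envelope unwrapping is needed. For reference, the same mock also works as a context manager rather than a decorator; a minimal sketch with illustrative names, assuming moto and boto3 are installed:

import boto3
from moto import mock_sns

def endpoint_roundtrip_sketch():
    # Everything inside the `with` block talks to moto's in-memory SNS,
    # not to AWS; dummy credentials suffice.
    with mock_sns():
        sns = boto3.client('sns', region_name='us-east-1')
        app_arn = sns.create_platform_application(
            Name='demo-app', Platform='APNS', Attributes={},
        )['PlatformApplicationArn']
        return sns.create_platform_endpoint(
            PlatformApplicationArn=app_arn, Token='token-1',
        )['EndpointArn']
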
conn.set_platform_application_attributes(PlatformApplicationArn=arn, - Attributes={ - "PlatformPrincipal": "other"} - ) - attributes = conn.get_platform_application_attributes( - PlatformApplicationArn=arn)['Attributes'] - attributes.should.equal({ - "PlatformCredential": "platform_credential", - "PlatformPrincipal": "other", - }) - - -@mock_sns -def test_list_platform_applications(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_platform_application( - Name="application1", - Platform="APNS", - Attributes={}, - ) - conn.create_platform_application( - Name="application2", - Platform="APNS", - Attributes={}, - ) - - applications_repsonse = conn.list_platform_applications() - applications = applications_repsonse['PlatformApplications'] - applications.should.have.length_of(2) - - -@mock_sns -def test_delete_platform_application(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_platform_application( - Name="application1", - Platform="APNS", - Attributes={}, - ) - conn.create_platform_application( - Name="application2", - Platform="APNS", - Attributes={}, - ) - - applications_repsonse = conn.list_platform_applications() - applications = applications_repsonse['PlatformApplications'] - applications.should.have.length_of(2) - - application_arn = applications[0]['PlatformApplicationArn'] - conn.delete_platform_application(PlatformApplicationArn=application_arn) - - applications_repsonse = conn.list_platform_applications() - applications = applications_repsonse['PlatformApplications'] - applications.should.have.length_of(1) - - -@mock_sns -def test_create_platform_endpoint(): - conn = boto3.client('sns', region_name='us-east-1') - platform_application = conn.create_platform_application( - Name="my-application", - Platform="APNS", - Attributes={}, - ) - application_arn = platform_application['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - PlatformApplicationArn=application_arn, - Token="some_unique_id", - CustomUserData="some user data", - Attributes={ - "Enabled": 'false', - }, - ) - - endpoint_arn = endpoint['EndpointArn'] - endpoint_arn.should.contain( - "arn:aws:sns:us-east-1:123456789012:endpoint/APNS/my-application/") - - -@mock_sns -def test_create_duplicate_platform_endpoint(): - conn = boto3.client('sns', region_name='us-east-1') - platform_application = conn.create_platform_application( - Name="my-application", - Platform="APNS", - Attributes={}, - ) - application_arn = platform_application['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - PlatformApplicationArn=application_arn, - Token="some_unique_id", - CustomUserData="some user data", - Attributes={ - "Enabled": 'false', - }, - ) - - endpoint = conn.create_platform_endpoint.when.called_with( - PlatformApplicationArn=application_arn, - Token="some_unique_id", - CustomUserData="some user data", - Attributes={ - "Enabled": 'false', - }, - ).should.throw(ClientError) - - -@mock_sns -def test_get_list_endpoints_by_platform_application(): - conn = boto3.client('sns', region_name='us-east-1') - platform_application = conn.create_platform_application( - Name="my-application", - Platform="APNS", - Attributes={}, - ) - application_arn = platform_application['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - PlatformApplicationArn=application_arn, - Token="some_unique_id", - CustomUserData="some user data", - Attributes={ - "CustomUserData": "some data", - }, - ) - endpoint_arn = endpoint['EndpointArn'] - - endpoint_list = 
conn.list_endpoints_by_platform_application( - PlatformApplicationArn=application_arn - )['Endpoints'] - - endpoint_list.should.have.length_of(1) - endpoint_list[0]['Attributes']['CustomUserData'].should.equal('some data') - endpoint_list[0]['EndpointArn'].should.equal(endpoint_arn) - - -@mock_sns -def test_get_endpoint_attributes(): - conn = boto3.client('sns', region_name='us-east-1') - platform_application = conn.create_platform_application( - Name="my-application", - Platform="APNS", - Attributes={}, - ) - application_arn = platform_application['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - PlatformApplicationArn=application_arn, - Token="some_unique_id", - CustomUserData="some user data", - Attributes={ - "Enabled": 'false', - "CustomUserData": "some data", - }, - ) - endpoint_arn = endpoint['EndpointArn'] - - attributes = conn.get_endpoint_attributes( - EndpointArn=endpoint_arn)['Attributes'] - attributes.should.equal({ - "Token": "some_unique_id", - "Enabled": 'false', - "CustomUserData": "some data", - }) - - -@mock_sns -def test_get_missing_endpoint_attributes(): - conn = boto3.client('sns', region_name='us-east-1') - conn.get_endpoint_attributes.when.called_with( - EndpointArn="a-fake-arn").should.throw(ClientError) - - -@mock_sns -def test_set_endpoint_attributes(): - conn = boto3.client('sns', region_name='us-east-1') - platform_application = conn.create_platform_application( - Name="my-application", - Platform="APNS", - Attributes={}, - ) - application_arn = platform_application['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - PlatformApplicationArn=application_arn, - Token="some_unique_id", - CustomUserData="some user data", - Attributes={ - "Enabled": 'false', - "CustomUserData": "some data", - }, - ) - endpoint_arn = endpoint['EndpointArn'] - - conn.set_endpoint_attributes(EndpointArn=endpoint_arn, - Attributes={"CustomUserData": "other data"} - ) - attributes = conn.get_endpoint_attributes( - EndpointArn=endpoint_arn)['Attributes'] - attributes.should.equal({ - "Token": "some_unique_id", - "Enabled": 'false', - "CustomUserData": "other data", - }) - - -@mock_sns -def test_publish_to_platform_endpoint(): - conn = boto3.client('sns', region_name='us-east-1') - platform_application = conn.create_platform_application( - Name="my-application", - Platform="APNS", - Attributes={}, - ) - application_arn = platform_application['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - PlatformApplicationArn=application_arn, - Token="some_unique_id", - CustomUserData="some user data", - Attributes={ - "Enabled": 'true', - }, - ) - - endpoint_arn = endpoint['EndpointArn'] - - conn.publish(Message="some message", - MessageStructure="json", TargetArn=endpoint_arn) - - -@mock_sns -def test_publish_to_disabled_platform_endpoint(): - conn = boto3.client('sns', region_name='us-east-1') - platform_application = conn.create_platform_application( - Name="my-application", - Platform="APNS", - Attributes={}, - ) - application_arn = platform_application['PlatformApplicationArn'] - - endpoint = conn.create_platform_endpoint( - PlatformApplicationArn=application_arn, - Token="some_unique_id", - CustomUserData="some user data", - Attributes={ - "Enabled": 'false', - }, - ) - - endpoint_arn = endpoint['EndpointArn'] - - conn.publish.when.called_with( - Message="some message", - MessageStructure="json", - TargetArn=endpoint_arn, - ).should.throw(ClientError) - - -@mock_sns -def test_set_sms_attributes(): - conn = boto3.client('sns', 
region_name='us-east-1') - - conn.set_sms_attributes(attributes={'DefaultSMSType': 'Transactional', 'test': 'test'}) - - response = conn.get_sms_attributes() - response.should.contain('attributes') - response['attributes'].should.contain('DefaultSMSType') - response['attributes'].should.contain('test') - response['attributes']['DefaultSMSType'].should.equal('Transactional') - response['attributes']['test'].should.equal('test') - - -@mock_sns -def test_get_sms_attributes_filtered(): - conn = boto3.client('sns', region_name='us-east-1') - - conn.set_sms_attributes(attributes={'DefaultSMSType': 'Transactional', 'test': 'test'}) - - response = conn.get_sms_attributes(attributes=['DefaultSMSType']) - response.should.contain('attributes') - response['attributes'].should.contain('DefaultSMSType') - response['attributes'].should_not.contain('test') - response['attributes']['DefaultSMSType'].should.equal('Transactional') +from __future__ import unicode_literals + +import boto3 +from botocore.exceptions import ClientError +from moto import mock_sns +import sure # noqa + + +@mock_sns +def test_create_platform_application(): + conn = boto3.client('sns', region_name='us-east-1') + response = conn.create_platform_application( + Name="my-application", + Platform="APNS", + Attributes={ + "PlatformCredential": "platform_credential", + "PlatformPrincipal": "platform_principal", + }, + ) + application_arn = response['PlatformApplicationArn'] + application_arn.should.equal( + 'arn:aws:sns:us-east-1:123456789012:app/APNS/my-application') + + +@mock_sns +def test_get_platform_application_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + platform_application = conn.create_platform_application( + Name="my-application", + Platform="APNS", + Attributes={ + "PlatformCredential": "platform_credential", + "PlatformPrincipal": "platform_principal", + }, + ) + arn = platform_application['PlatformApplicationArn'] + attributes = conn.get_platform_application_attributes( + PlatformApplicationArn=arn)['Attributes'] + attributes.should.equal({ + "PlatformCredential": "platform_credential", + "PlatformPrincipal": "platform_principal", + }) + + +@mock_sns +def test_get_missing_platform_application_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + conn.get_platform_application_attributes.when.called_with( + PlatformApplicationArn="a-fake-arn").should.throw(ClientError) + + +@mock_sns +def test_set_platform_application_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + platform_application = conn.create_platform_application( + Name="my-application", + Platform="APNS", + Attributes={ + "PlatformCredential": "platform_credential", + "PlatformPrincipal": "platform_principal", + }, + ) + arn = platform_application['PlatformApplicationArn'] + conn.set_platform_application_attributes(PlatformApplicationArn=arn, + Attributes={ + "PlatformPrincipal": "other"} + ) + attributes = conn.get_platform_application_attributes( + PlatformApplicationArn=arn)['Attributes'] + attributes.should.equal({ + "PlatformCredential": "platform_credential", + "PlatformPrincipal": "other", + }) + + +@mock_sns +def test_list_platform_applications(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_platform_application( + Name="application1", + Platform="APNS", + Attributes={}, + ) + conn.create_platform_application( + Name="application2", + Platform="APNS", + Attributes={}, + ) + + applications_repsonse = conn.list_platform_applications() + applications = 
applications_repsonse['PlatformApplications'] + applications.should.have.length_of(2) + + +@mock_sns +def test_delete_platform_application(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_platform_application( + Name="application1", + Platform="APNS", + Attributes={}, + ) + conn.create_platform_application( + Name="application2", + Platform="APNS", + Attributes={}, + ) + + applications_repsonse = conn.list_platform_applications() + applications = applications_repsonse['PlatformApplications'] + applications.should.have.length_of(2) + + application_arn = applications[0]['PlatformApplicationArn'] + conn.delete_platform_application(PlatformApplicationArn=application_arn) + + applications_repsonse = conn.list_platform_applications() + applications = applications_repsonse['PlatformApplications'] + applications.should.have.length_of(1) + + +@mock_sns +def test_create_platform_endpoint(): + conn = boto3.client('sns', region_name='us-east-1') + platform_application = conn.create_platform_application( + Name="my-application", + Platform="APNS", + Attributes={}, + ) + application_arn = platform_application['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + PlatformApplicationArn=application_arn, + Token="some_unique_id", + CustomUserData="some user data", + Attributes={ + "Enabled": 'false', + }, + ) + + endpoint_arn = endpoint['EndpointArn'] + endpoint_arn.should.contain( + "arn:aws:sns:us-east-1:123456789012:endpoint/APNS/my-application/") + + +@mock_sns +def test_create_duplicate_platform_endpoint(): + conn = boto3.client('sns', region_name='us-east-1') + platform_application = conn.create_platform_application( + Name="my-application", + Platform="APNS", + Attributes={}, + ) + application_arn = platform_application['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + PlatformApplicationArn=application_arn, + Token="some_unique_id", + CustomUserData="some user data", + Attributes={ + "Enabled": 'false', + }, + ) + + endpoint = conn.create_platform_endpoint.when.called_with( + PlatformApplicationArn=application_arn, + Token="some_unique_id", + CustomUserData="some user data", + Attributes={ + "Enabled": 'false', + }, + ).should.throw(ClientError) + + +@mock_sns +def test_get_list_endpoints_by_platform_application(): + conn = boto3.client('sns', region_name='us-east-1') + platform_application = conn.create_platform_application( + Name="my-application", + Platform="APNS", + Attributes={}, + ) + application_arn = platform_application['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + PlatformApplicationArn=application_arn, + Token="some_unique_id", + CustomUserData="some user data", + Attributes={ + "CustomUserData": "some data", + }, + ) + endpoint_arn = endpoint['EndpointArn'] + + endpoint_list = conn.list_endpoints_by_platform_application( + PlatformApplicationArn=application_arn + )['Endpoints'] + + endpoint_list.should.have.length_of(1) + endpoint_list[0]['Attributes']['CustomUserData'].should.equal('some data') + endpoint_list[0]['EndpointArn'].should.equal(endpoint_arn) + + +@mock_sns +def test_get_endpoint_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + platform_application = conn.create_platform_application( + Name="my-application", + Platform="APNS", + Attributes={}, + ) + application_arn = platform_application['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + PlatformApplicationArn=application_arn, + Token="some_unique_id", + CustomUserData="some user data", 
+ Attributes={ + "Enabled": 'false', + "CustomUserData": "some data", + }, + ) + endpoint_arn = endpoint['EndpointArn'] + + attributes = conn.get_endpoint_attributes( + EndpointArn=endpoint_arn)['Attributes'] + attributes.should.equal({ + "Token": "some_unique_id", + "Enabled": 'false', + "CustomUserData": "some data", + }) + + +@mock_sns +def test_get_missing_endpoint_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + conn.get_endpoint_attributes.when.called_with( + EndpointArn="a-fake-arn").should.throw(ClientError) + + +@mock_sns +def test_set_endpoint_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + platform_application = conn.create_platform_application( + Name="my-application", + Platform="APNS", + Attributes={}, + ) + application_arn = platform_application['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + PlatformApplicationArn=application_arn, + Token="some_unique_id", + CustomUserData="some user data", + Attributes={ + "Enabled": 'false', + "CustomUserData": "some data", + }, + ) + endpoint_arn = endpoint['EndpointArn'] + + conn.set_endpoint_attributes(EndpointArn=endpoint_arn, + Attributes={"CustomUserData": "other data"} + ) + attributes = conn.get_endpoint_attributes( + EndpointArn=endpoint_arn)['Attributes'] + attributes.should.equal({ + "Token": "some_unique_id", + "Enabled": 'false', + "CustomUserData": "other data", + }) + + +@mock_sns +def test_publish_to_platform_endpoint(): + conn = boto3.client('sns', region_name='us-east-1') + platform_application = conn.create_platform_application( + Name="my-application", + Platform="APNS", + Attributes={}, + ) + application_arn = platform_application['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + PlatformApplicationArn=application_arn, + Token="some_unique_id", + CustomUserData="some user data", + Attributes={ + "Enabled": 'true', + }, + ) + + endpoint_arn = endpoint['EndpointArn'] + + conn.publish(Message="some message", + MessageStructure="json", TargetArn=endpoint_arn) + + +@mock_sns +def test_publish_to_disabled_platform_endpoint(): + conn = boto3.client('sns', region_name='us-east-1') + platform_application = conn.create_platform_application( + Name="my-application", + Platform="APNS", + Attributes={}, + ) + application_arn = platform_application['PlatformApplicationArn'] + + endpoint = conn.create_platform_endpoint( + PlatformApplicationArn=application_arn, + Token="some_unique_id", + CustomUserData="some user data", + Attributes={ + "Enabled": 'false', + }, + ) + + endpoint_arn = endpoint['EndpointArn'] + + conn.publish.when.called_with( + Message="some message", + MessageStructure="json", + TargetArn=endpoint_arn, + ).should.throw(ClientError) + + +@mock_sns +def test_set_sms_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + + conn.set_sms_attributes(attributes={'DefaultSMSType': 'Transactional', 'test': 'test'}) + + response = conn.get_sms_attributes() + response.should.contain('attributes') + response['attributes'].should.contain('DefaultSMSType') + response['attributes'].should.contain('test') + response['attributes']['DefaultSMSType'].should.equal('Transactional') + response['attributes']['test'].should.equal('test') + + +@mock_sns +def test_get_sms_attributes_filtered(): + conn = boto3.client('sns', region_name='us-east-1') + + conn.set_sms_attributes(attributes={'DefaultSMSType': 'Transactional', 'test': 'test'}) + + response = conn.get_sms_attributes(attributes=['DefaultSMSType']) + 
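
Editor's note: a condensed sketch of the SMS-attribute round trip the two tests below cover. set_sms_attributes stores account-level defaults, and passing attributes=[...] to get_sms_attributes filters the result to the requested keys (function name illustrative):

import boto3
from moto import mock_sns

@mock_sns
def sms_attributes_sketch():
    sns = boto3.client('sns', region_name='us-east-1')
    sns.set_sms_attributes(attributes={'DefaultSMSType': 'Transactional'})
    everything = sns.get_sms_attributes()['attributes']
    # Requesting a subset returns only the named keys.
    subset = sns.get_sms_attributes(attributes=['DefaultSMSType'])['attributes']
    return everything, subset
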
response.should.contain('attributes') + response['attributes'].should.contain('DefaultSMSType') + response['attributes'].should_not.contain('test') + response['attributes']['DefaultSMSType'].should.equal('Transactional') diff --git a/tests/test_sns/test_publishing.py b/tests/test_sns/test_publishing.py index 964296837..d04cf5acc 100644 --- a/tests/test_sns/test_publishing.py +++ b/tests/test_sns/test_publishing.py @@ -1,69 +1,69 @@ -from __future__ import unicode_literals - -import boto -import json -import re -from freezegun import freeze_time -import sure # noqa - -from moto import mock_sns_deprecated, mock_sqs_deprecated - - -MESSAGE_FROM_SQS_TEMPLATE = '{\n "Message": "%s",\n "MessageId": "%s",\n "Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",\n "SignatureVersion": "1",\n "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",\n "Subject": "%s",\n "Timestamp": "2015-01-01T12:00:00.000Z",\n "TopicArn": "arn:aws:sns:%s:123456789012:some-topic",\n "Type": "Notification",\n "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"\n}' - - -@mock_sqs_deprecated -@mock_sns_deprecated -def test_publish_to_sqs(): - conn = boto.connect_sns() - conn.create_topic("some-topic") - topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"][0]['TopicArn'] - - sqs_conn = boto.connect_sqs() - sqs_conn.create_queue("test-queue") - - conn.subscribe(topic_arn, "sqs", - "arn:aws:sqs:us-east-1:123456789012:test-queue") - - message_to_publish = 'my message' - subject_to_publish = "test subject" - with freeze_time("2015-01-01 12:00:00"): - published_message = conn.publish(topic=topic_arn, message=message_to_publish, subject=subject_to_publish) - published_message_id = published_message['PublishResponse']['PublishResult']['MessageId'] - - queue = sqs_conn.get_queue("test-queue") - message = queue.read(1) - expected = MESSAGE_FROM_SQS_TEMPLATE % (message_to_publish, published_message_id, subject_to_publish, 'us-east-1') - acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", '2015-01-01T12:00:00.000Z', message.get_body()) - acquired_message.should.equal(expected) - - -@mock_sqs_deprecated -@mock_sns_deprecated -def test_publish_to_sqs_in_different_region(): - conn = boto.sns.connect_to_region("us-west-1") - conn.create_topic("some-topic") - topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"][0]['TopicArn'] - - sqs_conn = boto.sqs.connect_to_region("us-west-2") - sqs_conn.create_queue("test-queue") - - conn.subscribe(topic_arn, "sqs", - "arn:aws:sqs:us-west-2:123456789012:test-queue") - - message_to_publish = 'my message' - subject_to_publish = "test subject" - with freeze_time("2015-01-01 12:00:00"): - published_message = conn.publish(topic=topic_arn, message=message_to_publish, subject=subject_to_publish) - published_message_id = published_message['PublishResponse']['PublishResult']['MessageId'] - - queue = sqs_conn.get_queue("test-queue") - message = queue.read(1) - expected = MESSAGE_FROM_SQS_TEMPLATE % (message_to_publish, published_message_id, subject_to_publish, 'us-west-1') - - acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", 
'2015-01-01T12:00:00.000Z', message.get_body()) - acquired_message.should.equal(expected) +from __future__ import unicode_literals + +import boto +import json +import re +from freezegun import freeze_time +import sure # noqa + +from moto import mock_sns_deprecated, mock_sqs_deprecated + + +MESSAGE_FROM_SQS_TEMPLATE = '{\n "Message": "%s",\n "MessageId": "%s",\n "Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",\n "SignatureVersion": "1",\n "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",\n "Subject": "%s",\n "Timestamp": "2015-01-01T12:00:00.000Z",\n "TopicArn": "arn:aws:sns:%s:123456789012:some-topic",\n "Type": "Notification",\n "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"\n}' + + +@mock_sqs_deprecated +@mock_sns_deprecated +def test_publish_to_sqs(): + conn = boto.connect_sns() + conn.create_topic("some-topic") + topics_json = conn.get_all_topics() + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] + + sqs_conn = boto.connect_sqs() + sqs_conn.create_queue("test-queue") + + conn.subscribe(topic_arn, "sqs", + "arn:aws:sqs:us-east-1:123456789012:test-queue") + + message_to_publish = 'my message' + subject_to_publish = "test subject" + with freeze_time("2015-01-01 12:00:00"): + published_message = conn.publish(topic=topic_arn, message=message_to_publish, subject=subject_to_publish) + published_message_id = published_message['PublishResponse']['PublishResult']['MessageId'] + + queue = sqs_conn.get_queue("test-queue") + message = queue.read(1) + expected = MESSAGE_FROM_SQS_TEMPLATE % (message_to_publish, published_message_id, subject_to_publish, 'us-east-1') + acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", '2015-01-01T12:00:00.000Z', message.get_body()) + acquired_message.should.equal(expected) + + +@mock_sqs_deprecated +@mock_sns_deprecated +def test_publish_to_sqs_in_different_region(): + conn = boto.sns.connect_to_region("us-west-1") + conn.create_topic("some-topic") + topics_json = conn.get_all_topics() + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] + + sqs_conn = boto.sqs.connect_to_region("us-west-2") + sqs_conn.create_queue("test-queue") + + conn.subscribe(topic_arn, "sqs", + "arn:aws:sqs:us-west-2:123456789012:test-queue") + + message_to_publish = 'my message' + subject_to_publish = "test subject" + with freeze_time("2015-01-01 12:00:00"): + published_message = conn.publish(topic=topic_arn, message=message_to_publish, subject=subject_to_publish) + published_message_id = published_message['PublishResponse']['PublishResult']['MessageId'] + + queue = sqs_conn.get_queue("test-queue") + message = queue.read(1) + expected = MESSAGE_FROM_SQS_TEMPLATE % (message_to_publish, published_message_id, subject_to_publish, 'us-west-1') + + acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", '2015-01-01T12:00:00.000Z', message.get_body()) + acquired_message.should.equal(expected) diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 3d598d406..e146ec3c9 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -1,489 +1,489 @@ -from 
__future__ import unicode_literals - -import base64 -import json - -import boto3 -import re -from freezegun import freeze_time -import sure # noqa - -import responses -from botocore.exceptions import ClientError -from nose.tools import assert_raises -from moto import mock_sns, mock_sqs - - -MESSAGE_FROM_SQS_TEMPLATE = '{\n "Message": "%s",\n "MessageId": "%s",\n "Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",\n "SignatureVersion": "1",\n "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",\n "Subject": "my subject",\n "Timestamp": "2015-01-01T12:00:00.000Z",\n "TopicArn": "arn:aws:sns:%s:123456789012:some-topic",\n "Type": "Notification",\n "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"\n}' - - -@mock_sqs -@mock_sns -def test_publish_to_sqs(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - sqs_conn = boto3.resource('sqs', region_name='us-east-1') - sqs_conn.create_queue(QueueName="test-queue") - - conn.subscribe(TopicArn=topic_arn, - Protocol="sqs", - Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") - message = 'my message' - with freeze_time("2015-01-01 12:00:00"): - published_message = conn.publish(TopicArn=topic_arn, Message=message) - published_message_id = published_message['MessageId'] - - queue = sqs_conn.get_queue_by_name(QueueName="test-queue") - messages = queue.receive_messages(MaxNumberOfMessages=1) - expected = MESSAGE_FROM_SQS_TEMPLATE % (message, published_message_id, 'us-east-1') - acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", u'2015-01-01T12:00:00.000Z', messages[0].body) - acquired_message.should.equal(expected) - - -@mock_sqs -@mock_sns -def test_publish_to_sqs_raw(): - sns = boto3.resource('sns', region_name='us-east-1') - topic = sns.create_topic(Name='some-topic') - - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName='test-queue') - - subscription = topic.subscribe( - Protocol='sqs', Endpoint=queue.attributes['QueueArn']) - - subscription.set_attributes( - AttributeName='RawMessageDelivery', AttributeValue='true') - - message = 'my message' - with freeze_time("2015-01-01 12:00:00"): - topic.publish(Message=message) - - messages = queue.receive_messages(MaxNumberOfMessages=1) - messages[0].body.should.equal(message) - - -@mock_sqs -@mock_sns -def test_publish_to_sqs_bad(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - sqs_conn = boto3.resource('sqs', region_name='us-east-1') - sqs_conn.create_queue(QueueName="test-queue") - - conn.subscribe(TopicArn=topic_arn, - Protocol="sqs", - Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") - message = 'my message' - try: - # Test missing Value - conn.publish( - TopicArn=topic_arn, Message=message, - MessageAttributes={'store': {'DataType': 'String'}}) - except ClientError as err: - err.response['Error']['Code'].should.equal('InvalidParameterValue') - try: - # Test empty DataType (if the DataType field is missing entirely - # botocore throws an exception during 
validation) - conn.publish( - TopicArn=topic_arn, Message=message, - MessageAttributes={'store': { - 'DataType': '', - 'StringValue': 'example_corp' - }}) - except ClientError as err: - err.response['Error']['Code'].should.equal('InvalidParameterValue') - try: - # Test empty Value - conn.publish( - TopicArn=topic_arn, Message=message, - MessageAttributes={'store': { - 'DataType': 'String', - 'StringValue': '' - }}) - except ClientError as err: - err.response['Error']['Code'].should.equal('InvalidParameterValue') - - -@mock_sqs -@mock_sns -def test_publish_to_sqs_msg_attr_byte_value(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - sqs_conn = boto3.resource('sqs', region_name='us-east-1') - queue = sqs_conn.create_queue(QueueName="test-queue") - - conn.subscribe(TopicArn=topic_arn, - Protocol="sqs", - Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") - message = 'my message' - conn.publish( - TopicArn=topic_arn, Message=message, - MessageAttributes={'store': { - 'DataType': 'Binary', - 'BinaryValue': b'\x02\x03\x04' - }}) - messages = queue.receive_messages(MaxNumberOfMessages=5) - message_attributes = [ - json.loads(m.body)['MessageAttributes'] for m in messages] - message_attributes.should.equal([{ - 'store': { - 'Type': 'Binary', - 'Value': base64.b64encode(b'\x02\x03\x04').decode() - } - }]) - - -@mock_sns -def test_publish_sms(): - client = boto3.client('sns', region_name='us-east-1') - client.create_topic(Name="some-topic") - resp = client.create_topic(Name="some-topic") - arn = resp['TopicArn'] - - client.subscribe( - TopicArn=arn, - Protocol='sms', - Endpoint='+15551234567' - ) - - result = client.publish(PhoneNumber="+15551234567", Message="my message") - result.should.contain('MessageId') - - -@mock_sns -def test_publish_bad_sms(): - client = boto3.client('sns', region_name='us-east-1') - client.create_topic(Name="some-topic") - resp = client.create_topic(Name="some-topic") - arn = resp['TopicArn'] - - client.subscribe( - TopicArn=arn, - Protocol='sms', - Endpoint='+15551234567' - ) - - try: - # Test invalid number - client.publish(PhoneNumber="NAA+15551234567", Message="my message") - except ClientError as err: - err.response['Error']['Code'].should.equal('InvalidParameter') - - try: - # Test not found number - client.publish(PhoneNumber="+44001234567", Message="my message") - except ClientError as err: - err.response['Error']['Code'].should.equal('ParameterValueInvalid') - - -@mock_sqs -@mock_sns -def test_publish_to_sqs_dump_json(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - sqs_conn = boto3.resource('sqs', region_name='us-east-1') - sqs_conn.create_queue(QueueName="test-queue") - - conn.subscribe(TopicArn=topic_arn, - Protocol="sqs", - Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") - - message = json.dumps({ - "Records": [{ - "eventVersion": "2.0", - "eventSource": "aws:s3", - "s3": { - "s3SchemaVersion": "1.0" - } - }] - }, sort_keys=True) - with freeze_time("2015-01-01 12:00:00"): - published_message = conn.publish(TopicArn=topic_arn, Message=message) - published_message_id = published_message['MessageId'] - - queue = sqs_conn.get_queue_by_name(QueueName="test-queue") - messages = queue.receive_messages(MaxNumberOfMessages=1) - - escaped = message.replace('"', '\\"') - expected = 
MESSAGE_FROM_SQS_TEMPLATE % (escaped, published_message_id, 'us-east-1') - acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", u'2015-01-01T12:00:00.000Z', messages[0].body) - acquired_message.should.equal(expected) - - -@mock_sqs -@mock_sns -def test_publish_to_sqs_in_different_region(): - conn = boto3.client('sns', region_name='us-west-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - sqs_conn = boto3.resource('sqs', region_name='us-west-2') - sqs_conn.create_queue(QueueName="test-queue") - - conn.subscribe(TopicArn=topic_arn, - Protocol="sqs", - Endpoint="arn:aws:sqs:us-west-2:123456789012:test-queue") - - message = 'my message' - with freeze_time("2015-01-01 12:00:00"): - published_message = conn.publish(TopicArn=topic_arn, Message=message) - published_message_id = published_message['MessageId'] - - queue = sqs_conn.get_queue_by_name(QueueName="test-queue") - messages = queue.receive_messages(MaxNumberOfMessages=1) - expected = MESSAGE_FROM_SQS_TEMPLATE % (message, published_message_id, 'us-west-1') - acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", u'2015-01-01T12:00:00.000Z', messages[0].body) - acquired_message.should.equal(expected) - - -@freeze_time("2013-01-01") -@mock_sns -def test_publish_to_http(): - def callback(request): - request.headers["Content-Type"].should.equal("text/plain; charset=UTF-8") - json.loads.when.called_with( - request.body.decode() - ).should_not.throw(Exception) - return 200, {}, "" - - responses.add_callback( - method="POST", - url="http://example.com/foobar", - callback=callback, - ) - - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - conn.subscribe(TopicArn=topic_arn, - Protocol="http", - Endpoint="http://example.com/foobar") - - response = conn.publish( - TopicArn=topic_arn, Message="my message", Subject="my subject") - - -@mock_sqs -@mock_sns -def test_publish_subject(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - sqs_conn = boto3.resource('sqs', region_name='us-east-1') - sqs_conn.create_queue(QueueName="test-queue") - - conn.subscribe(TopicArn=topic_arn, - Protocol="sqs", - Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") - message = 'my message' - subject1 = 'test subject' - subject2 = 'test subject' * 20 - with freeze_time("2015-01-01 12:00:00"): - conn.publish(TopicArn=topic_arn, Message=message, Subject=subject1) - - # Just that it doesnt error is a pass - try: - with freeze_time("2015-01-01 12:00:00"): - conn.publish(TopicArn=topic_arn, Message=message, Subject=subject2) - except ClientError as err: - err.response['Error']['Code'].should.equal('InvalidParameter') - else: - raise RuntimeError('Should have raised an InvalidParameter exception') - - -@mock_sns -def test_publish_message_too_long(): - sns = boto3.resource('sns', region_name='us-east-1') - topic = sns.create_topic(Name='some-topic') - - with assert_raises(ClientError): - topic.publish( - Message="".join(["." for i in range(0, 262145)])) - - # message short enough - does not raise an error - topic.publish( - Message="".join(["." 
for i in range(0, 262144)])) - - -def _setup_filter_policy_test(filter_policy): - sns = boto3.resource('sns', region_name='us-east-1') - topic = sns.create_topic(Name='some-topic') - - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName='test-queue') - - subscription = topic.subscribe( - Protocol='sqs', Endpoint=queue.attributes['QueueArn']) - - subscription.set_attributes( - AttributeName='FilterPolicy', AttributeValue=json.dumps(filter_policy)) - - return topic, subscription, queue - - -@mock_sqs -@mock_sns -def test_filtering_exact_string(): - topic, subscription, queue = _setup_filter_policy_test( - {'store': ['example_corp']}) - - topic.publish( - Message='match', - MessageAttributes={'store': {'DataType': 'String', - 'StringValue': 'example_corp'}}) - - messages = queue.receive_messages(MaxNumberOfMessages=5) - message_bodies = [json.loads(m.body)['Message'] for m in messages] - message_bodies.should.equal(['match']) - message_attributes = [ - json.loads(m.body)['MessageAttributes'] for m in messages] - message_attributes.should.equal( - [{'store': {'Type': 'String', 'Value': 'example_corp'}}]) - - -@mock_sqs -@mock_sns -def test_filtering_exact_string_multiple_message_attributes(): - topic, subscription, queue = _setup_filter_policy_test( - {'store': ['example_corp']}) - - topic.publish( - Message='match', - MessageAttributes={'store': {'DataType': 'String', - 'StringValue': 'example_corp'}, - 'event': {'DataType': 'String', - 'StringValue': 'order_cancelled'}}) - - messages = queue.receive_messages(MaxNumberOfMessages=5) - message_bodies = [json.loads(m.body)['Message'] for m in messages] - message_bodies.should.equal(['match']) - message_attributes = [ - json.loads(m.body)['MessageAttributes'] for m in messages] - message_attributes.should.equal([{ - 'store': {'Type': 'String', 'Value': 'example_corp'}, - 'event': {'Type': 'String', 'Value': 'order_cancelled'}}]) - - -@mock_sqs -@mock_sns -def test_filtering_exact_string_OR_matching(): - topic, subscription, queue = _setup_filter_policy_test( - {'store': ['example_corp', 'different_corp']}) - - topic.publish( - Message='match example_corp', - MessageAttributes={'store': {'DataType': 'String', - 'StringValue': 'example_corp'}}) - topic.publish( - Message='match different_corp', - MessageAttributes={'store': {'DataType': 'String', - 'StringValue': 'different_corp'}}) - messages = queue.receive_messages(MaxNumberOfMessages=5) - message_bodies = [json.loads(m.body)['Message'] for m in messages] - message_bodies.should.equal( - ['match example_corp', 'match different_corp']) - message_attributes = [ - json.loads(m.body)['MessageAttributes'] for m in messages] - message_attributes.should.equal([ - {'store': {'Type': 'String', 'Value': 'example_corp'}}, - {'store': {'Type': 'String', 'Value': 'different_corp'}}]) - - -@mock_sqs -@mock_sns -def test_filtering_exact_string_AND_matching_positive(): - topic, subscription, queue = _setup_filter_policy_test( - {'store': ['example_corp'], - 'event': ['order_cancelled']}) - - topic.publish( - Message='match example_corp order_cancelled', - MessageAttributes={'store': {'DataType': 'String', - 'StringValue': 'example_corp'}, - 'event': {'DataType': 'String', - 'StringValue': 'order_cancelled'}}) - - messages = queue.receive_messages(MaxNumberOfMessages=5) - message_bodies = [json.loads(m.body)['Message'] for m in messages] - message_bodies.should.equal( - ['match example_corp order_cancelled']) - message_attributes = [ - json.loads(m.body)['MessageAttributes'] 
for m in messages] - message_attributes.should.equal([{ - 'store': {'Type': 'String', 'Value': 'example_corp'}, - 'event': {'Type': 'String', 'Value': 'order_cancelled'}}]) - - -@mock_sqs -@mock_sns -def test_filtering_exact_string_AND_matching_no_match(): - topic, subscription, queue = _setup_filter_policy_test( - {'store': ['example_corp'], - 'event': ['order_cancelled']}) - - topic.publish( - Message='match example_corp order_accepted', - MessageAttributes={'store': {'DataType': 'String', - 'StringValue': 'example_corp'}, - 'event': {'DataType': 'String', - 'StringValue': 'order_accepted'}}) - - messages = queue.receive_messages(MaxNumberOfMessages=5) - message_bodies = [json.loads(m.body)['Message'] for m in messages] - message_bodies.should.equal([]) - message_attributes = [ - json.loads(m.body)['MessageAttributes'] for m in messages] - message_attributes.should.equal([]) - - -@mock_sqs -@mock_sns -def test_filtering_exact_string_no_match(): - topic, subscription, queue = _setup_filter_policy_test( - {'store': ['example_corp']}) - - topic.publish( - Message='no match', - MessageAttributes={'store': {'DataType': 'String', - 'StringValue': 'different_corp'}}) - - messages = queue.receive_messages(MaxNumberOfMessages=5) - message_bodies = [json.loads(m.body)['Message'] for m in messages] - message_bodies.should.equal([]) - message_attributes = [ - json.loads(m.body)['MessageAttributes'] for m in messages] - message_attributes.should.equal([]) - - -@mock_sqs -@mock_sns -def test_filtering_exact_string_no_attributes_no_match(): - topic, subscription, queue = _setup_filter_policy_test( - {'store': ['example_corp']}) - - topic.publish(Message='no match') - - messages = queue.receive_messages(MaxNumberOfMessages=5) - message_bodies = [json.loads(m.body)['Message'] for m in messages] - message_bodies.should.equal([]) - message_attributes = [ - json.loads(m.body)['MessageAttributes'] for m in messages] - message_attributes.should.equal([]) +from __future__ import unicode_literals + +import base64 +import json + +import boto3 +import re +from freezegun import freeze_time +import sure # noqa + +import responses +from botocore.exceptions import ClientError +from nose.tools import assert_raises +from moto import mock_sns, mock_sqs + + +MESSAGE_FROM_SQS_TEMPLATE = '{\n "Message": "%s",\n "MessageId": "%s",\n "Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",\n "SignatureVersion": "1",\n "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",\n "Subject": "my subject",\n "Timestamp": "2015-01-01T12:00:00.000Z",\n "TopicArn": "arn:aws:sns:%s:123456789012:some-topic",\n "Type": "Notification",\n "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"\n}' + + +@mock_sqs +@mock_sns +def test_publish_to_sqs(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + sqs_conn = boto3.resource('sqs', region_name='us-east-1') + sqs_conn.create_queue(QueueName="test-queue") + + conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") + message = 'my message' + with freeze_time("2015-01-01 12:00:00"): + published_message = 
conn.publish(TopicArn=topic_arn, Message=message) + published_message_id = published_message['MessageId'] + + queue = sqs_conn.get_queue_by_name(QueueName="test-queue") + messages = queue.receive_messages(MaxNumberOfMessages=1) + expected = MESSAGE_FROM_SQS_TEMPLATE % (message, published_message_id, 'us-east-1') + acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", u'2015-01-01T12:00:00.000Z', messages[0].body) + acquired_message.should.equal(expected) + + +@mock_sqs +@mock_sns +def test_publish_to_sqs_raw(): + sns = boto3.resource('sns', region_name='us-east-1') + topic = sns.create_topic(Name='some-topic') + + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName='test-queue') + + subscription = topic.subscribe( + Protocol='sqs', Endpoint=queue.attributes['QueueArn']) + + subscription.set_attributes( + AttributeName='RawMessageDelivery', AttributeValue='true') + + message = 'my message' + with freeze_time("2015-01-01 12:00:00"): + topic.publish(Message=message) + + messages = queue.receive_messages(MaxNumberOfMessages=1) + messages[0].body.should.equal(message) + + +@mock_sqs +@mock_sns +def test_publish_to_sqs_bad(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + sqs_conn = boto3.resource('sqs', region_name='us-east-1') + sqs_conn.create_queue(QueueName="test-queue") + + conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") + message = 'my message' + try: + # Test missing Value + conn.publish( + TopicArn=topic_arn, Message=message, + MessageAttributes={'store': {'DataType': 'String'}}) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + try: + # Test empty DataType (if the DataType field is missing entirely + # botocore throws an exception during validation) + conn.publish( + TopicArn=topic_arn, Message=message, + MessageAttributes={'store': { + 'DataType': '', + 'StringValue': 'example_corp' + }}) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + try: + # Test empty Value + conn.publish( + TopicArn=topic_arn, Message=message, + MessageAttributes={'store': { + 'DataType': 'String', + 'StringValue': '' + }}) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + + +@mock_sqs +@mock_sns +def test_publish_to_sqs_msg_attr_byte_value(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + sqs_conn = boto3.resource('sqs', region_name='us-east-1') + queue = sqs_conn.create_queue(QueueName="test-queue") + + conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") + message = 'my message' + conn.publish( + TopicArn=topic_arn, Message=message, + MessageAttributes={'store': { + 'DataType': 'Binary', + 'BinaryValue': b'\x02\x03\x04' + }}) + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([{ + 'store': { + 'Type': 'Binary', + 'Value': base64.b64encode(b'\x02\x03\x04').decode() + } + }]) + + +@mock_sns +def test_publish_sms(): + client = boto3.client('sns', region_name='us-east-1') + 
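+    # Note (added annotation): the repeated create_topic call below relies on
+    # SNS CreateTopic being idempotent -- the second call with the same name
+    # simply returns the existing TopicArn instead of failing.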
client.create_topic(Name="some-topic") + resp = client.create_topic(Name="some-topic") + arn = resp['TopicArn'] + + client.subscribe( + TopicArn=arn, + Protocol='sms', + Endpoint='+15551234567' + ) + + result = client.publish(PhoneNumber="+15551234567", Message="my message") + result.should.contain('MessageId') + + +@mock_sns +def test_publish_bad_sms(): + client = boto3.client('sns', region_name='us-east-1') + client.create_topic(Name="some-topic") + resp = client.create_topic(Name="some-topic") + arn = resp['TopicArn'] + + client.subscribe( + TopicArn=arn, + Protocol='sms', + Endpoint='+15551234567' + ) + + try: + # Test invalid number + client.publish(PhoneNumber="NAA+15551234567", Message="my message") + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameter') + + try: + # Test not found number + client.publish(PhoneNumber="+44001234567", Message="my message") + except ClientError as err: + err.response['Error']['Code'].should.equal('ParameterValueInvalid') + + +@mock_sqs +@mock_sns +def test_publish_to_sqs_dump_json(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + sqs_conn = boto3.resource('sqs', region_name='us-east-1') + sqs_conn.create_queue(QueueName="test-queue") + + conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") + + message = json.dumps({ + "Records": [{ + "eventVersion": "2.0", + "eventSource": "aws:s3", + "s3": { + "s3SchemaVersion": "1.0" + } + }] + }, sort_keys=True) + with freeze_time("2015-01-01 12:00:00"): + published_message = conn.publish(TopicArn=topic_arn, Message=message) + published_message_id = published_message['MessageId'] + + queue = sqs_conn.get_queue_by_name(QueueName="test-queue") + messages = queue.receive_messages(MaxNumberOfMessages=1) + + escaped = message.replace('"', '\\"') + expected = MESSAGE_FROM_SQS_TEMPLATE % (escaped, published_message_id, 'us-east-1') + acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", u'2015-01-01T12:00:00.000Z', messages[0].body) + acquired_message.should.equal(expected) + + +@mock_sqs +@mock_sns +def test_publish_to_sqs_in_different_region(): + conn = boto3.client('sns', region_name='us-west-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + sqs_conn = boto3.resource('sqs', region_name='us-west-2') + sqs_conn.create_queue(QueueName="test-queue") + + conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:us-west-2:123456789012:test-queue") + + message = 'my message' + with freeze_time("2015-01-01 12:00:00"): + published_message = conn.publish(TopicArn=topic_arn, Message=message) + published_message_id = published_message['MessageId'] + + queue = sqs_conn.get_queue_by_name(QueueName="test-queue") + messages = queue.receive_messages(MaxNumberOfMessages=1) + expected = MESSAGE_FROM_SQS_TEMPLATE % (message, published_message_id, 'us-west-1') + acquired_message = re.sub("\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z", u'2015-01-01T12:00:00.000Z', messages[0].body) + acquired_message.should.equal(expected) + + +@freeze_time("2013-01-01") +@mock_sns +def test_publish_to_http(): + def callback(request): + request.headers["Content-Type"].should.equal("text/plain; charset=UTF-8") + json.loads.when.called_with( + request.body.decode() + ).should_not.throw(Exception) + return 200, {}, "" + + 
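+    # Note (added annotation): register the callback with the `responses`
+    # library; add_callback() intercepts the POST that moto issues to the
+    # subscribed HTTP endpoint, letting the test assert on the delivered
+    # SNS payload.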
+    responses.add_callback(
+        method="POST",
+        url="http://example.com/foobar",
+        callback=callback,
+    )
+
+    conn = boto3.client('sns', region_name='us-east-1')
+    conn.create_topic(Name="some-topic")
+    response = conn.list_topics()
+    topic_arn = response["Topics"][0]['TopicArn']
+
+    conn.subscribe(TopicArn=topic_arn,
+                   Protocol="http",
+                   Endpoint="http://example.com/foobar")
+
+    response = conn.publish(
+        TopicArn=topic_arn, Message="my message", Subject="my subject")
+
+
+@mock_sqs
+@mock_sns
+def test_publish_subject():
+    conn = boto3.client('sns', region_name='us-east-1')
+    conn.create_topic(Name="some-topic")
+    response = conn.list_topics()
+    topic_arn = response["Topics"][0]['TopicArn']
+
+    sqs_conn = boto3.resource('sqs', region_name='us-east-1')
+    sqs_conn.create_queue(QueueName="test-queue")
+
+    conn.subscribe(TopicArn=topic_arn,
+                   Protocol="sqs",
+                   Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue")
+    message = 'my message'
+    subject1 = 'test subject'
+    subject2 = 'test subject' * 20
+    with freeze_time("2015-01-01 12:00:00"):
+        conn.publish(TopicArn=topic_arn, Message=message, Subject=subject1)
+
+    # Publishing with the valid subject just has to succeed without raising.
+    # subject2 is 240 characters, over the SNS subject length limit, so the
+    # second publish must fail with InvalidParameter.
+    try:
+        with freeze_time("2015-01-01 12:00:00"):
+            conn.publish(TopicArn=topic_arn, Message=message, Subject=subject2)
+    except ClientError as err:
+        err.response['Error']['Code'].should.equal('InvalidParameter')
+    else:
+        raise RuntimeError('Should have raised an InvalidParameter exception')
+
+
+@mock_sns
+def test_publish_message_too_long():
+    sns = boto3.resource('sns', region_name='us-east-1')
+    topic = sns.create_topic(Name='some-topic')
+
+    with assert_raises(ClientError):
+        topic.publish(
+            Message="".join(["." for i in range(0, 262145)]))
+
+    # A message at exactly the size limit does not raise an error
+    topic.publish(
+        Message="".join(["."
for i in range(0, 262144)])) + + +def _setup_filter_policy_test(filter_policy): + sns = boto3.resource('sns', region_name='us-east-1') + topic = sns.create_topic(Name='some-topic') + + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName='test-queue') + + subscription = topic.subscribe( + Protocol='sqs', Endpoint=queue.attributes['QueueArn']) + + subscription.set_attributes( + AttributeName='FilterPolicy', AttributeValue=json.dumps(filter_policy)) + + return topic, subscription, queue + + +@mock_sqs +@mock_sns +def test_filtering_exact_string(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp']}) + + topic.publish( + Message='match', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal(['match']) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal( + [{'store': {'Type': 'String', 'Value': 'example_corp'}}]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_multiple_message_attributes(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp']}) + + topic.publish( + Message='match', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}, + 'event': {'DataType': 'String', + 'StringValue': 'order_cancelled'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal(['match']) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([{ + 'store': {'Type': 'String', 'Value': 'example_corp'}, + 'event': {'Type': 'String', 'Value': 'order_cancelled'}}]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_OR_matching(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp', 'different_corp']}) + + topic.publish( + Message='match example_corp', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}}) + topic.publish( + Message='match different_corp', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'different_corp'}}) + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal( + ['match example_corp', 'match different_corp']) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([ + {'store': {'Type': 'String', 'Value': 'example_corp'}}, + {'store': {'Type': 'String', 'Value': 'different_corp'}}]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_AND_matching_positive(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp'], + 'event': ['order_cancelled']}) + + topic.publish( + Message='match example_corp order_cancelled', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}, + 'event': {'DataType': 'String', + 'StringValue': 'order_cancelled'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal( + ['match example_corp order_cancelled']) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] 
for m in messages] + message_attributes.should.equal([{ + 'store': {'Type': 'String', 'Value': 'example_corp'}, + 'event': {'Type': 'String', 'Value': 'order_cancelled'}}]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_AND_matching_no_match(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp'], + 'event': ['order_cancelled']}) + + topic.publish( + Message='match example_corp order_accepted', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}, + 'event': {'DataType': 'String', + 'StringValue': 'order_accepted'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal([]) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_no_match(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp']}) + + topic.publish( + Message='no match', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'different_corp'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal([]) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_no_attributes_no_match(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp']}) + + topic.publish(Message='no match') + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal([]) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([]) diff --git a/tests/test_sns/test_server.py b/tests/test_sns/test_server.py index 465dfa2c2..bdaefa453 100644 --- a/tests/test_sns/test_server.py +++ b/tests/test_sns/test_server.py @@ -1,24 +1,24 @@ -from __future__ import unicode_literals - -import sure # noqa - -import moto.server as server - -''' -Test the different server responses -''' - - -def test_sns_server_get(): - backend = server.create_backend_app("sns") - test_client = backend.test_client() - - topic_data = test_client.action_data("CreateTopic", Name="testtopic") - topic_data.should.contain("CreateTopicResult") - topic_data.should.contain( - "arn:aws:sns:us-east-1:123456789012:testtopic") - - topics_data = test_client.action_data("ListTopics") - topics_data.should.contain("ListTopicsResult") - topic_data.should.contain( - "arn:aws:sns:us-east-1:123456789012:testtopic") +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server + +''' +Test the different server responses +''' + + +def test_sns_server_get(): + backend = server.create_backend_app("sns") + test_client = backend.test_client() + + topic_data = test_client.action_data("CreateTopic", Name="testtopic") + topic_data.should.contain("CreateTopicResult") + topic_data.should.contain( + "arn:aws:sns:us-east-1:123456789012:testtopic") + + topics_data = test_client.action_data("ListTopics") + topics_data.should.contain("ListTopicsResult") + topic_data.should.contain( + "arn:aws:sns:us-east-1:123456789012:testtopic") diff --git a/tests/test_sns/test_subscriptions.py b/tests/test_sns/test_subscriptions.py index 
ba241ba44..3a40ba9ad 100644 --- a/tests/test_sns/test_subscriptions.py +++ b/tests/test_sns/test_subscriptions.py @@ -1,135 +1,135 @@ -from __future__ import unicode_literals -import boto - -import sure # noqa - -from moto import mock_sns_deprecated -from moto.sns.models import DEFAULT_PAGE_SIZE - - -@mock_sns_deprecated -def test_creating_subscription(): - conn = boto.connect_sns() - conn.create_topic("some-topic") - topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"][0]['TopicArn'] - - conn.subscribe(topic_arn, "http", "http://example.com/") - - subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ - "ListSubscriptionsResult"]["Subscriptions"] - subscriptions.should.have.length_of(1) - subscription = subscriptions[0] - subscription["TopicArn"].should.equal(topic_arn) - subscription["Protocol"].should.equal("http") - subscription["SubscriptionArn"].should.contain(topic_arn) - subscription["Endpoint"].should.equal("http://example.com/") - - # Now unsubscribe the subscription - conn.unsubscribe(subscription["SubscriptionArn"]) - - # And there should be zero subscriptions left - subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ - "ListSubscriptionsResult"]["Subscriptions"] - subscriptions.should.have.length_of(0) - - -@mock_sns_deprecated -def test_deleting_subscriptions_by_deleting_topic(): - conn = boto.connect_sns() - conn.create_topic("some-topic") - topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"][0]['TopicArn'] - - conn.subscribe(topic_arn, "http", "http://example.com/") - - subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ - "ListSubscriptionsResult"]["Subscriptions"] - subscriptions.should.have.length_of(1) - subscription = subscriptions[0] - subscription["TopicArn"].should.equal(topic_arn) - subscription["Protocol"].should.equal("http") - subscription["SubscriptionArn"].should.contain(topic_arn) - subscription["Endpoint"].should.equal("http://example.com/") - - # Now delete the topic - conn.delete_topic(topic_arn) - - # And there should now be 0 topics - topics_json = conn.get_all_topics() - topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] - topics.should.have.length_of(0) - - # And there should be zero subscriptions left - subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ - "ListSubscriptionsResult"]["Subscriptions"] - subscriptions.should.have.length_of(0) - - -@mock_sns_deprecated -def test_getting_subscriptions_by_topic(): - conn = boto.connect_sns() - conn.create_topic("topic1") - conn.create_topic("topic2") - - topics_json = conn.get_all_topics() - topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] - topic1_arn = topics[0]['TopicArn'] - topic2_arn = topics[1]['TopicArn'] - - conn.subscribe(topic1_arn, "http", "http://example1.com/") - conn.subscribe(topic2_arn, "http", "http://example2.com/") - - topic1_subscriptions = conn.get_all_subscriptions_by_topic(topic1_arn)[ - "ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"]["Subscriptions"] - topic1_subscriptions.should.have.length_of(1) - topic1_subscriptions[0]['Endpoint'].should.equal("http://example1.com/") - - -@mock_sns_deprecated -def test_subscription_paging(): - conn = boto.connect_sns() - conn.create_topic("topic1") - conn.create_topic("topic2") - - topics_json = conn.get_all_topics() - topics = 
topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] - topic1_arn = topics[0]['TopicArn'] - topic2_arn = topics[1]['TopicArn'] - - for index in range(DEFAULT_PAGE_SIZE + int(DEFAULT_PAGE_SIZE / 3)): - conn.subscribe(topic1_arn, 'email', 'email_' + - str(index) + '@test.com') - conn.subscribe(topic2_arn, 'email', 'email_' + - str(index) + '@test.com') - - all_subscriptions = conn.get_all_subscriptions() - all_subscriptions["ListSubscriptionsResponse"]["ListSubscriptionsResult"][ - "Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) - next_token = all_subscriptions["ListSubscriptionsResponse"][ - "ListSubscriptionsResult"]["NextToken"] - next_token.should.equal(DEFAULT_PAGE_SIZE) - - all_subscriptions = conn.get_all_subscriptions(next_token=next_token * 2) - all_subscriptions["ListSubscriptionsResponse"]["ListSubscriptionsResult"][ - "Subscriptions"].should.have.length_of(int(DEFAULT_PAGE_SIZE * 2 / 3)) - next_token = all_subscriptions["ListSubscriptionsResponse"][ - "ListSubscriptionsResult"]["NextToken"] - next_token.should.equal(None) - - topic1_subscriptions = conn.get_all_subscriptions_by_topic(topic1_arn) - topic1_subscriptions["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"][ - "Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) - next_token = topic1_subscriptions["ListSubscriptionsByTopicResponse"][ - "ListSubscriptionsByTopicResult"]["NextToken"] - next_token.should.equal(DEFAULT_PAGE_SIZE) - - topic1_subscriptions = conn.get_all_subscriptions_by_topic( - topic1_arn, next_token=next_token) - topic1_subscriptions["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"][ - "Subscriptions"].should.have.length_of(int(DEFAULT_PAGE_SIZE / 3)) - next_token = topic1_subscriptions["ListSubscriptionsByTopicResponse"][ - "ListSubscriptionsByTopicResult"]["NextToken"] - next_token.should.equal(None) +from __future__ import unicode_literals +import boto + +import sure # noqa + +from moto import mock_sns_deprecated +from moto.sns.models import DEFAULT_PAGE_SIZE + + +@mock_sns_deprecated +def test_creating_subscription(): + conn = boto.connect_sns() + conn.create_topic("some-topic") + topics_json = conn.get_all_topics() + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] + + conn.subscribe(topic_arn, "http", "http://example.com/") + + subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["Subscriptions"] + subscriptions.should.have.length_of(1) + subscription = subscriptions[0] + subscription["TopicArn"].should.equal(topic_arn) + subscription["Protocol"].should.equal("http") + subscription["SubscriptionArn"].should.contain(topic_arn) + subscription["Endpoint"].should.equal("http://example.com/") + + # Now unsubscribe the subscription + conn.unsubscribe(subscription["SubscriptionArn"]) + + # And there should be zero subscriptions left + subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["Subscriptions"] + subscriptions.should.have.length_of(0) + + +@mock_sns_deprecated +def test_deleting_subscriptions_by_deleting_topic(): + conn = boto.connect_sns() + conn.create_topic("some-topic") + topics_json = conn.get_all_topics() + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] + + conn.subscribe(topic_arn, "http", "http://example.com/") + + subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["Subscriptions"] + 
subscriptions.should.have.length_of(1) + subscription = subscriptions[0] + subscription["TopicArn"].should.equal(topic_arn) + subscription["Protocol"].should.equal("http") + subscription["SubscriptionArn"].should.contain(topic_arn) + subscription["Endpoint"].should.equal("http://example.com/") + + # Now delete the topic + conn.delete_topic(topic_arn) + + # And there should now be 0 topics + topics_json = conn.get_all_topics() + topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] + topics.should.have.length_of(0) + + # And there should be zero subscriptions left + subscriptions = conn.get_all_subscriptions()["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["Subscriptions"] + subscriptions.should.have.length_of(0) + + +@mock_sns_deprecated +def test_getting_subscriptions_by_topic(): + conn = boto.connect_sns() + conn.create_topic("topic1") + conn.create_topic("topic2") + + topics_json = conn.get_all_topics() + topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] + topic1_arn = topics[0]['TopicArn'] + topic2_arn = topics[1]['TopicArn'] + + conn.subscribe(topic1_arn, "http", "http://example1.com/") + conn.subscribe(topic2_arn, "http", "http://example2.com/") + + topic1_subscriptions = conn.get_all_subscriptions_by_topic(topic1_arn)[ + "ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"]["Subscriptions"] + topic1_subscriptions.should.have.length_of(1) + topic1_subscriptions[0]['Endpoint'].should.equal("http://example1.com/") + + +@mock_sns_deprecated +def test_subscription_paging(): + conn = boto.connect_sns() + conn.create_topic("topic1") + conn.create_topic("topic2") + + topics_json = conn.get_all_topics() + topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] + topic1_arn = topics[0]['TopicArn'] + topic2_arn = topics[1]['TopicArn'] + + for index in range(DEFAULT_PAGE_SIZE + int(DEFAULT_PAGE_SIZE / 3)): + conn.subscribe(topic1_arn, 'email', 'email_' + + str(index) + '@test.com') + conn.subscribe(topic2_arn, 'email', 'email_' + + str(index) + '@test.com') + + all_subscriptions = conn.get_all_subscriptions() + all_subscriptions["ListSubscriptionsResponse"]["ListSubscriptionsResult"][ + "Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) + next_token = all_subscriptions["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["NextToken"] + next_token.should.equal(DEFAULT_PAGE_SIZE) + + all_subscriptions = conn.get_all_subscriptions(next_token=next_token * 2) + all_subscriptions["ListSubscriptionsResponse"]["ListSubscriptionsResult"][ + "Subscriptions"].should.have.length_of(int(DEFAULT_PAGE_SIZE * 2 / 3)) + next_token = all_subscriptions["ListSubscriptionsResponse"][ + "ListSubscriptionsResult"]["NextToken"] + next_token.should.equal(None) + + topic1_subscriptions = conn.get_all_subscriptions_by_topic(topic1_arn) + topic1_subscriptions["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"][ + "Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) + next_token = topic1_subscriptions["ListSubscriptionsByTopicResponse"][ + "ListSubscriptionsByTopicResult"]["NextToken"] + next_token.should.equal(DEFAULT_PAGE_SIZE) + + topic1_subscriptions = conn.get_all_subscriptions_by_topic( + topic1_arn, next_token=next_token) + topic1_subscriptions["ListSubscriptionsByTopicResponse"]["ListSubscriptionsByTopicResult"][ + "Subscriptions"].should.have.length_of(int(DEFAULT_PAGE_SIZE / 3)) + next_token = topic1_subscriptions["ListSubscriptionsByTopicResponse"][ + 
"ListSubscriptionsByTopicResult"]["NextToken"] + next_token.should.equal(None) diff --git a/tests/test_sns/test_subscriptions_boto3.py b/tests/test_sns/test_subscriptions_boto3.py index 2a56c8213..d7a32e0c6 100644 --- a/tests/test_sns/test_subscriptions_boto3.py +++ b/tests/test_sns/test_subscriptions_boto3.py @@ -1,396 +1,396 @@ -from __future__ import unicode_literals -import boto3 -import json - -import sure # noqa - -from botocore.exceptions import ClientError -from nose.tools import assert_raises - -from moto import mock_sns -from moto.sns.models import DEFAULT_PAGE_SIZE - - -@mock_sns -def test_subscribe_sms(): - client = boto3.client('sns', region_name='us-east-1') - client.create_topic(Name="some-topic") - resp = client.create_topic(Name="some-topic") - arn = resp['TopicArn'] - - resp = client.subscribe( - TopicArn=arn, - Protocol='sms', - Endpoint='+15551234567' - ) - resp.should.contain('SubscriptionArn') - -@mock_sns -def test_double_subscription(): - client = boto3.client('sns', region_name='us-east-1') - client.create_topic(Name="some-topic") - resp = client.create_topic(Name="some-topic") - arn = resp['TopicArn'] - - do_subscribe_sqs = lambda sqs_arn: client.subscribe( - TopicArn=arn, - Protocol='sqs', - Endpoint=sqs_arn - ) - resp1 = do_subscribe_sqs('arn:aws:sqs:elasticmq:000000000000:foo') - resp2 = do_subscribe_sqs('arn:aws:sqs:elasticmq:000000000000:foo') - - resp1['SubscriptionArn'].should.equal(resp2['SubscriptionArn']) - - -@mock_sns -def test_subscribe_bad_sms(): - client = boto3.client('sns', region_name='us-east-1') - client.create_topic(Name="some-topic") - resp = client.create_topic(Name="some-topic") - arn = resp['TopicArn'] - - try: - # Test invalid number - client.subscribe( - TopicArn=arn, - Protocol='sms', - Endpoint='NAA+15551234567' - ) - except ClientError as err: - err.response['Error']['Code'].should.equal('InvalidParameter') - - -@mock_sns -def test_creating_subscription(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - conn.subscribe(TopicArn=topic_arn, - Protocol="http", - Endpoint="http://example.com/") - - subscriptions = conn.list_subscriptions()["Subscriptions"] - subscriptions.should.have.length_of(1) - subscription = subscriptions[0] - subscription["TopicArn"].should.equal(topic_arn) - subscription["Protocol"].should.equal("http") - subscription["SubscriptionArn"].should.contain(topic_arn) - subscription["Endpoint"].should.equal("http://example.com/") - - # Now unsubscribe the subscription - conn.unsubscribe(SubscriptionArn=subscription["SubscriptionArn"]) - - # And there should be zero subscriptions left - subscriptions = conn.list_subscriptions()["Subscriptions"] - subscriptions.should.have.length_of(0) - - -@mock_sns -def test_deleting_subscriptions_by_deleting_topic(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - conn.subscribe(TopicArn=topic_arn, - Protocol="http", - Endpoint="http://example.com/") - - subscriptions = conn.list_subscriptions()["Subscriptions"] - subscriptions.should.have.length_of(1) - subscription = subscriptions[0] - subscription["TopicArn"].should.equal(topic_arn) - subscription["Protocol"].should.equal("http") - subscription["SubscriptionArn"].should.contain(topic_arn) - subscription["Endpoint"].should.equal("http://example.com/") - - # Now delete the topic - 
conn.delete_topic(TopicArn=topic_arn) - - # And there should now be 0 topics - topics_json = conn.list_topics() - topics = topics_json["Topics"] - topics.should.have.length_of(0) - - # And there should be zero subscriptions left - subscriptions = conn.list_subscriptions()["Subscriptions"] - subscriptions.should.have.length_of(0) - - -@mock_sns -def test_getting_subscriptions_by_topic(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="topic1") - conn.create_topic(Name="topic2") - - response = conn.list_topics() - topics = response["Topics"] - topic1_arn = topics[0]['TopicArn'] - topic2_arn = topics[1]['TopicArn'] - - conn.subscribe(TopicArn=topic1_arn, - Protocol="http", - Endpoint="http://example1.com/") - conn.subscribe(TopicArn=topic2_arn, - Protocol="http", - Endpoint="http://example2.com/") - - topic1_subscriptions = conn.list_subscriptions_by_topic(TopicArn=topic1_arn)[ - "Subscriptions"] - topic1_subscriptions.should.have.length_of(1) - topic1_subscriptions[0]['Endpoint'].should.equal("http://example1.com/") - - -@mock_sns -def test_subscription_paging(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="topic1") - - response = conn.list_topics() - topics = response["Topics"] - topic1_arn = topics[0]['TopicArn'] - - for index in range(DEFAULT_PAGE_SIZE + int(DEFAULT_PAGE_SIZE / 3)): - conn.subscribe(TopicArn=topic1_arn, - Protocol='email', - Endpoint='email_' + str(index) + '@test.com') - - all_subscriptions = conn.list_subscriptions() - all_subscriptions["Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) - next_token = all_subscriptions["NextToken"] - next_token.should.equal(str(DEFAULT_PAGE_SIZE)) - - all_subscriptions = conn.list_subscriptions(NextToken=next_token) - all_subscriptions["Subscriptions"].should.have.length_of( - int(DEFAULT_PAGE_SIZE / 3)) - all_subscriptions.shouldnt.have("NextToken") - - topic1_subscriptions = conn.list_subscriptions_by_topic( - TopicArn=topic1_arn) - topic1_subscriptions["Subscriptions"].should.have.length_of( - DEFAULT_PAGE_SIZE) - next_token = topic1_subscriptions["NextToken"] - next_token.should.equal(str(DEFAULT_PAGE_SIZE)) - - topic1_subscriptions = conn.list_subscriptions_by_topic( - TopicArn=topic1_arn, NextToken=next_token) - topic1_subscriptions["Subscriptions"].should.have.length_of( - int(DEFAULT_PAGE_SIZE / 3)) - topic1_subscriptions.shouldnt.have("NextToken") - - -@mock_sns -def test_creating_subscription_with_attributes(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - delivery_policy = json.dumps({ - 'healthyRetryPolicy': { - "numRetries": 10, - "minDelayTarget": 1, - "maxDelayTarget":2 - } - }) - - filter_policy = json.dumps({ - "store": ["example_corp"], - "event": ["order_cancelled"], - "encrypted": [False], - "customer_interests": ["basketball", "baseball"] - }) - - conn.subscribe(TopicArn=topic_arn, - Protocol="http", - Endpoint="http://example.com/", - Attributes={ - 'RawMessageDelivery': 'true', - 'DeliveryPolicy': delivery_policy, - 'FilterPolicy': filter_policy - }) - - subscriptions = conn.list_subscriptions()["Subscriptions"] - subscriptions.should.have.length_of(1) - subscription = subscriptions[0] - subscription["TopicArn"].should.equal(topic_arn) - subscription["Protocol"].should.equal("http") - subscription["SubscriptionArn"].should.contain(topic_arn) - subscription["Endpoint"].should.equal("http://example.com/") - - # 
Test the subscription attributes have been set - subscription_arn = subscription["SubscriptionArn"] - attrs = conn.get_subscription_attributes( - SubscriptionArn=subscription_arn - ) - - attrs['Attributes']['RawMessageDelivery'].should.equal('true') - attrs['Attributes']['DeliveryPolicy'].should.equal(delivery_policy) - attrs['Attributes']['FilterPolicy'].should.equal(filter_policy) - - # Now unsubscribe the subscription - conn.unsubscribe(SubscriptionArn=subscription["SubscriptionArn"]) - - # And there should be zero subscriptions left - subscriptions = conn.list_subscriptions()["Subscriptions"] - subscriptions.should.have.length_of(0) - - # invalid attr name - with assert_raises(ClientError): - conn.subscribe(TopicArn=topic_arn, - Protocol="http", - Endpoint="http://example.com/", - Attributes={ - 'InvalidName': 'true' - }) - - -@mock_sns -def test_set_subscription_attributes(): - conn = boto3.client('sns', region_name='us-east-1') - conn.create_topic(Name="some-topic") - response = conn.list_topics() - topic_arn = response["Topics"][0]['TopicArn'] - - conn.subscribe(TopicArn=topic_arn, - Protocol="http", - Endpoint="http://example.com/") - - subscriptions = conn.list_subscriptions()["Subscriptions"] - subscriptions.should.have.length_of(1) - subscription = subscriptions[0] - subscription["TopicArn"].should.equal(topic_arn) - subscription["Protocol"].should.equal("http") - subscription["SubscriptionArn"].should.contain(topic_arn) - subscription["Endpoint"].should.equal("http://example.com/") - - subscription_arn = subscription["SubscriptionArn"] - attrs = conn.get_subscription_attributes( - SubscriptionArn=subscription_arn - ) - attrs.should.have.key('Attributes') - conn.set_subscription_attributes( - SubscriptionArn=subscription_arn, - AttributeName='RawMessageDelivery', - AttributeValue='true' - ) - delivery_policy = json.dumps({ - 'healthyRetryPolicy': { - "numRetries": 10, - "minDelayTarget": 1, - "maxDelayTarget":2 - } - }) - conn.set_subscription_attributes( - SubscriptionArn=subscription_arn, - AttributeName='DeliveryPolicy', - AttributeValue=delivery_policy - ) - - filter_policy = json.dumps({ - "store": ["example_corp"], - "event": ["order_cancelled"], - "encrypted": [False], - "customer_interests": ["basketball", "baseball"] - }) - conn.set_subscription_attributes( - SubscriptionArn=subscription_arn, - AttributeName='FilterPolicy', - AttributeValue=filter_policy - ) - - attrs = conn.get_subscription_attributes( - SubscriptionArn=subscription_arn - ) - - attrs['Attributes']['RawMessageDelivery'].should.equal('true') - attrs['Attributes']['DeliveryPolicy'].should.equal(delivery_policy) - attrs['Attributes']['FilterPolicy'].should.equal(filter_policy) - - # not existing subscription - with assert_raises(ClientError): - conn.set_subscription_attributes( - SubscriptionArn='invalid', - AttributeName='RawMessageDelivery', - AttributeValue='true' - ) - with assert_raises(ClientError): - attrs = conn.get_subscription_attributes( - SubscriptionArn='invalid' - ) - - - # invalid attr name - with assert_raises(ClientError): - conn.set_subscription_attributes( - SubscriptionArn=subscription_arn, - AttributeName='InvalidName', - AttributeValue='true' - ) - - -@mock_sns -def test_check_not_opted_out(): - conn = boto3.client('sns', region_name='us-east-1') - response = conn.check_if_phone_number_is_opted_out(phoneNumber='+447428545375') - - response.should.contain('isOptedOut') - response['isOptedOut'].should.be(False) - - -@mock_sns -def test_check_opted_out(): - # Phone number ends in 99 so 
is hardcoded in the endpoint to return opted - # out status - conn = boto3.client('sns', region_name='us-east-1') - response = conn.check_if_phone_number_is_opted_out(phoneNumber='+447428545399') - - response.should.contain('isOptedOut') - response['isOptedOut'].should.be(True) - - -@mock_sns -def test_check_opted_out_invalid(): - conn = boto3.client('sns', region_name='us-east-1') - - # Invalid phone number - with assert_raises(ClientError): - conn.check_if_phone_number_is_opted_out(phoneNumber='+44742LALALA') - - -@mock_sns -def test_list_opted_out(): - conn = boto3.client('sns', region_name='us-east-1') - response = conn.list_phone_numbers_opted_out() - - response.should.contain('phoneNumbers') - len(response['phoneNumbers']).should.be.greater_than(0) - - -@mock_sns -def test_opt_in(): - conn = boto3.client('sns', region_name='us-east-1') - response = conn.list_phone_numbers_opted_out() - current_len = len(response['phoneNumbers']) - assert current_len > 0 - - conn.opt_in_phone_number(phoneNumber=response['phoneNumbers'][0]) - - response = conn.list_phone_numbers_opted_out() - len(response['phoneNumbers']).should.be.greater_than(0) - len(response['phoneNumbers']).should.be.lower_than(current_len) - - -@mock_sns -def test_confirm_subscription(): - conn = boto3.client('sns', region_name='us-east-1') - response = conn.create_topic(Name='testconfirm') - - conn.confirm_subscription( - TopicArn=response['TopicArn'], - Token='2336412f37fb687f5d51e6e241d59b68c4e583a5cee0be6f95bbf97ab8d2441cf47b99e848408adaadf4c197e65f03473d53c4ba398f6abbf38ce2e8ebf7b4ceceb2cd817959bcde1357e58a2861b05288c535822eb88cac3db04f592285249971efc6484194fc4a4586147f16916692', - AuthenticateOnUnsubscribe='true' - ) +from __future__ import unicode_literals +import boto3 +import json + +import sure # noqa + +from botocore.exceptions import ClientError +from nose.tools import assert_raises + +from moto import mock_sns +from moto.sns.models import DEFAULT_PAGE_SIZE + + +@mock_sns +def test_subscribe_sms(): + client = boto3.client('sns', region_name='us-east-1') + client.create_topic(Name="some-topic") + resp = client.create_topic(Name="some-topic") + arn = resp['TopicArn'] + + resp = client.subscribe( + TopicArn=arn, + Protocol='sms', + Endpoint='+15551234567' + ) + resp.should.contain('SubscriptionArn') + +@mock_sns +def test_double_subscription(): + client = boto3.client('sns', region_name='us-east-1') + client.create_topic(Name="some-topic") + resp = client.create_topic(Name="some-topic") + arn = resp['TopicArn'] + + do_subscribe_sqs = lambda sqs_arn: client.subscribe( + TopicArn=arn, + Protocol='sqs', + Endpoint=sqs_arn + ) + resp1 = do_subscribe_sqs('arn:aws:sqs:elasticmq:000000000000:foo') + resp2 = do_subscribe_sqs('arn:aws:sqs:elasticmq:000000000000:foo') + + resp1['SubscriptionArn'].should.equal(resp2['SubscriptionArn']) + + +@mock_sns +def test_subscribe_bad_sms(): + client = boto3.client('sns', region_name='us-east-1') + client.create_topic(Name="some-topic") + resp = client.create_topic(Name="some-topic") + arn = resp['TopicArn'] + + try: + # Test invalid number + client.subscribe( + TopicArn=arn, + Protocol='sms', + Endpoint='NAA+15551234567' + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameter') + + +@mock_sns +def test_creating_subscription(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + conn.subscribe(TopicArn=topic_arn, + 
Protocol="http", + Endpoint="http://example.com/") + + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(1) + subscription = subscriptions[0] + subscription["TopicArn"].should.equal(topic_arn) + subscription["Protocol"].should.equal("http") + subscription["SubscriptionArn"].should.contain(topic_arn) + subscription["Endpoint"].should.equal("http://example.com/") + + # Now unsubscribe the subscription + conn.unsubscribe(SubscriptionArn=subscription["SubscriptionArn"]) + + # And there should be zero subscriptions left + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(0) + + +@mock_sns +def test_deleting_subscriptions_by_deleting_topic(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + conn.subscribe(TopicArn=topic_arn, + Protocol="http", + Endpoint="http://example.com/") + + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(1) + subscription = subscriptions[0] + subscription["TopicArn"].should.equal(topic_arn) + subscription["Protocol"].should.equal("http") + subscription["SubscriptionArn"].should.contain(topic_arn) + subscription["Endpoint"].should.equal("http://example.com/") + + # Now delete the topic + conn.delete_topic(TopicArn=topic_arn) + + # And there should now be 0 topics + topics_json = conn.list_topics() + topics = topics_json["Topics"] + topics.should.have.length_of(0) + + # And there should be zero subscriptions left + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(0) + + +@mock_sns +def test_getting_subscriptions_by_topic(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="topic1") + conn.create_topic(Name="topic2") + + response = conn.list_topics() + topics = response["Topics"] + topic1_arn = topics[0]['TopicArn'] + topic2_arn = topics[1]['TopicArn'] + + conn.subscribe(TopicArn=topic1_arn, + Protocol="http", + Endpoint="http://example1.com/") + conn.subscribe(TopicArn=topic2_arn, + Protocol="http", + Endpoint="http://example2.com/") + + topic1_subscriptions = conn.list_subscriptions_by_topic(TopicArn=topic1_arn)[ + "Subscriptions"] + topic1_subscriptions.should.have.length_of(1) + topic1_subscriptions[0]['Endpoint'].should.equal("http://example1.com/") + + +@mock_sns +def test_subscription_paging(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="topic1") + + response = conn.list_topics() + topics = response["Topics"] + topic1_arn = topics[0]['TopicArn'] + + for index in range(DEFAULT_PAGE_SIZE + int(DEFAULT_PAGE_SIZE / 3)): + conn.subscribe(TopicArn=topic1_arn, + Protocol='email', + Endpoint='email_' + str(index) + '@test.com') + + all_subscriptions = conn.list_subscriptions() + all_subscriptions["Subscriptions"].should.have.length_of(DEFAULT_PAGE_SIZE) + next_token = all_subscriptions["NextToken"] + next_token.should.equal(str(DEFAULT_PAGE_SIZE)) + + all_subscriptions = conn.list_subscriptions(NextToken=next_token) + all_subscriptions["Subscriptions"].should.have.length_of( + int(DEFAULT_PAGE_SIZE / 3)) + all_subscriptions.shouldnt.have("NextToken") + + topic1_subscriptions = conn.list_subscriptions_by_topic( + TopicArn=topic1_arn) + topic1_subscriptions["Subscriptions"].should.have.length_of( + DEFAULT_PAGE_SIZE) + next_token = topic1_subscriptions["NextToken"] + 
next_token.should.equal(str(DEFAULT_PAGE_SIZE)) + + topic1_subscriptions = conn.list_subscriptions_by_topic( + TopicArn=topic1_arn, NextToken=next_token) + topic1_subscriptions["Subscriptions"].should.have.length_of( + int(DEFAULT_PAGE_SIZE / 3)) + topic1_subscriptions.shouldnt.have("NextToken") + + +@mock_sns +def test_creating_subscription_with_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + delivery_policy = json.dumps({ + 'healthyRetryPolicy': { + "numRetries": 10, + "minDelayTarget": 1, + "maxDelayTarget":2 + } + }) + + filter_policy = json.dumps({ + "store": ["example_corp"], + "event": ["order_cancelled"], + "encrypted": [False], + "customer_interests": ["basketball", "baseball"] + }) + + conn.subscribe(TopicArn=topic_arn, + Protocol="http", + Endpoint="http://example.com/", + Attributes={ + 'RawMessageDelivery': 'true', + 'DeliveryPolicy': delivery_policy, + 'FilterPolicy': filter_policy + }) + + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(1) + subscription = subscriptions[0] + subscription["TopicArn"].should.equal(topic_arn) + subscription["Protocol"].should.equal("http") + subscription["SubscriptionArn"].should.contain(topic_arn) + subscription["Endpoint"].should.equal("http://example.com/") + + # Test the subscription attributes have been set + subscription_arn = subscription["SubscriptionArn"] + attrs = conn.get_subscription_attributes( + SubscriptionArn=subscription_arn + ) + + attrs['Attributes']['RawMessageDelivery'].should.equal('true') + attrs['Attributes']['DeliveryPolicy'].should.equal(delivery_policy) + attrs['Attributes']['FilterPolicy'].should.equal(filter_policy) + + # Now unsubscribe the subscription + conn.unsubscribe(SubscriptionArn=subscription["SubscriptionArn"]) + + # And there should be zero subscriptions left + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(0) + + # invalid attr name + with assert_raises(ClientError): + conn.subscribe(TopicArn=topic_arn, + Protocol="http", + Endpoint="http://example.com/", + Attributes={ + 'InvalidName': 'true' + }) + + +@mock_sns +def test_set_subscription_attributes(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + conn.subscribe(TopicArn=topic_arn, + Protocol="http", + Endpoint="http://example.com/") + + subscriptions = conn.list_subscriptions()["Subscriptions"] + subscriptions.should.have.length_of(1) + subscription = subscriptions[0] + subscription["TopicArn"].should.equal(topic_arn) + subscription["Protocol"].should.equal("http") + subscription["SubscriptionArn"].should.contain(topic_arn) + subscription["Endpoint"].should.equal("http://example.com/") + + subscription_arn = subscription["SubscriptionArn"] + attrs = conn.get_subscription_attributes( + SubscriptionArn=subscription_arn + ) + attrs.should.have.key('Attributes') + conn.set_subscription_attributes( + SubscriptionArn=subscription_arn, + AttributeName='RawMessageDelivery', + AttributeValue='true' + ) + delivery_policy = json.dumps({ + 'healthyRetryPolicy': { + "numRetries": 10, + "minDelayTarget": 1, + "maxDelayTarget":2 + } + }) + conn.set_subscription_attributes( + SubscriptionArn=subscription_arn, + AttributeName='DeliveryPolicy', + AttributeValue=delivery_policy + ) + + filter_policy = 
json.dumps({ + "store": ["example_corp"], + "event": ["order_cancelled"], + "encrypted": [False], + "customer_interests": ["basketball", "baseball"] + }) + conn.set_subscription_attributes( + SubscriptionArn=subscription_arn, + AttributeName='FilterPolicy', + AttributeValue=filter_policy + ) + + attrs = conn.get_subscription_attributes( + SubscriptionArn=subscription_arn + ) + + attrs['Attributes']['RawMessageDelivery'].should.equal('true') + attrs['Attributes']['DeliveryPolicy'].should.equal(delivery_policy) + attrs['Attributes']['FilterPolicy'].should.equal(filter_policy) + + # not existing subscription + with assert_raises(ClientError): + conn.set_subscription_attributes( + SubscriptionArn='invalid', + AttributeName='RawMessageDelivery', + AttributeValue='true' + ) + with assert_raises(ClientError): + attrs = conn.get_subscription_attributes( + SubscriptionArn='invalid' + ) + + + # invalid attr name + with assert_raises(ClientError): + conn.set_subscription_attributes( + SubscriptionArn=subscription_arn, + AttributeName='InvalidName', + AttributeValue='true' + ) + + +@mock_sns +def test_check_not_opted_out(): + conn = boto3.client('sns', region_name='us-east-1') + response = conn.check_if_phone_number_is_opted_out(phoneNumber='+447428545375') + + response.should.contain('isOptedOut') + response['isOptedOut'].should.be(False) + + +@mock_sns +def test_check_opted_out(): + # Phone number ends in 99 so is hardcoded in the endpoint to return opted + # out status + conn = boto3.client('sns', region_name='us-east-1') + response = conn.check_if_phone_number_is_opted_out(phoneNumber='+447428545399') + + response.should.contain('isOptedOut') + response['isOptedOut'].should.be(True) + + +@mock_sns +def test_check_opted_out_invalid(): + conn = boto3.client('sns', region_name='us-east-1') + + # Invalid phone number + with assert_raises(ClientError): + conn.check_if_phone_number_is_opted_out(phoneNumber='+44742LALALA') + + +@mock_sns +def test_list_opted_out(): + conn = boto3.client('sns', region_name='us-east-1') + response = conn.list_phone_numbers_opted_out() + + response.should.contain('phoneNumbers') + len(response['phoneNumbers']).should.be.greater_than(0) + + +@mock_sns +def test_opt_in(): + conn = boto3.client('sns', region_name='us-east-1') + response = conn.list_phone_numbers_opted_out() + current_len = len(response['phoneNumbers']) + assert current_len > 0 + + conn.opt_in_phone_number(phoneNumber=response['phoneNumbers'][0]) + + response = conn.list_phone_numbers_opted_out() + len(response['phoneNumbers']).should.be.greater_than(0) + len(response['phoneNumbers']).should.be.lower_than(current_len) + + +@mock_sns +def test_confirm_subscription(): + conn = boto3.client('sns', region_name='us-east-1') + response = conn.create_topic(Name='testconfirm') + + conn.confirm_subscription( + TopicArn=response['TopicArn'], + Token='2336412f37fb687f5d51e6e241d59b68c4e583a5cee0be6f95bbf97ab8d2441cf47b99e848408adaadf4c197e65f03473d53c4ba398f6abbf38ce2e8ebf7b4ceceb2cd817959bcde1357e58a2861b05288c535822eb88cac3db04f592285249971efc6484194fc4a4586147f16916692', + AuthenticateOnUnsubscribe='true' + ) diff --git a/tests/test_sns/test_topics.py b/tests/test_sns/test_topics.py index 1b039c51d..928db8d02 100644 --- a/tests/test_sns/test_topics.py +++ b/tests/test_sns/test_topics.py @@ -1,133 +1,133 @@ -from __future__ import unicode_literals -import boto -import json -import six - -import sure # noqa - -from boto.exception import BotoServerError -from moto import mock_sns_deprecated -from moto.sns.models import 
DEFAULT_TOPIC_POLICY, DEFAULT_EFFECTIVE_DELIVERY_POLICY, DEFAULT_PAGE_SIZE - - -@mock_sns_deprecated -def test_create_and_delete_topic(): - conn = boto.connect_sns() - conn.create_topic("some-topic") - - topics_json = conn.get_all_topics() - topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] - topics.should.have.length_of(1) - topics[0]['TopicArn'].should.equal( - "arn:aws:sns:{0}:123456789012:some-topic" - .format(conn.region.name) - ) - - # Delete the topic - conn.delete_topic(topics[0]['TopicArn']) - - # And there should now be 0 topics - topics_json = conn.get_all_topics() - topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] - topics.should.have.length_of(0) - - -@mock_sns_deprecated -def test_get_missing_topic(): - conn = boto.connect_sns() - conn.get_topic_attributes.when.called_with( - "a-fake-arn").should.throw(BotoServerError) - - -@mock_sns_deprecated -def test_create_topic_in_multiple_regions(): - for region in ['us-west-1', 'us-west-2']: - conn = boto.sns.connect_to_region(region) - conn.create_topic("some-topic") - list(conn.get_all_topics()["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"]).should.have.length_of(1) - - -@mock_sns_deprecated -def test_topic_corresponds_to_region(): - for region in ['us-east-1', 'us-west-2']: - conn = boto.sns.connect_to_region(region) - conn.create_topic("some-topic") - topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"][0]['TopicArn'] - topic_arn.should.equal( - "arn:aws:sns:{0}:123456789012:some-topic".format(region)) - - -@mock_sns_deprecated -def test_topic_attributes(): - conn = boto.connect_sns() - conn.create_topic("some-topic") - - topics_json = conn.get_all_topics() - topic_arn = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"][0]['TopicArn'] - - attributes = conn.get_topic_attributes(topic_arn)['GetTopicAttributesResponse'][ - 'GetTopicAttributesResult']['Attributes'] - attributes["TopicArn"].should.equal( - "arn:aws:sns:{0}:123456789012:some-topic" - .format(conn.region.name) - ) - attributes["Owner"].should.equal(123456789012) - json.loads(attributes["Policy"]).should.equal(DEFAULT_TOPIC_POLICY) - attributes["DisplayName"].should.equal("") - attributes["SubscriptionsPending"].should.equal(0) - attributes["SubscriptionsConfirmed"].should.equal(0) - attributes["SubscriptionsDeleted"].should.equal(0) - attributes["DeliveryPolicy"].should.equal("") - json.loads(attributes["EffectiveDeliveryPolicy"]).should.equal( - DEFAULT_EFFECTIVE_DELIVERY_POLICY) - - # boto can't handle prefix-mandatory strings: - # i.e. 
unicode on Python 2 -- u"foobar" - # and bytes on Python 3 -- b"foobar" - if six.PY2: - policy = {b"foo": b"bar"} - displayname = b"My display name" - delivery = {b"http": {b"defaultHealthyRetryPolicy": {b"numRetries": 5}}} - else: - policy = {u"foo": u"bar"} - displayname = u"My display name" - delivery = {u"http": {u"defaultHealthyRetryPolicy": {u"numRetries": 5}}} - conn.set_topic_attributes(topic_arn, "Policy", policy) - conn.set_topic_attributes(topic_arn, "DisplayName", displayname) - conn.set_topic_attributes(topic_arn, "DeliveryPolicy", delivery) - - attributes = conn.get_topic_attributes(topic_arn)['GetTopicAttributesResponse'][ - 'GetTopicAttributesResult']['Attributes'] - attributes["Policy"].should.equal("{'foo': 'bar'}") - attributes["DisplayName"].should.equal("My display name") - attributes["DeliveryPolicy"].should.equal( - "{'http': {'defaultHealthyRetryPolicy': {'numRetries': 5}}}") - - -@mock_sns_deprecated -def test_topic_paging(): - conn = boto.connect_sns() - for index in range(DEFAULT_PAGE_SIZE + int(DEFAULT_PAGE_SIZE / 2)): - conn.create_topic("some-topic_" + str(index)) - - topics_json = conn.get_all_topics() - topics_list = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"] - next_token = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["NextToken"] - - len(topics_list).should.equal(DEFAULT_PAGE_SIZE) - next_token.should.equal(DEFAULT_PAGE_SIZE) - - topics_json = conn.get_all_topics(next_token=next_token) - topics_list = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["Topics"] - next_token = topics_json["ListTopicsResponse"][ - "ListTopicsResult"]["NextToken"] - - topics_list.should.have.length_of(int(DEFAULT_PAGE_SIZE / 2)) - next_token.should.equal(None) +from __future__ import unicode_literals +import boto +import json +import six + +import sure # noqa + +from boto.exception import BotoServerError +from moto import mock_sns_deprecated +from moto.sns.models import DEFAULT_TOPIC_POLICY, DEFAULT_EFFECTIVE_DELIVERY_POLICY, DEFAULT_PAGE_SIZE + + +@mock_sns_deprecated +def test_create_and_delete_topic(): + conn = boto.connect_sns() + conn.create_topic("some-topic") + + topics_json = conn.get_all_topics() + topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] + topics.should.have.length_of(1) + topics[0]['TopicArn'].should.equal( + "arn:aws:sns:{0}:123456789012:some-topic" + .format(conn.region.name) + ) + + # Delete the topic + conn.delete_topic(topics[0]['TopicArn']) + + # And there should now be 0 topics + topics_json = conn.get_all_topics() + topics = topics_json["ListTopicsResponse"]["ListTopicsResult"]["Topics"] + topics.should.have.length_of(0) + + +@mock_sns_deprecated +def test_get_missing_topic(): + conn = boto.connect_sns() + conn.get_topic_attributes.when.called_with( + "a-fake-arn").should.throw(BotoServerError) + + +@mock_sns_deprecated +def test_create_topic_in_multiple_regions(): + for region in ['us-west-1', 'us-west-2']: + conn = boto.sns.connect_to_region(region) + conn.create_topic("some-topic") + list(conn.get_all_topics()["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"]).should.have.length_of(1) + + +@mock_sns_deprecated +def test_topic_corresponds_to_region(): + for region in ['us-east-1', 'us-west-2']: + conn = boto.sns.connect_to_region(region) + conn.create_topic("some-topic") + topics_json = conn.get_all_topics() + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] + topic_arn.should.equal( + 
"arn:aws:sns:{0}:123456789012:some-topic".format(region)) + + +@mock_sns_deprecated +def test_topic_attributes(): + conn = boto.connect_sns() + conn.create_topic("some-topic") + + topics_json = conn.get_all_topics() + topic_arn = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"][0]['TopicArn'] + + attributes = conn.get_topic_attributes(topic_arn)['GetTopicAttributesResponse'][ + 'GetTopicAttributesResult']['Attributes'] + attributes["TopicArn"].should.equal( + "arn:aws:sns:{0}:123456789012:some-topic" + .format(conn.region.name) + ) + attributes["Owner"].should.equal(123456789012) + json.loads(attributes["Policy"]).should.equal(DEFAULT_TOPIC_POLICY) + attributes["DisplayName"].should.equal("") + attributes["SubscriptionsPending"].should.equal(0) + attributes["SubscriptionsConfirmed"].should.equal(0) + attributes["SubscriptionsDeleted"].should.equal(0) + attributes["DeliveryPolicy"].should.equal("") + json.loads(attributes["EffectiveDeliveryPolicy"]).should.equal( + DEFAULT_EFFECTIVE_DELIVERY_POLICY) + + # boto can't handle prefix-mandatory strings: + # i.e. unicode on Python 2 -- u"foobar" + # and bytes on Python 3 -- b"foobar" + if six.PY2: + policy = {b"foo": b"bar"} + displayname = b"My display name" + delivery = {b"http": {b"defaultHealthyRetryPolicy": {b"numRetries": 5}}} + else: + policy = {u"foo": u"bar"} + displayname = u"My display name" + delivery = {u"http": {u"defaultHealthyRetryPolicy": {u"numRetries": 5}}} + conn.set_topic_attributes(topic_arn, "Policy", policy) + conn.set_topic_attributes(topic_arn, "DisplayName", displayname) + conn.set_topic_attributes(topic_arn, "DeliveryPolicy", delivery) + + attributes = conn.get_topic_attributes(topic_arn)['GetTopicAttributesResponse'][ + 'GetTopicAttributesResult']['Attributes'] + attributes["Policy"].should.equal("{'foo': 'bar'}") + attributes["DisplayName"].should.equal("My display name") + attributes["DeliveryPolicy"].should.equal( + "{'http': {'defaultHealthyRetryPolicy': {'numRetries': 5}}}") + + +@mock_sns_deprecated +def test_topic_paging(): + conn = boto.connect_sns() + for index in range(DEFAULT_PAGE_SIZE + int(DEFAULT_PAGE_SIZE / 2)): + conn.create_topic("some-topic_" + str(index)) + + topics_json = conn.get_all_topics() + topics_list = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"] + next_token = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["NextToken"] + + len(topics_list).should.equal(DEFAULT_PAGE_SIZE) + next_token.should.equal(DEFAULT_PAGE_SIZE) + + topics_json = conn.get_all_topics(next_token=next_token) + topics_list = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["Topics"] + next_token = topics_json["ListTopicsResponse"][ + "ListTopicsResult"]["NextToken"] + + topics_list.should.have.length_of(int(DEFAULT_PAGE_SIZE / 2)) + next_token.should.equal(None) diff --git a/tests/test_sns/test_topics_boto3.py b/tests/test_sns/test_topics_boto3.py index 7d9a27b18..f836535ef 100644 --- a/tests/test_sns/test_topics_boto3.py +++ b/tests/test_sns/test_topics_boto3.py @@ -1,190 +1,190 @@ -from __future__ import unicode_literals -import boto3 -import six -import json - -import sure # noqa - -from botocore.exceptions import ClientError -from moto import mock_sns -from moto.sns.models import DEFAULT_TOPIC_POLICY, DEFAULT_EFFECTIVE_DELIVERY_POLICY, DEFAULT_PAGE_SIZE - - -@mock_sns -def test_create_and_delete_topic(): - conn = boto3.client("sns", region_name="us-east-1") - for topic_name in ('some-topic', '-some-topic-', '_some-topic_', 'a' * 256): - 
conn.create_topic(Name=topic_name) - - topics_json = conn.list_topics() - topics = topics_json["Topics"] - topics.should.have.length_of(1) - topics[0]['TopicArn'].should.equal( - "arn:aws:sns:{0}:123456789012:{1}" - .format(conn._client_config.region_name, topic_name) - ) - - # Delete the topic - conn.delete_topic(TopicArn=topics[0]['TopicArn']) - - # And there should now be 0 topics - topics_json = conn.list_topics() - topics = topics_json["Topics"] - topics.should.have.length_of(0) - -@mock_sns -def test_create_topic_should_be_indempodent(): - conn = boto3.client("sns", region_name="us-east-1") - topic_arn = conn.create_topic(Name="some-topic")['TopicArn'] - conn.set_topic_attributes( - TopicArn=topic_arn, - AttributeName="DisplayName", - AttributeValue="should_be_set" - ) - topic_display_name = conn.get_topic_attributes( - TopicArn=topic_arn - )['Attributes']['DisplayName'] - topic_display_name.should.be.equal("should_be_set") - - #recreate topic to prove indempodentcy - topic_arn = conn.create_topic(Name="some-topic")['TopicArn'] - topic_display_name = conn.get_topic_attributes( - TopicArn=topic_arn - )['Attributes']['DisplayName'] - topic_display_name.should.be.equal("should_be_set") - -@mock_sns -def test_get_missing_topic(): - conn = boto3.client("sns", region_name="us-east-1") - conn.get_topic_attributes.when.called_with( - TopicArn="a-fake-arn").should.throw(ClientError) - -@mock_sns -def test_create_topic_must_meet_constraints(): - conn = boto3.client("sns", region_name="us-east-1") - common_random_chars = [':', ";", "!", "@", "|", "^", "%"] - for char in common_random_chars: - conn.create_topic.when.called_with( - Name="no%s_invalidchar" % char).should.throw(ClientError) - conn.create_topic.when.called_with( - Name="no spaces allowed").should.throw(ClientError) - - -@mock_sns -def test_create_topic_should_be_of_certain_length(): - conn = boto3.client("sns", region_name="us-east-1") - too_short = "" - conn.create_topic.when.called_with( - Name=too_short).should.throw(ClientError) - too_long = "x" * 257 - conn.create_topic.when.called_with( - Name=too_long).should.throw(ClientError) - - -@mock_sns -def test_create_topic_in_multiple_regions(): - for region in ['us-west-1', 'us-west-2']: - conn = boto3.client("sns", region_name=region) - conn.create_topic(Name="some-topic") - list(conn.list_topics()["Topics"]).should.have.length_of(1) - - -@mock_sns -def test_topic_corresponds_to_region(): - for region in ['us-east-1', 'us-west-2']: - conn = boto3.client("sns", region_name=region) - conn.create_topic(Name="some-topic") - topics_json = conn.list_topics() - topic_arn = topics_json["Topics"][0]['TopicArn'] - topic_arn.should.equal( - "arn:aws:sns:{0}:123456789012:some-topic".format(region)) - - -@mock_sns -def test_topic_attributes(): - conn = boto3.client("sns", region_name="us-east-1") - conn.create_topic(Name="some-topic") - - topics_json = conn.list_topics() - topic_arn = topics_json["Topics"][0]['TopicArn'] - - attributes = conn.get_topic_attributes(TopicArn=topic_arn)['Attributes'] - attributes["TopicArn"].should.equal( - "arn:aws:sns:{0}:123456789012:some-topic" - .format(conn._client_config.region_name) - ) - attributes["Owner"].should.equal('123456789012') - json.loads(attributes["Policy"]).should.equal(DEFAULT_TOPIC_POLICY) - attributes["DisplayName"].should.equal("") - attributes["SubscriptionsPending"].should.equal('0') - attributes["SubscriptionsConfirmed"].should.equal('0') - attributes["SubscriptionsDeleted"].should.equal('0') - 
attributes["DeliveryPolicy"].should.equal("") - json.loads(attributes["EffectiveDeliveryPolicy"]).should.equal( - DEFAULT_EFFECTIVE_DELIVERY_POLICY) - - # boto can't handle prefix-mandatory strings: - # i.e. unicode on Python 2 -- u"foobar" - # and bytes on Python 3 -- b"foobar" - if six.PY2: - policy = json.dumps({b"foo": b"bar"}) - displayname = b"My display name" - delivery = json.dumps( - {b"http": {b"defaultHealthyRetryPolicy": {b"numRetries": 5}}}) - else: - policy = json.dumps({u"foo": u"bar"}) - displayname = u"My display name" - delivery = json.dumps( - {u"http": {u"defaultHealthyRetryPolicy": {u"numRetries": 5}}}) - conn.set_topic_attributes(TopicArn=topic_arn, - AttributeName="Policy", - AttributeValue=policy) - conn.set_topic_attributes(TopicArn=topic_arn, - AttributeName="DisplayName", - AttributeValue=displayname) - conn.set_topic_attributes(TopicArn=topic_arn, - AttributeName="DeliveryPolicy", - AttributeValue=delivery) - - attributes = conn.get_topic_attributes(TopicArn=topic_arn)['Attributes'] - attributes["Policy"].should.equal('{"foo": "bar"}') - attributes["DisplayName"].should.equal("My display name") - attributes["DeliveryPolicy"].should.equal( - '{"http": {"defaultHealthyRetryPolicy": {"numRetries": 5}}}') - - -@mock_sns -def test_topic_paging(): - conn = boto3.client("sns", region_name="us-east-1") - for index in range(DEFAULT_PAGE_SIZE + int(DEFAULT_PAGE_SIZE / 2)): - conn.create_topic(Name="some-topic_" + str(index)) - - response = conn.list_topics() - topics_list = response["Topics"] - next_token = response["NextToken"] - - len(topics_list).should.equal(DEFAULT_PAGE_SIZE) - int(next_token).should.equal(DEFAULT_PAGE_SIZE) - - response = conn.list_topics(NextToken=next_token) - topics_list = response["Topics"] - response.shouldnt.have("NextToken") - - topics_list.should.have.length_of(int(DEFAULT_PAGE_SIZE / 2)) - - -@mock_sns -def test_add_remove_permissions(): - conn = boto3.client('sns', region_name='us-east-1') - response = conn.create_topic(Name='testpermissions') - - conn.add_permission( - TopicArn=response['TopicArn'], - Label='Test1234', - AWSAccountId=['999999999999'], - ActionName=['AddPermission'] - ) - conn.remove_permission( - TopicArn=response['TopicArn'], - Label='Test1234' - ) +from __future__ import unicode_literals +import boto3 +import six +import json + +import sure # noqa + +from botocore.exceptions import ClientError +from moto import mock_sns +from moto.sns.models import DEFAULT_TOPIC_POLICY, DEFAULT_EFFECTIVE_DELIVERY_POLICY, DEFAULT_PAGE_SIZE + + +@mock_sns +def test_create_and_delete_topic(): + conn = boto3.client("sns", region_name="us-east-1") + for topic_name in ('some-topic', '-some-topic-', '_some-topic_', 'a' * 256): + conn.create_topic(Name=topic_name) + + topics_json = conn.list_topics() + topics = topics_json["Topics"] + topics.should.have.length_of(1) + topics[0]['TopicArn'].should.equal( + "arn:aws:sns:{0}:123456789012:{1}" + .format(conn._client_config.region_name, topic_name) + ) + + # Delete the topic + conn.delete_topic(TopicArn=topics[0]['TopicArn']) + + # And there should now be 0 topics + topics_json = conn.list_topics() + topics = topics_json["Topics"] + topics.should.have.length_of(0) + +@mock_sns +def test_create_topic_should_be_indempodent(): + conn = boto3.client("sns", region_name="us-east-1") + topic_arn = conn.create_topic(Name="some-topic")['TopicArn'] + conn.set_topic_attributes( + TopicArn=topic_arn, + AttributeName="DisplayName", + AttributeValue="should_be_set" + ) + topic_display_name = 
conn.get_topic_attributes(
+        TopicArn=topic_arn
+    )['Attributes']['DisplayName']
+    topic_display_name.should.be.equal("should_be_set")
+
+    # recreate topic to prove idempotency
+    topic_arn = conn.create_topic(Name="some-topic")['TopicArn']
+    topic_display_name = conn.get_topic_attributes(
+        TopicArn=topic_arn
+    )['Attributes']['DisplayName']
+    topic_display_name.should.be.equal("should_be_set")
+
+@mock_sns
+def test_get_missing_topic():
+    conn = boto3.client("sns", region_name="us-east-1")
+    conn.get_topic_attributes.when.called_with(
+        TopicArn="a-fake-arn").should.throw(ClientError)
+
+@mock_sns
+def test_create_topic_must_meet_constraints():
+    conn = boto3.client("sns", region_name="us-east-1")
+    common_random_chars = [':', ";", "!", "@", "|", "^", "%"]
+    for char in common_random_chars:
+        conn.create_topic.when.called_with(
+            Name="no%s_invalidchar" % char).should.throw(ClientError)
+    conn.create_topic.when.called_with(
+        Name="no spaces allowed").should.throw(ClientError)
+
+
+@mock_sns
+def test_create_topic_should_be_of_certain_length():
+    conn = boto3.client("sns", region_name="us-east-1")
+    too_short = ""
+    conn.create_topic.when.called_with(
+        Name=too_short).should.throw(ClientError)
+    too_long = "x" * 257
+    conn.create_topic.when.called_with(
+        Name=too_long).should.throw(ClientError)
+
+
+@mock_sns
+def test_create_topic_in_multiple_regions():
+    for region in ['us-west-1', 'us-west-2']:
+        conn = boto3.client("sns", region_name=region)
+        conn.create_topic(Name="some-topic")
+        list(conn.list_topics()["Topics"]).should.have.length_of(1)
+
+
+@mock_sns
+def test_topic_corresponds_to_region():
+    for region in ['us-east-1', 'us-west-2']:
+        conn = boto3.client("sns", region_name=region)
+        conn.create_topic(Name="some-topic")
+        topics_json = conn.list_topics()
+        topic_arn = topics_json["Topics"][0]['TopicArn']
+        topic_arn.should.equal(
+            "arn:aws:sns:{0}:123456789012:some-topic".format(region))
+
+
+@mock_sns
+def test_topic_attributes():
+    conn = boto3.client("sns", region_name="us-east-1")
+    conn.create_topic(Name="some-topic")
+
+    topics_json = conn.list_topics()
+    topic_arn = topics_json["Topics"][0]['TopicArn']
+
+    attributes = conn.get_topic_attributes(TopicArn=topic_arn)['Attributes']
+    attributes["TopicArn"].should.equal(
+        "arn:aws:sns:{0}:123456789012:some-topic"
+        .format(conn._client_config.region_name)
+    )
+    attributes["Owner"].should.equal('123456789012')
+    json.loads(attributes["Policy"]).should.equal(DEFAULT_TOPIC_POLICY)
+    attributes["DisplayName"].should.equal("")
+    attributes["SubscriptionsPending"].should.equal('0')
+    attributes["SubscriptionsConfirmed"].should.equal('0')
+    attributes["SubscriptionsDeleted"].should.equal('0')
+    attributes["DeliveryPolicy"].should.equal("")
+    json.loads(attributes["EffectiveDeliveryPolicy"]).should.equal(
+        DEFAULT_EFFECTIVE_DELIVERY_POLICY)
+
+    # boto can't handle prefix-mandatory strings:
+    # i.e. 
unicode on Python 2 -- u"foobar"
+    # and bytes on Python 3 -- b"foobar"
+    if six.PY2:
+        policy = json.dumps({b"foo": b"bar"})
+        displayname = b"My display name"
+        delivery = json.dumps(
+            {b"http": {b"defaultHealthyRetryPolicy": {b"numRetries": 5}}})
+    else:
+        policy = json.dumps({u"foo": u"bar"})
+        displayname = u"My display name"
+        delivery = json.dumps(
+            {u"http": {u"defaultHealthyRetryPolicy": {u"numRetries": 5}}})
+    conn.set_topic_attributes(TopicArn=topic_arn,
+                              AttributeName="Policy",
+                              AttributeValue=policy)
+    conn.set_topic_attributes(TopicArn=topic_arn,
+                              AttributeName="DisplayName",
+                              AttributeValue=displayname)
+    conn.set_topic_attributes(TopicArn=topic_arn,
+                              AttributeName="DeliveryPolicy",
+                              AttributeValue=delivery)
+
+    attributes = conn.get_topic_attributes(TopicArn=topic_arn)['Attributes']
+    attributes["Policy"].should.equal('{"foo": "bar"}')
+    attributes["DisplayName"].should.equal("My display name")
+    attributes["DeliveryPolicy"].should.equal(
+        '{"http": {"defaultHealthyRetryPolicy": {"numRetries": 5}}}')
+
+
+@mock_sns
+def test_topic_paging():
+    conn = boto3.client("sns", region_name="us-east-1")
+    for index in range(DEFAULT_PAGE_SIZE + int(DEFAULT_PAGE_SIZE / 2)):
+        conn.create_topic(Name="some-topic_" + str(index))
+
+    response = conn.list_topics()
+    topics_list = response["Topics"]
+    next_token = response["NextToken"]
+
+    len(topics_list).should.equal(DEFAULT_PAGE_SIZE)
+    int(next_token).should.equal(DEFAULT_PAGE_SIZE)
+
+    response = conn.list_topics(NextToken=next_token)
+    topics_list = response["Topics"]
+    response.shouldnt.have("NextToken")
+
+    topics_list.should.have.length_of(int(DEFAULT_PAGE_SIZE / 2))
+
+
+@mock_sns
+def test_add_remove_permissions():
+    conn = boto3.client('sns', region_name='us-east-1')
+    response = conn.create_topic(Name='testpermissions')
+
+    conn.add_permission(
+        TopicArn=response['TopicArn'],
+        Label='Test1234',
+        AWSAccountId=['999999999999'],
+        ActionName=['AddPermission']
+    )
+    conn.remove_permission(
+        TopicArn=response['TopicArn'],
+        Label='Test1234'
+    )
diff --git a/tests/test_sqs/test_server.py b/tests/test_sqs/test_server.py
index e7f745fd2..b2b233bde 100644
--- a/tests/test_sqs/test_server.py
+++ b/tests/test_sqs/test_server.py
@@ -1,85 +1,85 @@
-from __future__ import unicode_literals
-
-import re
-import sure  # noqa
-import threading
-import time
-
-import moto.server as server
-
-'''
-Test the different server responses
-'''
-
-
-def test_sqs_list_identities():
-    backend = server.create_backend_app("sqs")
-    test_client = backend.test_client()
-
-    res = test_client.get('/?Action=ListQueues')
-    res.data.should.contain(b"ListQueuesResponse")
-
-    # Make sure that we can receive messages from queues whose name contains dots (".")
-    # The AWS API mandates that the names of FIFO queues use the suffix ".fifo"
-    # See: https://github.com/spulec/moto/issues/866
-
-    for queue_name in ('testqueue', 'otherqueue.fifo'):
-
-        res = test_client.put('/?Action=CreateQueue&QueueName=%s' % queue_name)
-
-
-        res = test_client.put(
-            '/123/%s?MessageBody=test-message&Action=SendMessage' % queue_name)
-
-        res = test_client.get(
-            '/123/%s?Action=ReceiveMessage&MaxNumberOfMessages=1' % queue_name)
-
-        message = re.search("<Body>(.*?)</Body>",
-                            res.data.decode('utf-8')).groups()[0]
-        message.should.equal('test-message')
-
-    res = test_client.get('/?Action=ListQueues&QueueNamePrefix=other')
-    res.data.should.contain(b'otherqueue.fifo')
-    res.data.should_not.contain(b'testqueue')
-
-
-def test_messages_polling():
-    backend = server.create_backend_app("sqs")
-    test_client = backend.test_client()
-    messages = []
-
-    test_client.put('/?Action=CreateQueue&QueueName=testqueue')
-
-    def insert_messages():
-        messages_count = 5
-        while messages_count > 0:
-            test_client.put(
-                '/123/testqueue?MessageBody=test-message&Action=SendMessage'
-                '&Attribute.1.Name=WaitTimeSeconds&Attribute.1.Value=10'
-            )
-            messages_count -= 1
-            time.sleep(.5)
-
-    def get_messages():
-        count = 0
-        while count < 5:
-            msg_res = test_client.get(
-                '/123/testqueue?Action=ReceiveMessage&MaxNumberOfMessages=1&WaitTimeSeconds=5'
-            )
-            new_msgs = re.findall("<Body>(.*?)</Body>",
-                                  msg_res.data.decode('utf-8'))
-            count += len(new_msgs)
-            messages.append(new_msgs)
-
-    get_messages_thread = threading.Thread(target=get_messages)
-    insert_messages_thread = threading.Thread(target=insert_messages)
-
-    get_messages_thread.start()
-    insert_messages_thread.start()
-
-    get_messages_thread.join()
-    insert_messages_thread.join()
-
-    # got each message in a separate call to ReceiveMessage, despite the long
-    # WaitTimeSeconds
-    assert len(messages) == 5
+from __future__ import unicode_literals
+
+import re
+import sure  # noqa
+import threading
+import time
+
+import moto.server as server
+
+'''
+Test the different server responses
+'''
+
+
+def test_sqs_list_identities():
+    backend = server.create_backend_app("sqs")
+    test_client = backend.test_client()
+
+    res = test_client.get('/?Action=ListQueues')
+    res.data.should.contain(b"ListQueuesResponse")
+
+    # Make sure that we can receive messages from queues whose name contains dots (".")
+    # The AWS API mandates that the names of FIFO queues use the suffix ".fifo"
+    # See: https://github.com/spulec/moto/issues/866
+
+    for queue_name in ('testqueue', 'otherqueue.fifo'):
+
+        res = test_client.put('/?Action=CreateQueue&QueueName=%s' % queue_name)
+
+
+        res = test_client.put(
+            '/123/%s?MessageBody=test-message&Action=SendMessage' % queue_name)
+
+        res = test_client.get(
+            '/123/%s?Action=ReceiveMessage&MaxNumberOfMessages=1' % queue_name)
+
+        message = re.search("<Body>(.*?)</Body>",
+                            res.data.decode('utf-8')).groups()[0]
+        message.should.equal('test-message')
+
+    res = test_client.get('/?Action=ListQueues&QueueNamePrefix=other')
+    res.data.should.contain(b'otherqueue.fifo')
+    res.data.should_not.contain(b'testqueue')
+
+
+def test_messages_polling():
+    backend = server.create_backend_app("sqs")
+    test_client = backend.test_client()
+    messages = []
+
+    test_client.put('/?Action=CreateQueue&QueueName=testqueue')
+
+    def insert_messages():
+        messages_count = 5
+        while messages_count > 0:
+            test_client.put(
+                '/123/testqueue?MessageBody=test-message&Action=SendMessage'
+                '&Attribute.1.Name=WaitTimeSeconds&Attribute.1.Value=10'
+            )
+            messages_count -= 1
+            time.sleep(.5)
+
+    def get_messages():
+        count = 0
+        while count < 5:
+            msg_res = test_client.get(
+                '/123/testqueue?Action=ReceiveMessage&MaxNumberOfMessages=1&WaitTimeSeconds=5'
+            )
+            new_msgs = re.findall("<Body>(.*?)</Body>",
+                                  msg_res.data.decode('utf-8'))
+            count += len(new_msgs)
+            messages.append(new_msgs)
+
+    get_messages_thread = threading.Thread(target=get_messages)
+    insert_messages_thread = threading.Thread(target=insert_messages)
+
+    get_messages_thread.start()
+    insert_messages_thread.start()
+
+    get_messages_thread.join()
+    insert_messages_thread.join()
+
+    # got each message in a separate call to ReceiveMessage, despite the long
+    # WaitTimeSeconds
+    assert len(messages) == 5
diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py
index 9beb9a3fa..f070625c0 100644
--- a/tests/test_sqs/test_sqs.py
+++ 
b/tests/test_sqs/test_sqs.py @@ -1,1237 +1,1237 @@ -# -*- coding: utf-8 -*- -from __future__ import unicode_literals -import os - -import boto -import boto3 -import botocore.exceptions -from botocore.exceptions import ClientError -from boto.exception import SQSError -from boto.sqs.message import RawMessage, Message - -from freezegun import freeze_time -import base64 -import json -import sure # noqa -import time -import uuid - -from moto import settings, mock_sqs, mock_sqs_deprecated -from tests.helpers import requires_boto_gte -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises -from nose import SkipTest - - -@mock_sqs -def test_create_fifo_queue_fail(): - sqs = boto3.client('sqs', region_name='us-east-1') - - try: - sqs.create_queue( - QueueName='test-queue', - Attributes={ - 'FifoQueue': 'true', - } - ) - except botocore.exceptions.ClientError as err: - err.response['Error']['Code'].should.equal('InvalidParameterValue') - else: - raise RuntimeError('Should of raised InvalidParameterValue Exception') - - -@mock_sqs -def test_create_queue_with_same_attributes(): - sqs = boto3.client('sqs', region_name='us-east-1') - - dlq_url = sqs.create_queue(QueueName='test-queue-dlq')['QueueUrl'] - dlq_arn = sqs.get_queue_attributes(QueueUrl=dlq_url)['Attributes']['QueueArn'] - - attributes = { - 'DelaySeconds': '900', - 'MaximumMessageSize': '262144', - 'MessageRetentionPeriod': '1209600', - 'ReceiveMessageWaitTimeSeconds': '20', - 'RedrivePolicy': '{"deadLetterTargetArn": "%s", "maxReceiveCount": 100}' % (dlq_arn), - 'VisibilityTimeout': '43200' - } - - sqs.create_queue( - QueueName='test-queue', - Attributes=attributes - ) - - sqs.create_queue( - QueueName='test-queue', - Attributes=attributes - ) - - -@mock_sqs -def test_create_queue_with_different_attributes_fail(): - sqs = boto3.client('sqs', region_name='us-east-1') - - sqs.create_queue( - QueueName='test-queue', - Attributes={ - 'VisibilityTimeout': '10', - } - ) - try: - sqs.create_queue( - QueueName='test-queue', - Attributes={ - 'VisibilityTimeout': '60', - } - ) - except botocore.exceptions.ClientError as err: - err.response['Error']['Code'].should.equal('QueueAlreadyExists') - else: - raise RuntimeError('Should of raised QueueAlreadyExists Exception') - - -@mock_sqs -def test_create_fifo_queue(): - sqs = boto3.client('sqs', region_name='us-east-1') - resp = sqs.create_queue( - QueueName='test-queue.fifo', - Attributes={ - 'FifoQueue': 'true', - } - ) - queue_url = resp['QueueUrl'] - - response = sqs.get_queue_attributes(QueueUrl=queue_url) - response['Attributes'].should.contain('FifoQueue') - response['Attributes']['FifoQueue'].should.equal('true') - - -@mock_sqs -def test_create_queue(): - sqs = boto3.resource('sqs', region_name='us-east-1') - - new_queue = sqs.create_queue(QueueName='test-queue') - new_queue.should_not.be.none - new_queue.should.have.property('url').should.contain('test-queue') - - queue = sqs.get_queue_by_name(QueueName='test-queue') - queue.attributes.get('QueueArn').should_not.be.none - queue.attributes.get('QueueArn').split(':')[-1].should.equal('test-queue') - queue.attributes.get('QueueArn').split(':')[3].should.equal('us-east-1') - queue.attributes.get('VisibilityTimeout').should_not.be.none - queue.attributes.get('VisibilityTimeout').should.equal('30') - - -@mock_sqs -def test_create_queue_kms(): - sqs = boto3.resource('sqs', region_name='us-east-1') - - new_queue = sqs.create_queue( - QueueName='test-queue', - Attributes={ - 'KmsMasterKeyId': 'master-key-id', - 
'KmsDataKeyReusePeriodSeconds': '600' - }) - new_queue.should_not.be.none - - queue = sqs.get_queue_by_name(QueueName='test-queue') - - queue.attributes.get('KmsMasterKeyId').should.equal('master-key-id') - queue.attributes.get('KmsDataKeyReusePeriodSeconds').should.equal('600') - - -@mock_sqs -def test_get_nonexistent_queue(): - sqs = boto3.resource('sqs', region_name='us-east-1') - with assert_raises(ClientError) as err: - sqs.get_queue_by_name(QueueName='nonexisting-queue') - ex = err.exception - ex.operation_name.should.equal('GetQueueUrl') - ex.response['Error']['Code'].should.equal( - 'AWS.SimpleQueueService.NonExistentQueue') - - with assert_raises(ClientError) as err: - sqs.Queue('http://whatever-incorrect-queue-address').load() - ex = err.exception - ex.operation_name.should.equal('GetQueueAttributes') - ex.response['Error']['Code'].should.equal( - 'AWS.SimpleQueueService.NonExistentQueue') - - -@mock_sqs -def test_message_send_without_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - msg = queue.send_message( - MessageBody="derp" - ) - msg.get('MD5OfMessageBody').should.equal( - '58fd9edd83341c29f1aebba81c31e257') - msg.shouldnt.have.key('MD5OfMessageAttributes') - msg.get('MessageId').should_not.contain(' \n') - - messages = queue.receive_messages() - messages.should.have.length_of(1) - - -@mock_sqs -def test_message_send_with_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - msg = queue.send_message( - MessageBody="derp", - MessageAttributes={ - 'timestamp': { - 'StringValue': '1493147359900', - 'DataType': 'Number', - } - } - ) - msg.get('MD5OfMessageBody').should.equal( - '58fd9edd83341c29f1aebba81c31e257') - msg.get('MD5OfMessageAttributes').should.equal( - '235c5c510d26fb653d073faed50ae77c') - msg.get('MessageId').should_not.contain(' \n') - - messages = queue.receive_messages() - messages.should.have.length_of(1) - - -@mock_sqs -def test_message_with_complex_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - msg = queue.send_message( - MessageBody="derp", - MessageAttributes={ - 'ccc': {'StringValue': 'testjunk', 'DataType': 'String'}, - 'aaa': {'BinaryValue': b'\x02\x03\x04', 'DataType': 'Binary'}, - 'zzz': {'DataType': 'Number', 'StringValue': '0230.01'}, - 'öther_encodings': {'DataType': 'String', 'StringValue': 'T\xFCst'} - } - ) - msg.get('MD5OfMessageBody').should.equal( - '58fd9edd83341c29f1aebba81c31e257') - msg.get('MD5OfMessageAttributes').should.equal( - '8ae21a7957029ef04146b42aeaa18a22') - msg.get('MessageId').should_not.contain(' \n') - - messages = queue.receive_messages() - messages.should.have.length_of(1) - - -@mock_sqs -def test_send_message_with_message_group_id(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="test-group-id.fifo", - Attributes={'FifoQueue': 'true'}) - - sent = queue.send_message( - MessageBody="mydata", - MessageDeduplicationId="dedupe_id_1", - MessageGroupId="group_id_1", - ) - - messages = queue.receive_messages() - messages.should.have.length_of(1) - - message_attributes = messages[0].attributes - message_attributes.should.contain('MessageGroupId') - message_attributes['MessageGroupId'].should.equal('group_id_1') - message_attributes.should.contain('MessageDeduplicationId') - message_attributes['MessageDeduplicationId'].should.equal('dedupe_id_1') - - -@mock_sqs -def 
test_send_message_with_unicode_characters(): - body_one = 'Héllo!😀' - - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - msg = queue.send_message(MessageBody=body_one) - - messages = queue.receive_messages() - message_body = messages[0].body - - message_body.should.equal(body_one) - - -@mock_sqs -def test_set_queue_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - - queue.attributes['VisibilityTimeout'].should.equal("30") - - queue.set_attributes(Attributes={"VisibilityTimeout": "45"}) - queue.attributes['VisibilityTimeout'].should.equal("45") - - -@mock_sqs -def test_create_queues_in_multiple_region(): - west1_conn = boto3.client('sqs', region_name='us-west-1') - west1_conn.create_queue(QueueName="blah") - - west2_conn = boto3.client('sqs', region_name='us-west-2') - west2_conn.create_queue(QueueName="test-queue") - - list(west1_conn.list_queues()['QueueUrls']).should.have.length_of(1) - list(west2_conn.list_queues()['QueueUrls']).should.have.length_of(1) - - if settings.TEST_SERVER_MODE: - base_url = 'http://localhost:5000' - else: - base_url = 'https://us-west-1.queue.amazonaws.com' - - west1_conn.list_queues()['QueueUrls'][0].should.equal( - '{base_url}/123456789012/blah'.format(base_url=base_url)) - - -@mock_sqs -def test_get_queue_with_prefix(): - conn = boto3.client("sqs", region_name='us-west-1') - conn.create_queue(QueueName="prefixa-queue") - conn.create_queue(QueueName="prefixb-queue") - conn.create_queue(QueueName="test-queue") - - conn.list_queues()['QueueUrls'].should.have.length_of(3) - - queue = conn.list_queues(QueueNamePrefix="test-")['QueueUrls'] - queue.should.have.length_of(1) - - if settings.TEST_SERVER_MODE: - base_url = 'http://localhost:5000' - else: - base_url = 'https://us-west-1.queue.amazonaws.com' - - queue[0].should.equal( - "{base_url}/123456789012/test-queue".format(base_url=base_url)) - - -@mock_sqs -def test_delete_queue(): - sqs = boto3.resource('sqs', region_name='us-east-1') - conn = boto3.client("sqs", region_name='us-east-1') - conn.create_queue(QueueName="test-queue", - Attributes={"VisibilityTimeout": "3"}) - queue = sqs.Queue('test-queue') - - conn.list_queues()['QueueUrls'].should.have.length_of(1) - - queue.delete() - conn.list_queues().get('QueueUrls').should.equal(None) - - with assert_raises(botocore.exceptions.ClientError): - queue.delete() - - -@mock_sqs -def test_set_queue_attribute(): - sqs = boto3.resource('sqs', region_name='us-east-1') - conn = boto3.client("sqs", region_name='us-east-1') - conn.create_queue(QueueName="test-queue", - Attributes={"VisibilityTimeout": '3'}) - - queue = sqs.Queue("test-queue") - queue.attributes['VisibilityTimeout'].should.equal('3') - - queue.set_attributes(Attributes={"VisibilityTimeout": '45'}) - queue = sqs.Queue("test-queue") - queue.attributes['VisibilityTimeout'].should.equal('45') - - -@mock_sqs -def test_send_receive_message_without_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - conn = boto3.client("sqs", region_name='us-east-1') - conn.create_queue(QueueName="test-queue") - queue = sqs.Queue("test-queue") - - body_one = 'this is a test message' - body_two = 'this is another test message' - - queue.send_message(MessageBody=body_one) - queue.send_message(MessageBody=body_two) - - messages = conn.receive_message( - QueueUrl=queue.url, MaxNumberOfMessages=2)['Messages'] - - message1 = messages[0] - message2 = messages[1] - - 
message1['Body'].should.equal(body_one) - message2['Body'].should.equal(body_two) - - message1.shouldnt.have.key('MD5OfMessageAttributes') - message2.shouldnt.have.key('MD5OfMessageAttributes') - - -@mock_sqs -def test_send_receive_message_with_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - conn = boto3.client("sqs", region_name='us-east-1') - conn.create_queue(QueueName="test-queue") - queue = sqs.Queue("test-queue") - - body_one = 'this is a test message' - body_two = 'this is another test message' - - queue.send_message( - MessageBody=body_one, - MessageAttributes={ - 'timestamp': { - 'StringValue': '1493147359900', - 'DataType': 'Number', - } - } - ) - - queue.send_message( - MessageBody=body_two, - MessageAttributes={ - 'timestamp': { - 'StringValue': '1493147359901', - 'DataType': 'Number', - } - } - ) - - messages = conn.receive_message( - QueueUrl=queue.url, MaxNumberOfMessages=2)['Messages'] - - message1 = messages[0] - message2 = messages[1] - - message1.get('Body').should.equal(body_one) - message2.get('Body').should.equal(body_two) - - message1.get('MD5OfMessageAttributes').should.equal('235c5c510d26fb653d073faed50ae77c') - message2.get('MD5OfMessageAttributes').should.equal('994258b45346a2cc3f9cbb611aa7af30') - - -@mock_sqs -def test_send_receive_message_timestamps(): - sqs = boto3.resource('sqs', region_name='us-east-1') - conn = boto3.client("sqs", region_name='us-east-1') - conn.create_queue(QueueName="test-queue") - queue = sqs.Queue("test-queue") - - queue.send_message(MessageBody="derp") - messages = conn.receive_message( - QueueUrl=queue.url, MaxNumberOfMessages=1)['Messages'] - - message = messages[0] - sent_timestamp = message.get('Attributes').get('SentTimestamp') - approximate_first_receive_timestamp = message.get('Attributes').get('ApproximateFirstReceiveTimestamp') - - int.when.called_with(sent_timestamp).shouldnt.throw(ValueError) - int.when.called_with(approximate_first_receive_timestamp).shouldnt.throw(ValueError) - - -@mock_sqs -def test_max_number_of_messages_invalid_param(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName='test-queue') - - with assert_raises(ClientError): - queue.receive_messages(MaxNumberOfMessages=11) - - with assert_raises(ClientError): - queue.receive_messages(MaxNumberOfMessages=0) - - # no error but also no messages returned - queue.receive_messages(MaxNumberOfMessages=1, WaitTimeSeconds=0) - - -@mock_sqs -def test_wait_time_seconds_invalid_param(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName='test-queue') - - with assert_raises(ClientError): - queue.receive_messages(WaitTimeSeconds=-1) - - with assert_raises(ClientError): - queue.receive_messages(WaitTimeSeconds=21) - - # no error but also no messages returned - queue.receive_messages(WaitTimeSeconds=0) - - -@mock_sqs -def test_receive_messages_with_wait_seconds_timeout_of_zero(): - """ - test that zero messages is returned with a wait_seconds_timeout of zero, - previously this created an infinite loop and nothing was returned - :return: - """ - - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - - messages = queue.receive_messages(WaitTimeSeconds=0) - messages.should.equal([]) - - -@mock_sqs_deprecated -def test_send_message_with_xml_characters(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - body_one = '< & >' 
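+# The '< & >' body above exercises XML escaping: the SQS responses are XML
+# documents, so reserved characters in a message body must survive a round
+# trip through entity encoding. A minimal stdlib-only sketch of that round trip:
+from xml.sax.saxutils import escape, unescape
+
+xml_body = '< & >'
+encoded = escape(xml_body)            # '&lt; &amp; &gt;'
+assert unescape(encoded) == xml_body  # decoding restores the original body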
- - queue.write(queue.new_message(body_one)) - - messages = conn.receive_message(queue, number_messages=1) - - messages[0].get_body().should.equal(body_one) - - -@requires_boto_gte("2.28") -@mock_sqs_deprecated -def test_send_message_with_attributes(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - body = 'this is a test message' - message = queue.new_message(body) - BASE64_BINARY = base64.b64encode(b'binary value').decode('utf-8') - message_attributes = { - 'test.attribute_name': {'data_type': 'String', 'string_value': 'attribute value'}, - 'test.binary_attribute': {'data_type': 'Binary', 'binary_value': BASE64_BINARY}, - 'test.number_attribute': {'data_type': 'Number', 'string_value': 'string value'} - } - message.message_attributes = message_attributes - - queue.write(message) - - messages = conn.receive_message(queue) - - messages[0].get_body().should.equal(body) - - for name, value in message_attributes.items(): - dict(messages[0].message_attributes[name]).should.equal(value) - - -@mock_sqs_deprecated -def test_send_message_with_delay(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - body_one = 'this is a test message' - body_two = 'this is another test message' - - queue.write(queue.new_message(body_one), delay_seconds=3) - queue.write(queue.new_message(body_two)) - - queue.count().should.equal(1) - - messages = conn.receive_message(queue, number_messages=2) - assert len(messages) == 1 - message = messages[0] - assert message.get_body().should.equal(body_two) - queue.count().should.equal(0) - - -@mock_sqs_deprecated -def test_send_large_message_fails(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - body_one = 'test message' * 200000 - huge_message = queue.new_message(body_one) - - queue.write.when.called_with(huge_message).should.throw(SQSError) - - -@mock_sqs_deprecated -def test_message_becomes_inflight_when_received(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=2) - queue.set_message_class(RawMessage) - - body_one = 'this is a test message' - queue.write(queue.new_message(body_one)) - queue.count().should.equal(1) - - messages = conn.receive_message(queue, number_messages=1) - queue.count().should.equal(0) - - assert len(messages) == 1 - - # Wait - time.sleep(3) - - queue.count().should.equal(1) - - -@mock_sqs_deprecated -def test_receive_message_with_explicit_visibility_timeout(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - body_one = 'this is another test message' - queue.write(queue.new_message(body_one)) - - queue.count().should.equal(1) - messages = conn.receive_message( - queue, number_messages=1, visibility_timeout=0) - - assert len(messages) == 1 - - # Message should remain visible - queue.count().should.equal(1) - - -@mock_sqs_deprecated -def test_change_message_visibility(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=2) - queue.set_message_class(RawMessage) - - body_one = 'this is another test message' - queue.write(queue.new_message(body_one)) - - queue.count().should.equal(1) - messages = 
conn.receive_message(queue, number_messages=1) - - assert len(messages) == 1 - - queue.count().should.equal(0) - - messages[0].change_visibility(2) - - # Wait - time.sleep(1) - - # Message is not visible - queue.count().should.equal(0) - - time.sleep(2) - - # Message now becomes visible - queue.count().should.equal(1) - - messages = conn.receive_message(queue, number_messages=1) - messages[0].delete() - queue.count().should.equal(0) - - -@mock_sqs_deprecated -def test_message_attributes(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=2) - queue.set_message_class(RawMessage) - - body_one = 'this is another test message' - queue.write(queue.new_message(body_one)) - - queue.count().should.equal(1) - - messages = conn.receive_message(queue, number_messages=1) - queue.count().should.equal(0) - - assert len(messages) == 1 - - message_attributes = messages[0].attributes - - assert message_attributes.get('ApproximateFirstReceiveTimestamp') - assert int(message_attributes.get('ApproximateReceiveCount')) == 1 - assert message_attributes.get('SentTimestamp') - assert message_attributes.get('SenderId') - - -@mock_sqs_deprecated -def test_read_message_from_queue(): - conn = boto.connect_sqs() - queue = conn.create_queue('testqueue') - queue.set_message_class(RawMessage) - - body = 'foo bar baz' - queue.write(queue.new_message(body)) - message = queue.read(1) - message.get_body().should.equal(body) - - -@mock_sqs_deprecated -def test_queue_length(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - queue.write(queue.new_message('this is a test message')) - queue.write(queue.new_message('this is another test message')) - queue.count().should.equal(2) - - -@mock_sqs_deprecated -def test_delete_message(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - queue.write(queue.new_message('this is a test message')) - queue.write(queue.new_message('this is another test message')) - queue.count().should.equal(2) - - messages = conn.receive_message(queue, number_messages=1) - assert len(messages) == 1 - messages[0].delete() - queue.count().should.equal(1) - - messages = conn.receive_message(queue, number_messages=1) - assert len(messages) == 1 - messages[0].delete() - queue.count().should.equal(0) - - -@mock_sqs_deprecated -def test_send_batch_operation(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - - # See https://github.com/boto/boto/issues/831 - queue.set_message_class(RawMessage) - - queue.write_batch([ - ("my_first_message", 'test message 1', 0), - ("my_second_message", 'test message 2', 0), - ("my_third_message", 'test message 3', 0), - ]) - - messages = queue.get_messages(3) - messages[0].get_body().should.equal("test message 1") - - # Test that pulling more messages doesn't break anything - messages = queue.get_messages(2) - - -@requires_boto_gte("2.28") -@mock_sqs_deprecated -def test_send_batch_operation_with_message_attributes(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - queue.set_message_class(RawMessage) - - message_tuple = ("my_first_message", 'test message 1', 0, { - 'name1': {'data_type': 'String', 'string_value': 'foo'}}) - queue.write_batch([message_tuple]) - - messages = 
queue.get_messages() - messages[0].get_body().should.equal("test message 1") - - for name, value in message_tuple[3].items(): - dict(messages[0].message_attributes[name]).should.equal(value) - - -@mock_sqs_deprecated -def test_delete_batch_operation(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=3) - - conn.send_message_batch(queue, [ - ("my_first_message", 'test message 1', 0), - ("my_second_message", 'test message 2', 0), - ("my_third_message", 'test message 3', 0), - ]) - - messages = queue.get_messages(2) - queue.delete_message_batch(messages) - - queue.count().should.equal(1) - - -@mock_sqs_deprecated -def test_queue_attributes(): - conn = boto.connect_sqs('the_key', 'the_secret') - - queue_name = 'test-queue' - visibility_timeout = 3 - - queue = conn.create_queue( - queue_name, visibility_timeout=visibility_timeout) - - attributes = queue.get_attributes() - - attributes['QueueArn'].should.look_like( - 'arn:aws:sqs:us-east-1:123456789012:%s' % queue_name) - - attributes['VisibilityTimeout'].should.look_like(str(visibility_timeout)) - - attribute_names = queue.get_attributes().keys() - attribute_names.should.contain('ApproximateNumberOfMessagesNotVisible') - attribute_names.should.contain('MessageRetentionPeriod') - attribute_names.should.contain('ApproximateNumberOfMessagesDelayed') - attribute_names.should.contain('MaximumMessageSize') - attribute_names.should.contain('CreatedTimestamp') - attribute_names.should.contain('ApproximateNumberOfMessages') - attribute_names.should.contain('ReceiveMessageWaitTimeSeconds') - attribute_names.should.contain('DelaySeconds') - attribute_names.should.contain('VisibilityTimeout') - attribute_names.should.contain('LastModifiedTimestamp') - attribute_names.should.contain('QueueArn') - - -@mock_sqs_deprecated -def test_change_message_visibility_on_invalid_receipt(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=1) - queue.set_message_class(RawMessage) - - queue.write(queue.new_message('this is another test message')) - queue.count().should.equal(1) - messages = conn.receive_message(queue, number_messages=1) - - assert len(messages) == 1 - - original_message = messages[0] - - queue.count().should.equal(0) - - time.sleep(2) - - queue.count().should.equal(1) - - messages = conn.receive_message(queue, number_messages=1) - - assert len(messages) == 1 - - original_message.change_visibility.when.called_with( - 100).should.throw(SQSError) - - -@mock_sqs_deprecated -def test_change_message_visibility_on_visible_message(): - conn = boto.connect_sqs('the_key', 'the_secret') - queue = conn.create_queue("test-queue", visibility_timeout=1) - queue.set_message_class(RawMessage) - - queue.write(queue.new_message('this is another test message')) - queue.count().should.equal(1) - messages = conn.receive_message(queue, number_messages=1) - - assert len(messages) == 1 - - original_message = messages[0] - - queue.count().should.equal(0) - - time.sleep(2) - - queue.count().should.equal(1) - - original_message.change_visibility.when.called_with( - 100).should.throw(SQSError) - - -@mock_sqs_deprecated -def test_purge_action(): - conn = boto.sqs.connect_to_region("us-east-1") - - queue = conn.create_queue('new-queue') - queue.write(queue.new_message('this is another test message')) - queue.count().should.equal(1) - - queue.purge() - - queue.count().should.equal(0) - - -@mock_sqs_deprecated -def test_delete_message_after_visibility_timeout(): - 
VISIBILITY_TIMEOUT = 1 - conn = boto.sqs.connect_to_region("us-east-1") - new_queue = conn.create_queue( - 'new-queue', visibility_timeout=VISIBILITY_TIMEOUT) - - m1 = Message() - m1.set_body('Message 1!') - new_queue.write(m1) - - assert new_queue.count() == 1 - - m1_retrieved = new_queue.read() - - time.sleep(VISIBILITY_TIMEOUT + 1) - - m1_retrieved.delete() - - assert new_queue.count() == 0 - - -@mock_sqs -def test_batch_change_message_visibility(): - if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true': - raise SkipTest('Cant manipulate time in server mode') - - with freeze_time("2015-01-01 12:00:00"): - sqs = boto3.client('sqs', region_name='us-east-1') - resp = sqs.create_queue( - QueueName='test-dlr-queue.fifo', - Attributes={'FifoQueue': 'true'} - ) - queue_url = resp['QueueUrl'] - - sqs.send_message(QueueUrl=queue_url, MessageBody='msg1') - sqs.send_message(QueueUrl=queue_url, MessageBody='msg2') - sqs.send_message(QueueUrl=queue_url, MessageBody='msg3') - - with freeze_time("2015-01-01 12:01:00"): - receive_resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=2) - len(receive_resp['Messages']).should.equal(2) - - handles = [item['ReceiptHandle'] for item in receive_resp['Messages']] - entries = [{'Id': str(uuid.uuid4()), 'ReceiptHandle': handle, 'VisibilityTimeout': 43200} for handle in handles] - - resp = sqs.change_message_visibility_batch(QueueUrl=queue_url, Entries=entries) - len(resp['Successful']).should.equal(2) - - with freeze_time("2015-01-01 14:00:00"): - resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3) - len(resp['Messages']).should.equal(1) - - with freeze_time("2015-01-01 16:00:00"): - resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3) - len(resp['Messages']).should.equal(1) - - with freeze_time("2015-01-02 12:00:00"): - resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3) - len(resp['Messages']).should.equal(3) - - -@mock_sqs -def test_permissions(): - client = boto3.client('sqs', region_name='us-east-1') - - resp = client.create_queue( - QueueName='test-dlr-queue.fifo', - Attributes={'FifoQueue': 'true'} - ) - queue_url = resp['QueueUrl'] - - client.add_permission(QueueUrl=queue_url, Label='account1', AWSAccountIds=['111111111111'], Actions=['*']) - client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SendMessage']) - - with assert_raises(ClientError): - client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SomeRubbish']) - - client.remove_permission(QueueUrl=queue_url, Label='account2') - - with assert_raises(ClientError): - client.remove_permission(QueueUrl=queue_url, Label='non_existant') - - -@mock_sqs -def test_tags(): - client = boto3.client('sqs', region_name='us-east-1') - - resp = client.create_queue( - QueueName='test-dlr-queue.fifo', - Attributes={'FifoQueue': 'true'} - ) - queue_url = resp['QueueUrl'] - - client.tag_queue( - QueueUrl=queue_url, - Tags={ - 'test1': 'value1', - 'test2': 'value2', - } - ) - - resp = client.list_queue_tags(QueueUrl=queue_url) - resp['Tags'].should.contain('test1') - resp['Tags'].should.contain('test2') - - client.untag_queue( - QueueUrl=queue_url, - TagKeys=['test2'] - ) - - resp = client.list_queue_tags(QueueUrl=queue_url) - resp['Tags'].should.contain('test1') - resp['Tags'].should_not.contain('test2') - - -@mock_sqs -def test_create_fifo_queue_with_dlq(): - sqs = boto3.client('sqs', region_name='us-east-1') - resp = sqs.create_queue( - 
QueueName='test-dlr-queue.fifo', - Attributes={'FifoQueue': 'true'} - ) - queue_url1 = resp['QueueUrl'] - queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn'] - - resp = sqs.create_queue( - QueueName='test-dlr-queue', - Attributes={'FifoQueue': 'false'} - ) - queue_url2 = resp['QueueUrl'] - queue_arn2 = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes']['QueueArn'] - - sqs.create_queue( - QueueName='test-queue.fifo', - Attributes={ - 'FifoQueue': 'true', - 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2}) - } - ) - - # Cant have fifo queue with non fifo DLQ - with assert_raises(ClientError): - sqs.create_queue( - QueueName='test-queue2.fifo', - Attributes={ - 'FifoQueue': 'true', - 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn2, 'maxReceiveCount': 2}) - } - ) - - -@mock_sqs -def test_queue_with_dlq(): - if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true': - raise SkipTest('Cant manipulate time in server mode') - - sqs = boto3.client('sqs', region_name='us-east-1') - - with freeze_time("2015-01-01 12:00:00"): - resp = sqs.create_queue( - QueueName='test-dlr-queue.fifo', - Attributes={'FifoQueue': 'true'} - ) - queue_url1 = resp['QueueUrl'] - queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn'] - - resp = sqs.create_queue( - QueueName='test-queue.fifo', - Attributes={ - 'FifoQueue': 'true', - 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2}) - } - ) - queue_url2 = resp['QueueUrl'] - - sqs.send_message(QueueUrl=queue_url2, MessageBody='msg1') - sqs.send_message(QueueUrl=queue_url2, MessageBody='msg2') - - with freeze_time("2015-01-01 13:00:00"): - resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0) - resp['Messages'][0]['Body'].should.equal('msg1') - - with freeze_time("2015-01-01 13:01:00"): - resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0) - resp['Messages'][0]['Body'].should.equal('msg1') - - with freeze_time("2015-01-01 13:02:00"): - resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0) - len(resp['Messages']).should.equal(1) - - resp = sqs.receive_message(QueueUrl=queue_url1, VisibilityTimeout=30, WaitTimeSeconds=0) - resp['Messages'][0]['Body'].should.equal('msg1') - - # Might as well test list source queues - - resp = sqs.list_dead_letter_source_queues(QueueUrl=queue_url1) - resp['queueUrls'][0].should.equal(queue_url2) - - -@mock_sqs -def test_redrive_policy_available(): - sqs = boto3.client('sqs', region_name='us-east-1') - - resp = sqs.create_queue(QueueName='test-deadletter') - queue_url1 = resp['QueueUrl'] - queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn'] - redrive_policy = { - 'deadLetterTargetArn': queue_arn1, - 'maxReceiveCount': 1, - } - - resp = sqs.create_queue( - QueueName='test-queue', - Attributes={ - 'RedrivePolicy': json.dumps(redrive_policy) - } - ) - - queue_url2 = resp['QueueUrl'] - attributes = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes'] - assert 'RedrivePolicy' in attributes - assert json.loads(attributes['RedrivePolicy']) == redrive_policy - - # Cant have redrive policy without maxReceiveCount - with assert_raises(ClientError): - sqs.create_queue( - QueueName='test-queue2', - Attributes={ - 'FifoQueue': 'true', - 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1}) - } - ) - - -@mock_sqs -def 
test_redrive_policy_non_existent_queue(): - sqs = boto3.client('sqs', region_name='us-east-1') - redrive_policy = { - 'deadLetterTargetArn': 'arn:aws:sqs:us-east-1:123456789012:no-queue', - 'maxReceiveCount': 1, - } - - with assert_raises(ClientError): - sqs.create_queue( - QueueName='test-queue', - Attributes={ - 'RedrivePolicy': json.dumps(redrive_policy) - } - ) - - -@mock_sqs -def test_redrive_policy_set_attributes(): - sqs = boto3.resource('sqs', region_name='us-east-1') - - queue = sqs.create_queue(QueueName='test-queue') - deadletter_queue = sqs.create_queue(QueueName='test-deadletter') - - redrive_policy = { - 'deadLetterTargetArn': deadletter_queue.attributes['QueueArn'], - 'maxReceiveCount': 1, - } - - queue.set_attributes(Attributes={ - 'RedrivePolicy': json.dumps(redrive_policy)}) - - copy = sqs.get_queue_by_name(QueueName='test-queue') - assert 'RedrivePolicy' in copy.attributes - copy_policy = json.loads(copy.attributes['RedrivePolicy']) - assert copy_policy == redrive_policy - - -@mock_sqs -def test_receive_messages_with_message_group_id(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="test-queue.fifo", - Attributes={ - 'FifoQueue': 'true', - }) - queue.set_attributes(Attributes={"VisibilityTimeout": "3600"}) - queue.send_message( - MessageBody="message-1", - MessageGroupId="group" - ) - queue.send_message( - MessageBody="message-2", - MessageGroupId="group" - ) - - messages = queue.receive_messages() - messages.should.have.length_of(1) - message = messages[0] - - # received message is not deleted! - - messages = queue.receive_messages(WaitTimeSeconds=0) - messages.should.have.length_of(0) - - # message is now processed, next one should be available - message.delete() - messages = queue.receive_messages() - messages.should.have.length_of(1) - - -@mock_sqs -def test_receive_messages_with_message_group_id_on_requeue(): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="test-queue.fifo", - Attributes={ - 'FifoQueue': 'true', - }) - queue.set_attributes(Attributes={"VisibilityTimeout": "3600"}) - queue.send_message( - MessageBody="message-1", - MessageGroupId="group" - ) - queue.send_message( - MessageBody="message-2", - MessageGroupId="group" - ) - - messages = queue.receive_messages() - messages.should.have.length_of(1) - message = messages[0] - - # received message is not deleted! - - messages = queue.receive_messages(WaitTimeSeconds=0) - messages.should.have.length_of(0) - - # message is now available again, next one should be available - message.change_visibility(VisibilityTimeout=0) - messages = queue.receive_messages() - messages.should.have.length_of(1) - messages[0].message_id.should.equal(message.message_id) - - -@mock_sqs -def test_receive_messages_with_message_group_id_on_visibility_timeout(): - if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true': - raise SkipTest('Cant manipulate time in server mode') - - with freeze_time("2015-01-01 12:00:00"): - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="test-queue.fifo", - Attributes={ - 'FifoQueue': 'true', - }) - queue.set_attributes(Attributes={"VisibilityTimeout": "3600"}) - queue.send_message( - MessageBody="message-1", - MessageGroupId="group" - ) - queue.send_message( - MessageBody="message-2", - MessageGroupId="group" - ) - - messages = queue.receive_messages() - messages.should.have.length_of(1) - message = messages[0] - - # received message is not deleted! 
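+# FIFO ordering is enforced per message group: while one message from a group
+# is in flight, ReceiveMessage returns nothing else from that group, which is
+# what the surrounding tests check via delete, requeue and visibility timeout.
+# A minimal sketch of the pattern (queue name, group id and deduplication ids
+# are illustrative; run under @mock_sqs or against real credentials):
+import boto3
+
+fifo_sqs = boto3.resource('sqs', region_name='us-east-1')
+fifo_queue = fifo_sqs.create_queue(QueueName='ordering-demo.fifo',
+                                   Attributes={'FifoQueue': 'true'})
+fifo_queue.send_message(MessageBody='first', MessageGroupId='g1',
+                        MessageDeduplicationId='d1')
+fifo_queue.send_message(MessageBody='second', MessageGroupId='g1',
+                        MessageDeduplicationId='d2')
+(first,) = fifo_queue.receive_messages()
+# 'second' stays invisible while 'first' is in flight ...
+assert fifo_queue.receive_messages(WaitTimeSeconds=0) == []
+first.delete()
+# ... and becomes receivable once 'first' is deleted
+(second,) = fifo_queue.receive_messages()
+assert second.body == 'second'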
-
-        messages = queue.receive_messages(WaitTimeSeconds=0)
-        messages.should.have.length_of(0)
-
-        message.change_visibility(VisibilityTimeout=10)
-
-    with freeze_time("2015-01-01 12:00:05"):
-        # no timeout yet
-        messages = queue.receive_messages(WaitTimeSeconds=0)
-        messages.should.have.length_of(0)
-
-    with freeze_time("2015-01-01 12:00:15"):
-        # message is now available again, next one should be available
-        messages = queue.receive_messages()
-        messages.should.have.length_of(1)
-        messages[0].message_id.should.equal(message.message_id)
-
-@mock_sqs
-def test_receive_message_for_queue_with_receive_message_wait_time_seconds_set():
-    sqs = boto3.resource('sqs', region_name='us-east-1')
-
-    queue = sqs.create_queue(
-        QueueName='test-queue',
-        Attributes={
-            'ReceiveMessageWaitTimeSeconds': '2',
-        }
-    )
-
-    queue.receive_messages()
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+import os
+
+import boto
+import boto3
+import botocore.exceptions
+from botocore.exceptions import ClientError
+from boto.exception import SQSError
+from boto.sqs.message import RawMessage, Message
+
+from freezegun import freeze_time
+import base64
+import json
+import sure  # noqa
+import time
+import uuid
+
+from moto import settings, mock_sqs, mock_sqs_deprecated
+from tests.helpers import requires_boto_gte
+import tests.backport_assert_raises  # noqa
+from nose.tools import assert_raises
+from nose import SkipTest
+
+
+@mock_sqs
+def test_create_fifo_queue_fail():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+
+    try:
+        sqs.create_queue(
+            QueueName='test-queue',
+            Attributes={
+                'FifoQueue': 'true',
+            }
+        )
+    except botocore.exceptions.ClientError as err:
+        err.response['Error']['Code'].should.equal('InvalidParameterValue')
+    else:
+        raise RuntimeError('Should have raised InvalidParameterValue Exception')
+
+
+@mock_sqs
+def test_create_queue_with_same_attributes():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+
+    dlq_url = sqs.create_queue(QueueName='test-queue-dlq')['QueueUrl']
+    dlq_arn = sqs.get_queue_attributes(QueueUrl=dlq_url)['Attributes']['QueueArn']
+
+    attributes = {
+        'DelaySeconds': '900',
+        'MaximumMessageSize': '262144',
+        'MessageRetentionPeriod': '1209600',
+        'ReceiveMessageWaitTimeSeconds': '20',
+        'RedrivePolicy': '{"deadLetterTargetArn": "%s", "maxReceiveCount": 100}' % (dlq_arn),
+        'VisibilityTimeout': '43200'
+    }
+
+    sqs.create_queue(
+        QueueName='test-queue',
+        Attributes=attributes
+    )
+
+    sqs.create_queue(
+        QueueName='test-queue',
+        Attributes=attributes
+    )
+
+
+@mock_sqs
+def test_create_queue_with_different_attributes_fail():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+
+    sqs.create_queue(
+        QueueName='test-queue',
+        Attributes={
+            'VisibilityTimeout': '10',
+        }
+    )
+    try:
+        sqs.create_queue(
+            QueueName='test-queue',
+            Attributes={
+                'VisibilityTimeout': '60',
+            }
+        )
+    except botocore.exceptions.ClientError as err:
+        err.response['Error']['Code'].should.equal('QueueAlreadyExists')
+    else:
+        raise RuntimeError('Should have raised QueueAlreadyExists Exception')
+
+
+@mock_sqs
+def test_create_fifo_queue():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+    resp = sqs.create_queue(
+        QueueName='test-queue.fifo',
+        Attributes={
+            'FifoQueue': 'true',
+        }
+    )
+    queue_url = resp['QueueUrl']
+
+    response = sqs.get_queue_attributes(QueueUrl=queue_url)
+    response['Attributes'].should.contain('FifoQueue')
+    response['Attributes']['FifoQueue'].should.equal('true')
+
+
+@mock_sqs
+def test_create_queue():
+    sqs = boto3.resource('sqs', 
region_name='us-east-1') + + new_queue = sqs.create_queue(QueueName='test-queue') + new_queue.should_not.be.none + new_queue.should.have.property('url').should.contain('test-queue') + + queue = sqs.get_queue_by_name(QueueName='test-queue') + queue.attributes.get('QueueArn').should_not.be.none + queue.attributes.get('QueueArn').split(':')[-1].should.equal('test-queue') + queue.attributes.get('QueueArn').split(':')[3].should.equal('us-east-1') + queue.attributes.get('VisibilityTimeout').should_not.be.none + queue.attributes.get('VisibilityTimeout').should.equal('30') + + +@mock_sqs +def test_create_queue_kms(): + sqs = boto3.resource('sqs', region_name='us-east-1') + + new_queue = sqs.create_queue( + QueueName='test-queue', + Attributes={ + 'KmsMasterKeyId': 'master-key-id', + 'KmsDataKeyReusePeriodSeconds': '600' + }) + new_queue.should_not.be.none + + queue = sqs.get_queue_by_name(QueueName='test-queue') + + queue.attributes.get('KmsMasterKeyId').should.equal('master-key-id') + queue.attributes.get('KmsDataKeyReusePeriodSeconds').should.equal('600') + + +@mock_sqs +def test_get_nonexistent_queue(): + sqs = boto3.resource('sqs', region_name='us-east-1') + with assert_raises(ClientError) as err: + sqs.get_queue_by_name(QueueName='nonexisting-queue') + ex = err.exception + ex.operation_name.should.equal('GetQueueUrl') + ex.response['Error']['Code'].should.equal( + 'AWS.SimpleQueueService.NonExistentQueue') + + with assert_raises(ClientError) as err: + sqs.Queue('http://whatever-incorrect-queue-address').load() + ex = err.exception + ex.operation_name.should.equal('GetQueueAttributes') + ex.response['Error']['Code'].should.equal( + 'AWS.SimpleQueueService.NonExistentQueue') + + +@mock_sqs +def test_message_send_without_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="blah") + msg = queue.send_message( + MessageBody="derp" + ) + msg.get('MD5OfMessageBody').should.equal( + '58fd9edd83341c29f1aebba81c31e257') + msg.shouldnt.have.key('MD5OfMessageAttributes') + msg.get('MessageId').should_not.contain(' \n') + + messages = queue.receive_messages() + messages.should.have.length_of(1) + + +@mock_sqs +def test_message_send_with_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="blah") + msg = queue.send_message( + MessageBody="derp", + MessageAttributes={ + 'timestamp': { + 'StringValue': '1493147359900', + 'DataType': 'Number', + } + } + ) + msg.get('MD5OfMessageBody').should.equal( + '58fd9edd83341c29f1aebba81c31e257') + msg.get('MD5OfMessageAttributes').should.equal( + '235c5c510d26fb653d073faed50ae77c') + msg.get('MessageId').should_not.contain(' \n') + + messages = queue.receive_messages() + messages.should.have.length_of(1) + + +@mock_sqs +def test_message_with_complex_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="blah") + msg = queue.send_message( + MessageBody="derp", + MessageAttributes={ + 'ccc': {'StringValue': 'testjunk', 'DataType': 'String'}, + 'aaa': {'BinaryValue': b'\x02\x03\x04', 'DataType': 'Binary'}, + 'zzz': {'DataType': 'Number', 'StringValue': '0230.01'}, + 'öther_encodings': {'DataType': 'String', 'StringValue': 'T\xFCst'} + } + ) + msg.get('MD5OfMessageBody').should.equal( + '58fd9edd83341c29f1aebba81c31e257') + msg.get('MD5OfMessageAttributes').should.equal( + '8ae21a7957029ef04146b42aeaa18a22') + msg.get('MessageId').should_not.contain(' \n') + + messages = queue.receive_messages() + 
messages.should.have.length_of(1) + + +@mock_sqs +def test_send_message_with_message_group_id(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="test-group-id.fifo", + Attributes={'FifoQueue': 'true'}) + + sent = queue.send_message( + MessageBody="mydata", + MessageDeduplicationId="dedupe_id_1", + MessageGroupId="group_id_1", + ) + + messages = queue.receive_messages() + messages.should.have.length_of(1) + + message_attributes = messages[0].attributes + message_attributes.should.contain('MessageGroupId') + message_attributes['MessageGroupId'].should.equal('group_id_1') + message_attributes.should.contain('MessageDeduplicationId') + message_attributes['MessageDeduplicationId'].should.equal('dedupe_id_1') + + +@mock_sqs +def test_send_message_with_unicode_characters(): + body_one = 'Héllo!😀' + + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="blah") + msg = queue.send_message(MessageBody=body_one) + + messages = queue.receive_messages() + message_body = messages[0].body + + message_body.should.equal(body_one) + + +@mock_sqs +def test_set_queue_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="blah") + + queue.attributes['VisibilityTimeout'].should.equal("30") + + queue.set_attributes(Attributes={"VisibilityTimeout": "45"}) + queue.attributes['VisibilityTimeout'].should.equal("45") + + +@mock_sqs +def test_create_queues_in_multiple_region(): + west1_conn = boto3.client('sqs', region_name='us-west-1') + west1_conn.create_queue(QueueName="blah") + + west2_conn = boto3.client('sqs', region_name='us-west-2') + west2_conn.create_queue(QueueName="test-queue") + + list(west1_conn.list_queues()['QueueUrls']).should.have.length_of(1) + list(west2_conn.list_queues()['QueueUrls']).should.have.length_of(1) + + if settings.TEST_SERVER_MODE: + base_url = 'http://localhost:5000' + else: + base_url = 'https://us-west-1.queue.amazonaws.com' + + west1_conn.list_queues()['QueueUrls'][0].should.equal( + '{base_url}/123456789012/blah'.format(base_url=base_url)) + + +@mock_sqs +def test_get_queue_with_prefix(): + conn = boto3.client("sqs", region_name='us-west-1') + conn.create_queue(QueueName="prefixa-queue") + conn.create_queue(QueueName="prefixb-queue") + conn.create_queue(QueueName="test-queue") + + conn.list_queues()['QueueUrls'].should.have.length_of(3) + + queue = conn.list_queues(QueueNamePrefix="test-")['QueueUrls'] + queue.should.have.length_of(1) + + if settings.TEST_SERVER_MODE: + base_url = 'http://localhost:5000' + else: + base_url = 'https://us-west-1.queue.amazonaws.com' + + queue[0].should.equal( + "{base_url}/123456789012/test-queue".format(base_url=base_url)) + + +@mock_sqs +def test_delete_queue(): + sqs = boto3.resource('sqs', region_name='us-east-1') + conn = boto3.client("sqs", region_name='us-east-1') + conn.create_queue(QueueName="test-queue", + Attributes={"VisibilityTimeout": "3"}) + queue = sqs.Queue('test-queue') + + conn.list_queues()['QueueUrls'].should.have.length_of(1) + + queue.delete() + conn.list_queues().get('QueueUrls').should.equal(None) + + with assert_raises(botocore.exceptions.ClientError): + queue.delete() + + +@mock_sqs +def test_set_queue_attribute(): + sqs = boto3.resource('sqs', region_name='us-east-1') + conn = boto3.client("sqs", region_name='us-east-1') + conn.create_queue(QueueName="test-queue", + Attributes={"VisibilityTimeout": '3'}) + + queue = sqs.Queue("test-queue") + 
queue.attributes['VisibilityTimeout'].should.equal('3') + + queue.set_attributes(Attributes={"VisibilityTimeout": '45'}) + queue = sqs.Queue("test-queue") + queue.attributes['VisibilityTimeout'].should.equal('45') + + +@mock_sqs +def test_send_receive_message_without_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + conn = boto3.client("sqs", region_name='us-east-1') + conn.create_queue(QueueName="test-queue") + queue = sqs.Queue("test-queue") + + body_one = 'this is a test message' + body_two = 'this is another test message' + + queue.send_message(MessageBody=body_one) + queue.send_message(MessageBody=body_two) + + messages = conn.receive_message( + QueueUrl=queue.url, MaxNumberOfMessages=2)['Messages'] + + message1 = messages[0] + message2 = messages[1] + + message1['Body'].should.equal(body_one) + message2['Body'].should.equal(body_two) + + message1.shouldnt.have.key('MD5OfMessageAttributes') + message2.shouldnt.have.key('MD5OfMessageAttributes') + + +@mock_sqs +def test_send_receive_message_with_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + conn = boto3.client("sqs", region_name='us-east-1') + conn.create_queue(QueueName="test-queue") + queue = sqs.Queue("test-queue") + + body_one = 'this is a test message' + body_two = 'this is another test message' + + queue.send_message( + MessageBody=body_one, + MessageAttributes={ + 'timestamp': { + 'StringValue': '1493147359900', + 'DataType': 'Number', + } + } + ) + + queue.send_message( + MessageBody=body_two, + MessageAttributes={ + 'timestamp': { + 'StringValue': '1493147359901', + 'DataType': 'Number', + } + } + ) + + messages = conn.receive_message( + QueueUrl=queue.url, MaxNumberOfMessages=2)['Messages'] + + message1 = messages[0] + message2 = messages[1] + + message1.get('Body').should.equal(body_one) + message2.get('Body').should.equal(body_two) + + message1.get('MD5OfMessageAttributes').should.equal('235c5c510d26fb653d073faed50ae77c') + message2.get('MD5OfMessageAttributes').should.equal('994258b45346a2cc3f9cbb611aa7af30') + + +@mock_sqs +def test_send_receive_message_timestamps(): + sqs = boto3.resource('sqs', region_name='us-east-1') + conn = boto3.client("sqs", region_name='us-east-1') + conn.create_queue(QueueName="test-queue") + queue = sqs.Queue("test-queue") + + queue.send_message(MessageBody="derp") + messages = conn.receive_message( + QueueUrl=queue.url, MaxNumberOfMessages=1)['Messages'] + + message = messages[0] + sent_timestamp = message.get('Attributes').get('SentTimestamp') + approximate_first_receive_timestamp = message.get('Attributes').get('ApproximateFirstReceiveTimestamp') + + int.when.called_with(sent_timestamp).shouldnt.throw(ValueError) + int.when.called_with(approximate_first_receive_timestamp).shouldnt.throw(ValueError) + + +@mock_sqs +def test_max_number_of_messages_invalid_param(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName='test-queue') + + with assert_raises(ClientError): + queue.receive_messages(MaxNumberOfMessages=11) + + with assert_raises(ClientError): + queue.receive_messages(MaxNumberOfMessages=0) + + # no error but also no messages returned + queue.receive_messages(MaxNumberOfMessages=1, WaitTimeSeconds=0) + + +@mock_sqs +def test_wait_time_seconds_invalid_param(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName='test-queue') + + with assert_raises(ClientError): + queue.receive_messages(WaitTimeSeconds=-1) + + with assert_raises(ClientError): + 
queue.receive_messages(WaitTimeSeconds=21)
+
+    # no error but also no messages returned
+    queue.receive_messages(WaitTimeSeconds=0)
+
+
+@mock_sqs
+def test_receive_messages_with_wait_seconds_timeout_of_zero():
+    """
+    Test that zero messages are returned with a WaitTimeSeconds of zero;
+    previously this created an infinite loop and nothing was returned.
+    """
+
+    sqs = boto3.resource('sqs', region_name='us-east-1')
+    queue = sqs.create_queue(QueueName="blah")
+
+    messages = queue.receive_messages(WaitTimeSeconds=0)
+    messages.should.equal([])
+
+
+@mock_sqs_deprecated
+def test_send_message_with_xml_characters():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=3)
+    queue.set_message_class(RawMessage)
+
+    body_one = '< & >'
+
+    queue.write(queue.new_message(body_one))
+
+    messages = conn.receive_message(queue, number_messages=1)
+
+    messages[0].get_body().should.equal(body_one)
+
+
+@requires_boto_gte("2.28")
+@mock_sqs_deprecated
+def test_send_message_with_attributes():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=3)
+    queue.set_message_class(RawMessage)
+
+    body = 'this is a test message'
+    message = queue.new_message(body)
+    BASE64_BINARY = base64.b64encode(b'binary value').decode('utf-8')
+    message_attributes = {
+        'test.attribute_name': {'data_type': 'String', 'string_value': 'attribute value'},
+        'test.binary_attribute': {'data_type': 'Binary', 'binary_value': BASE64_BINARY},
+        'test.number_attribute': {'data_type': 'Number', 'string_value': 'string value'}
+    }
+    message.message_attributes = message_attributes
+
+    queue.write(message)
+
+    messages = conn.receive_message(queue)
+
+    messages[0].get_body().should.equal(body)
+
+    for name, value in message_attributes.items():
+        dict(messages[0].message_attributes[name]).should.equal(value)
+
+
+@mock_sqs_deprecated
+def test_send_message_with_delay():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=3)
+    queue.set_message_class(RawMessage)
+
+    body_one = 'this is a test message'
+    body_two = 'this is another test message'
+
+    queue.write(queue.new_message(body_one), delay_seconds=3)
+    queue.write(queue.new_message(body_two))
+
+    queue.count().should.equal(1)
+
+    messages = conn.receive_message(queue, number_messages=2)
+    assert len(messages) == 1
+    message = messages[0]
+    message.get_body().should.equal(body_two)
+    queue.count().should.equal(0)
+
+
+@mock_sqs_deprecated
+def test_send_large_message_fails():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=3)
+    queue.set_message_class(RawMessage)
+
+    body_one = 'test message' * 200000
+    huge_message = queue.new_message(body_one)
+
+    queue.write.when.called_with(huge_message).should.throw(SQSError)
+
+
+@mock_sqs_deprecated
+def test_message_becomes_inflight_when_received():
+    conn = boto.connect_sqs('the_key', 'the_secret')
+    queue = conn.create_queue("test-queue", visibility_timeout=2)
+    queue.set_message_class(RawMessage)
+
+    body_one = 'this is a test message'
+    queue.write(queue.new_message(body_one))
+    queue.count().should.equal(1)
+
+    messages = conn.receive_message(queue, number_messages=1)
+    queue.count().should.equal(0)
+
+    assert len(messages) == 1
+
+    # Wait
+    time.sleep(3)
+
+    queue.count().should.equal(1)
+
+
+@mock_sqs_deprecated
+def test_receive_message_with_explicit_visibility_timeout():
+    conn =
boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=3) + queue.set_message_class(RawMessage) + + body_one = 'this is another test message' + queue.write(queue.new_message(body_one)) + + queue.count().should.equal(1) + messages = conn.receive_message( + queue, number_messages=1, visibility_timeout=0) + + assert len(messages) == 1 + + # Message should remain visible + queue.count().should.equal(1) + + +@mock_sqs_deprecated +def test_change_message_visibility(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=2) + queue.set_message_class(RawMessage) + + body_one = 'this is another test message' + queue.write(queue.new_message(body_one)) + + queue.count().should.equal(1) + messages = conn.receive_message(queue, number_messages=1) + + assert len(messages) == 1 + + queue.count().should.equal(0) + + messages[0].change_visibility(2) + + # Wait + time.sleep(1) + + # Message is not visible + queue.count().should.equal(0) + + time.sleep(2) + + # Message now becomes visible + queue.count().should.equal(1) + + messages = conn.receive_message(queue, number_messages=1) + messages[0].delete() + queue.count().should.equal(0) + + +@mock_sqs_deprecated +def test_message_attributes(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=2) + queue.set_message_class(RawMessage) + + body_one = 'this is another test message' + queue.write(queue.new_message(body_one)) + + queue.count().should.equal(1) + + messages = conn.receive_message(queue, number_messages=1) + queue.count().should.equal(0) + + assert len(messages) == 1 + + message_attributes = messages[0].attributes + + assert message_attributes.get('ApproximateFirstReceiveTimestamp') + assert int(message_attributes.get('ApproximateReceiveCount')) == 1 + assert message_attributes.get('SentTimestamp') + assert message_attributes.get('SenderId') + + +@mock_sqs_deprecated +def test_read_message_from_queue(): + conn = boto.connect_sqs() + queue = conn.create_queue('testqueue') + queue.set_message_class(RawMessage) + + body = 'foo bar baz' + queue.write(queue.new_message(body)) + message = queue.read(1) + message.get_body().should.equal(body) + + +@mock_sqs_deprecated +def test_queue_length(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=3) + queue.set_message_class(RawMessage) + + queue.write(queue.new_message('this is a test message')) + queue.write(queue.new_message('this is another test message')) + queue.count().should.equal(2) + + +@mock_sqs_deprecated +def test_delete_message(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=3) + queue.set_message_class(RawMessage) + + queue.write(queue.new_message('this is a test message')) + queue.write(queue.new_message('this is another test message')) + queue.count().should.equal(2) + + messages = conn.receive_message(queue, number_messages=1) + assert len(messages) == 1 + messages[0].delete() + queue.count().should.equal(1) + + messages = conn.receive_message(queue, number_messages=1) + assert len(messages) == 1 + messages[0].delete() + queue.count().should.equal(0) + + +@mock_sqs_deprecated +def test_send_batch_operation(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=3) + + # See https://github.com/boto/boto/issues/831 + 
queue.set_message_class(RawMessage) + + queue.write_batch([ + ("my_first_message", 'test message 1', 0), + ("my_second_message", 'test message 2', 0), + ("my_third_message", 'test message 3', 0), + ]) + + messages = queue.get_messages(3) + messages[0].get_body().should.equal("test message 1") + + # Test that pulling more messages doesn't break anything + messages = queue.get_messages(2) + + +@requires_boto_gte("2.28") +@mock_sqs_deprecated +def test_send_batch_operation_with_message_attributes(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=3) + queue.set_message_class(RawMessage) + + message_tuple = ("my_first_message", 'test message 1', 0, { + 'name1': {'data_type': 'String', 'string_value': 'foo'}}) + queue.write_batch([message_tuple]) + + messages = queue.get_messages() + messages[0].get_body().should.equal("test message 1") + + for name, value in message_tuple[3].items(): + dict(messages[0].message_attributes[name]).should.equal(value) + + +@mock_sqs_deprecated +def test_delete_batch_operation(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=3) + + conn.send_message_batch(queue, [ + ("my_first_message", 'test message 1', 0), + ("my_second_message", 'test message 2', 0), + ("my_third_message", 'test message 3', 0), + ]) + + messages = queue.get_messages(2) + queue.delete_message_batch(messages) + + queue.count().should.equal(1) + + +@mock_sqs_deprecated +def test_queue_attributes(): + conn = boto.connect_sqs('the_key', 'the_secret') + + queue_name = 'test-queue' + visibility_timeout = 3 + + queue = conn.create_queue( + queue_name, visibility_timeout=visibility_timeout) + + attributes = queue.get_attributes() + + attributes['QueueArn'].should.look_like( + 'arn:aws:sqs:us-east-1:123456789012:%s' % queue_name) + + attributes['VisibilityTimeout'].should.look_like(str(visibility_timeout)) + + attribute_names = queue.get_attributes().keys() + attribute_names.should.contain('ApproximateNumberOfMessagesNotVisible') + attribute_names.should.contain('MessageRetentionPeriod') + attribute_names.should.contain('ApproximateNumberOfMessagesDelayed') + attribute_names.should.contain('MaximumMessageSize') + attribute_names.should.contain('CreatedTimestamp') + attribute_names.should.contain('ApproximateNumberOfMessages') + attribute_names.should.contain('ReceiveMessageWaitTimeSeconds') + attribute_names.should.contain('DelaySeconds') + attribute_names.should.contain('VisibilityTimeout') + attribute_names.should.contain('LastModifiedTimestamp') + attribute_names.should.contain('QueueArn') + + +@mock_sqs_deprecated +def test_change_message_visibility_on_invalid_receipt(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", visibility_timeout=1) + queue.set_message_class(RawMessage) + + queue.write(queue.new_message('this is another test message')) + queue.count().should.equal(1) + messages = conn.receive_message(queue, number_messages=1) + + assert len(messages) == 1 + + original_message = messages[0] + + queue.count().should.equal(0) + + time.sleep(2) + + queue.count().should.equal(1) + + messages = conn.receive_message(queue, number_messages=1) + + assert len(messages) == 1 + + original_message.change_visibility.when.called_with( + 100).should.throw(SQSError) + + +@mock_sqs_deprecated +def test_change_message_visibility_on_visible_message(): + conn = boto.connect_sqs('the_key', 'the_secret') + queue = conn.create_queue("test-queue", 
visibility_timeout=1) + queue.set_message_class(RawMessage) + + queue.write(queue.new_message('this is another test message')) + queue.count().should.equal(1) + messages = conn.receive_message(queue, number_messages=1) + + assert len(messages) == 1 + + original_message = messages[0] + + queue.count().should.equal(0) + + time.sleep(2) + + queue.count().should.equal(1) + + original_message.change_visibility.when.called_with( + 100).should.throw(SQSError) + + +@mock_sqs_deprecated +def test_purge_action(): + conn = boto.sqs.connect_to_region("us-east-1") + + queue = conn.create_queue('new-queue') + queue.write(queue.new_message('this is another test message')) + queue.count().should.equal(1) + + queue.purge() + + queue.count().should.equal(0) + + +@mock_sqs_deprecated +def test_delete_message_after_visibility_timeout(): + VISIBILITY_TIMEOUT = 1 + conn = boto.sqs.connect_to_region("us-east-1") + new_queue = conn.create_queue( + 'new-queue', visibility_timeout=VISIBILITY_TIMEOUT) + + m1 = Message() + m1.set_body('Message 1!') + new_queue.write(m1) + + assert new_queue.count() == 1 + + m1_retrieved = new_queue.read() + + time.sleep(VISIBILITY_TIMEOUT + 1) + + m1_retrieved.delete() + + assert new_queue.count() == 0 + + +@mock_sqs +def test_batch_change_message_visibility(): + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true': + raise SkipTest('Cant manipulate time in server mode') + + with freeze_time("2015-01-01 12:00:00"): + sqs = boto3.client('sqs', region_name='us-east-1') + resp = sqs.create_queue( + QueueName='test-dlr-queue.fifo', + Attributes={'FifoQueue': 'true'} + ) + queue_url = resp['QueueUrl'] + + sqs.send_message(QueueUrl=queue_url, MessageBody='msg1') + sqs.send_message(QueueUrl=queue_url, MessageBody='msg2') + sqs.send_message(QueueUrl=queue_url, MessageBody='msg3') + + with freeze_time("2015-01-01 12:01:00"): + receive_resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=2) + len(receive_resp['Messages']).should.equal(2) + + handles = [item['ReceiptHandle'] for item in receive_resp['Messages']] + entries = [{'Id': str(uuid.uuid4()), 'ReceiptHandle': handle, 'VisibilityTimeout': 43200} for handle in handles] + + resp = sqs.change_message_visibility_batch(QueueUrl=queue_url, Entries=entries) + len(resp['Successful']).should.equal(2) + + with freeze_time("2015-01-01 14:00:00"): + resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3) + len(resp['Messages']).should.equal(1) + + with freeze_time("2015-01-01 16:00:00"): + resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3) + len(resp['Messages']).should.equal(1) + + with freeze_time("2015-01-02 12:00:00"): + resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3) + len(resp['Messages']).should.equal(3) + + +@mock_sqs +def test_permissions(): + client = boto3.client('sqs', region_name='us-east-1') + + resp = client.create_queue( + QueueName='test-dlr-queue.fifo', + Attributes={'FifoQueue': 'true'} + ) + queue_url = resp['QueueUrl'] + + client.add_permission(QueueUrl=queue_url, Label='account1', AWSAccountIds=['111111111111'], Actions=['*']) + client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SendMessage']) + + with assert_raises(ClientError): + client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SomeRubbish']) + + client.remove_permission(QueueUrl=queue_url, Label='account2') + + with assert_raises(ClientError): + client.remove_permission(QueueUrl=queue_url, 
Label='non_existent')
+
+
+@mock_sqs
+def test_tags():
+    client = boto3.client('sqs', region_name='us-east-1')
+
+    resp = client.create_queue(
+        QueueName='test-dlr-queue.fifo',
+        Attributes={'FifoQueue': 'true'}
+    )
+    queue_url = resp['QueueUrl']
+
+    client.tag_queue(
+        QueueUrl=queue_url,
+        Tags={
+            'test1': 'value1',
+            'test2': 'value2',
+        }
+    )
+
+    resp = client.list_queue_tags(QueueUrl=queue_url)
+    resp['Tags'].should.contain('test1')
+    resp['Tags'].should.contain('test2')
+
+    client.untag_queue(
+        QueueUrl=queue_url,
+        TagKeys=['test2']
+    )
+
+    resp = client.list_queue_tags(QueueUrl=queue_url)
+    resp['Tags'].should.contain('test1')
+    resp['Tags'].should_not.contain('test2')
+
+
+@mock_sqs
+def test_create_fifo_queue_with_dlq():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+    resp = sqs.create_queue(
+        QueueName='test-dlr-queue.fifo',
+        Attributes={'FifoQueue': 'true'}
+    )
+    queue_url1 = resp['QueueUrl']
+    queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn']
+
+    resp = sqs.create_queue(
+        QueueName='test-dlr-queue',
+        Attributes={'FifoQueue': 'false'}
+    )
+    queue_url2 = resp['QueueUrl']
+    queue_arn2 = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes']['QueueArn']
+
+    sqs.create_queue(
+        QueueName='test-queue.fifo',
+        Attributes={
+            'FifoQueue': 'true',
+            'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2})
+        }
+    )
+
+    # Can't have a FIFO queue with a non-FIFO DLQ
+    with assert_raises(ClientError):
+        sqs.create_queue(
+            QueueName='test-queue2.fifo',
+            Attributes={
+                'FifoQueue': 'true',
+                'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn2, 'maxReceiveCount': 2})
+            }
+        )
+
+
+@mock_sqs
+def test_queue_with_dlq():
+    if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true':
+        raise SkipTest('Cant manipulate time in server mode')
+
+    sqs = boto3.client('sqs', region_name='us-east-1')
+
+    with freeze_time("2015-01-01 12:00:00"):
+        resp = sqs.create_queue(
+            QueueName='test-dlr-queue.fifo',
+            Attributes={'FifoQueue': 'true'}
+        )
+        queue_url1 = resp['QueueUrl']
+        queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn']
+
+        resp = sqs.create_queue(
+            QueueName='test-queue.fifo',
+            Attributes={
+                'FifoQueue': 'true',
+                'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2})
+            }
+        )
+        queue_url2 = resp['QueueUrl']
+
+        sqs.send_message(QueueUrl=queue_url2, MessageBody='msg1')
+        sqs.send_message(QueueUrl=queue_url2, MessageBody='msg2')
+
+    with freeze_time("2015-01-01 13:00:00"):
+        resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0)
+        resp['Messages'][0]['Body'].should.equal('msg1')
+
+    with freeze_time("2015-01-01 13:01:00"):
+        resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0)
+        resp['Messages'][0]['Body'].should.equal('msg1')
+
+    with freeze_time("2015-01-01 13:02:00"):
+        resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0)
+        len(resp['Messages']).should.equal(1)
+
+        resp = sqs.receive_message(QueueUrl=queue_url1, VisibilityTimeout=30, WaitTimeSeconds=0)
+        resp['Messages'][0]['Body'].should.equal('msg1')
+
+    # Might as well test list source queues
+
+    resp = sqs.list_dead_letter_source_queues(QueueUrl=queue_url1)
+    resp['queueUrls'][0].should.equal(queue_url2)
+
+
+@mock_sqs
+def test_redrive_policy_available():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+
+    resp = sqs.create_queue(QueueName='test-deadletter')
+    queue_url1 = resp['QueueUrl']
+    queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn']
+    redrive_policy = {
+        'deadLetterTargetArn': queue_arn1,
+        'maxReceiveCount': 1,
+    }
+
+    resp = sqs.create_queue(
+        QueueName='test-queue',
+        Attributes={
+            'RedrivePolicy': json.dumps(redrive_policy)
+        }
+    )
+
+    queue_url2 = resp['QueueUrl']
+    attributes = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes']
+    assert 'RedrivePolicy' in attributes
+    assert json.loads(attributes['RedrivePolicy']) == redrive_policy
+
+    # Can't have a redrive policy without maxReceiveCount
+    with assert_raises(ClientError):
+        sqs.create_queue(
+            QueueName='test-queue2',
+            Attributes={
+                'FifoQueue': 'true',
+                'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1})
+            }
+        )
+
+
+@mock_sqs
+def test_redrive_policy_non_existent_queue():
+    sqs = boto3.client('sqs', region_name='us-east-1')
+    redrive_policy = {
+        'deadLetterTargetArn': 'arn:aws:sqs:us-east-1:123456789012:no-queue',
+        'maxReceiveCount': 1,
+    }
+
+    with assert_raises(ClientError):
+        sqs.create_queue(
+            QueueName='test-queue',
+            Attributes={
+                'RedrivePolicy': json.dumps(redrive_policy)
+            }
+        )
+
+
+@mock_sqs
+def test_redrive_policy_set_attributes():
+    sqs = boto3.resource('sqs', region_name='us-east-1')
+
+    queue = sqs.create_queue(QueueName='test-queue')
+    deadletter_queue = sqs.create_queue(QueueName='test-deadletter')
+
+    redrive_policy = {
+        'deadLetterTargetArn': deadletter_queue.attributes['QueueArn'],
+        'maxReceiveCount': 1,
+    }
+
+    queue.set_attributes(Attributes={
+        'RedrivePolicy': json.dumps(redrive_policy)})
+
+    copy = sqs.get_queue_by_name(QueueName='test-queue')
+    assert 'RedrivePolicy' in copy.attributes
+    copy_policy = json.loads(copy.attributes['RedrivePolicy'])
+    assert copy_policy == redrive_policy
+
+
+@mock_sqs
+def test_receive_messages_with_message_group_id():
+    sqs = boto3.resource('sqs', region_name='us-east-1')
+    queue = sqs.create_queue(QueueName="test-queue.fifo",
+                             Attributes={
+                                 'FifoQueue': 'true',
+                             })
+    queue.set_attributes(Attributes={"VisibilityTimeout": "3600"})
+    queue.send_message(
+        MessageBody="message-1",
+        MessageGroupId="group"
+    )
+    queue.send_message(
+        MessageBody="message-2",
+        MessageGroupId="group"
+    )
+
+    messages = queue.receive_messages()
+    messages.should.have.length_of(1)
+    message = messages[0]
+
+    # received message is not deleted!
+
+    messages = queue.receive_messages(WaitTimeSeconds=0)
+    messages.should.have.length_of(0)
+
+    # message is now processed, next one should be available
+    message.delete()
+    messages = queue.receive_messages()
+    messages.should.have.length_of(1)
+
+
+@mock_sqs
+def test_receive_messages_with_message_group_id_on_requeue():
+    sqs = boto3.resource('sqs', region_name='us-east-1')
+    queue = sqs.create_queue(QueueName="test-queue.fifo",
+                             Attributes={
+                                 'FifoQueue': 'true',
+                             })
+    queue.set_attributes(Attributes={"VisibilityTimeout": "3600"})
+    queue.send_message(
+        MessageBody="message-1",
+        MessageGroupId="group"
+    )
+    queue.send_message(
+        MessageBody="message-2",
+        MessageGroupId="group"
+    )
+
+    messages = queue.receive_messages()
+    messages.should.have.length_of(1)
+    message = messages[0]
+
+    # received message is not deleted!
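+    # while this message is in flight, its whole message group stays locked,
+    # so the next receive returns nothing until the message is returned to
+    # the queue (here via a visibility timeout of 0)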
+ + messages = queue.receive_messages(WaitTimeSeconds=0) + messages.should.have.length_of(0) + + # message is now available again, next one should be available + message.change_visibility(VisibilityTimeout=0) + messages = queue.receive_messages() + messages.should.have.length_of(1) + messages[0].message_id.should.equal(message.message_id) + + +@mock_sqs +def test_receive_messages_with_message_group_id_on_visibility_timeout(): + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true': + raise SkipTest('Cant manipulate time in server mode') + + with freeze_time("2015-01-01 12:00:00"): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="test-queue.fifo", + Attributes={ + 'FifoQueue': 'true', + }) + queue.set_attributes(Attributes={"VisibilityTimeout": "3600"}) + queue.send_message( + MessageBody="message-1", + MessageGroupId="group" + ) + queue.send_message( + MessageBody="message-2", + MessageGroupId="group" + ) + + messages = queue.receive_messages() + messages.should.have.length_of(1) + message = messages[0] + + # received message is not deleted! + + messages = queue.receive_messages(WaitTimeSeconds=0) + messages.should.have.length_of(0) + + message.change_visibility(VisibilityTimeout=10) + + with freeze_time("2015-01-01 12:00:05"): + # no timeout yet + messages = queue.receive_messages(WaitTimeSeconds=0) + messages.should.have.length_of(0) + + with freeze_time("2015-01-01 12:00:15"): + # message is now available again, next one should be available + messages = queue.receive_messages() + messages.should.have.length_of(1) + messages[0].message_id.should.equal(message.message_id) + +@mock_sqs +def test_receive_message_for_queue_with_receive_message_wait_time_seconds_set(): + sqs = boto3.resource('sqs', region_name='us-east-1') + + queue = sqs.create_queue( + QueueName='test-queue', + Attributes={ + 'ReceiveMessageWaitTimeSeconds': '2', + } + ) + + queue.receive_messages() diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index f8ef3a237..7f25ac61b 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -1,786 +1,786 @@ -from __future__ import unicode_literals - -import boto3 -import botocore.exceptions -import sure # noqa -import datetime -import uuid -import json - -from botocore.exceptions import ClientError -from nose.tools import assert_raises - -from moto import mock_ssm, mock_cloudformation - - -@mock_ssm -def test_delete_parameter(): - client = boto3.client('ssm', region_name='us-east-1') - - client.put_parameter( - Name='test', - Description='A test parameter', - Value='value', - Type='String') - - response = client.get_parameters(Names=['test']) - len(response['Parameters']).should.equal(1) - - client.delete_parameter(Name='test') - - response = client.get_parameters(Names=['test']) - len(response['Parameters']).should.equal(0) - - -@mock_ssm -def test_delete_parameters(): - client = boto3.client('ssm', region_name='us-east-1') - - client.put_parameter( - Name='test', - Description='A test parameter', - Value='value', - Type='String') - - response = client.get_parameters(Names=['test']) - len(response['Parameters']).should.equal(1) - - result = client.delete_parameters(Names=['test', 'invalid']) - len(result['DeletedParameters']).should.equal(1) - len(result['InvalidParameters']).should.equal(1) - - response = client.get_parameters(Names=['test']) - len(response['Parameters']).should.equal(0) - - -@mock_ssm -def test_get_parameters_by_path(): - client = 
boto3.client('ssm', region_name='us-east-1') - - client.put_parameter( - Name='/foo/name1', - Description='A test parameter', - Value='value1', - Type='String') - - client.put_parameter( - Name='/foo/name2', - Description='A test parameter', - Value='value2', - Type='String') - - client.put_parameter( - Name='/bar/name3', - Description='A test parameter', - Value='value3', - Type='String') - - client.put_parameter( - Name='/bar/name3/name4', - Description='A test parameter', - Value='value4', - Type='String') - - client.put_parameter( - Name='/baz/name1', - Description='A test parameter (list)', - Value='value1,value2,value3', - Type='StringList') - - client.put_parameter( - Name='/baz/name2', - Description='A test parameter', - Value='value1', - Type='String') - - client.put_parameter( - Name='/baz/pwd', - Description='A secure test parameter', - Value='my_secret', - Type='SecureString', - KeyId='alias/aws/ssm') - - client.put_parameter( - Name='foo', - Description='A test parameter', - Value='bar', - Type='String') - - client.put_parameter( - Name='baz', - Description='A test parameter', - Value='qux', - Type='String') - - response = client.get_parameters_by_path(Path='/', Recursive=False) - len(response['Parameters']).should.equal(2) - {p['Value'] for p in response['Parameters']}.should.equal( - set(['bar', 'qux']) - ) - - response = client.get_parameters_by_path(Path='/', Recursive=True) - len(response['Parameters']).should.equal(9) - - response = client.get_parameters_by_path(Path='/foo') - len(response['Parameters']).should.equal(2) - {p['Value'] for p in response['Parameters']}.should.equal( - set(['value1', 'value2']) - ) - - response = client.get_parameters_by_path(Path='/bar', Recursive=False) - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Value'].should.equal('value3') - - response = client.get_parameters_by_path(Path='/bar', Recursive=True) - len(response['Parameters']).should.equal(2) - {p['Value'] for p in response['Parameters']}.should.equal( - set(['value3', 'value4']) - ) - - response = client.get_parameters_by_path(Path='/baz') - len(response['Parameters']).should.equal(3) - - filters = [{ - 'Key': 'Type', - 'Option': 'Equals', - 'Values': ['StringList'], - }] - response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) - len(response['Parameters']).should.equal(1) - {p['Name'] for p in response['Parameters']}.should.equal( - set(['/baz/name1']) - ) - - # note: 'Option' is optional (default: 'Equals') - filters = [{ - 'Key': 'Type', - 'Values': ['StringList'], - }] - response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) - len(response['Parameters']).should.equal(1) - {p['Name'] for p in response['Parameters']}.should.equal( - set(['/baz/name1']) - ) - - filters = [{ - 'Key': 'Type', - 'Option': 'Equals', - 'Values': ['String'], - }] - response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) - len(response['Parameters']).should.equal(1) - {p['Name'] for p in response['Parameters']}.should.equal( - set(['/baz/name2']) - ) - - filters = [{ - 'Key': 'Type', - 'Option': 'Equals', - 'Values': ['String', 'SecureString'], - }] - response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) - len(response['Parameters']).should.equal(2) - {p['Name'] for p in response['Parameters']}.should.equal( - set(['/baz/name2', '/baz/pwd']) - ) - - filters = [{ - 'Key': 'Type', - 'Option': 'BeginsWith', - 'Values': ['String'], - }] - response = 
client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) - len(response['Parameters']).should.equal(2) - {p['Name'] for p in response['Parameters']}.should.equal( - set(['/baz/name1', '/baz/name2']) - ) - - filters = [{ - 'Key': 'KeyId', - 'Option': 'Equals', - 'Values': ['alias/aws/ssm'], - }] - response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) - len(response['Parameters']).should.equal(1) - {p['Name'] for p in response['Parameters']}.should.equal( - set(['/baz/pwd']) - ) - - -@mock_ssm -def test_put_parameter(): - client = boto3.client('ssm', region_name='us-east-1') - - response = client.put_parameter( - Name='test', - Description='A test parameter', - Value='value', - Type='String') - - response['Version'].should.equal(1) - - response = client.get_parameters( - Names=[ - 'test' - ], - WithDecryption=False) - - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Name'].should.equal('test') - response['Parameters'][0]['Value'].should.equal('value') - response['Parameters'][0]['Type'].should.equal('String') - response['Parameters'][0]['Version'].should.equal(1) - - try: - client.put_parameter( - Name='test', - Description='desc 2', - Value='value 2', - Type='String') - raise RuntimeError('Should fail') - except botocore.exceptions.ClientError as err: - err.operation_name.should.equal('PutParameter') - err.response['Error']['Message'].should.equal('Parameter test already exists.') - - response = client.get_parameters( - Names=[ - 'test' - ], - WithDecryption=False) - - # without overwrite nothing change - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Name'].should.equal('test') - response['Parameters'][0]['Value'].should.equal('value') - response['Parameters'][0]['Type'].should.equal('String') - response['Parameters'][0]['Version'].should.equal(1) - - response = client.put_parameter( - Name='test', - Description='desc 3', - Value='value 3', - Type='String', - Overwrite=True) - - response['Version'].should.equal(2) - - response = client.get_parameters( - Names=[ - 'test' - ], - WithDecryption=False) - - # without overwrite nothing change - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Name'].should.equal('test') - response['Parameters'][0]['Value'].should.equal('value 3') - response['Parameters'][0]['Type'].should.equal('String') - response['Parameters'][0]['Version'].should.equal(2) - - -@mock_ssm -def test_get_parameter(): - client = boto3.client('ssm', region_name='us-east-1') - - client.put_parameter( - Name='test', - Description='A test parameter', - Value='value', - Type='String') - - response = client.get_parameter( - Name='test', - WithDecryption=False) - - response['Parameter']['Name'].should.equal('test') - response['Parameter']['Value'].should.equal('value') - response['Parameter']['Type'].should.equal('String') - - -@mock_ssm -def test_get_nonexistant_parameter(): - client = boto3.client('ssm', region_name='us-east-1') - - try: - client.get_parameter( - Name='test_noexist', - WithDecryption=False) - raise RuntimeError('Should of failed') - except botocore.exceptions.ClientError as err: - err.operation_name.should.equal('GetParameter') - err.response['Error']['Message'].should.equal('Parameter test_noexist not found.') - - -@mock_ssm -def test_describe_parameters(): - client = boto3.client('ssm', region_name='us-east-1') - - client.put_parameter( - Name='test', - Description='A test parameter', - Value='value', - Type='String') - - response = 
client.describe_parameters() - - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Name'].should.equal('test') - response['Parameters'][0]['Type'].should.equal('String') - - -@mock_ssm -def test_describe_parameters_paging(): - client = boto3.client('ssm', region_name='us-east-1') - - for i in range(50): - client.put_parameter( - Name="param-%d" % i, - Value="value-%d" % i, - Type="String" - ) - - response = client.describe_parameters() - len(response['Parameters']).should.equal(10) - response['NextToken'].should.equal('10') - - response = client.describe_parameters(NextToken=response['NextToken']) - len(response['Parameters']).should.equal(10) - response['NextToken'].should.equal('20') - - response = client.describe_parameters(NextToken=response['NextToken']) - len(response['Parameters']).should.equal(10) - response['NextToken'].should.equal('30') - - response = client.describe_parameters(NextToken=response['NextToken']) - len(response['Parameters']).should.equal(10) - response['NextToken'].should.equal('40') - - response = client.describe_parameters(NextToken=response['NextToken']) - len(response['Parameters']).should.equal(10) - response['NextToken'].should.equal('50') - - response = client.describe_parameters(NextToken=response['NextToken']) - len(response['Parameters']).should.equal(0) - ''.should.equal(response.get('NextToken', '')) - - -@mock_ssm -def test_describe_parameters_filter_names(): - client = boto3.client('ssm', region_name='us-east-1') - - for i in range(50): - p = { - 'Name': "param-%d" % i, - 'Value': "value-%d" % i, - 'Type': "String" - } - if i % 5 == 0: - p['Type'] = 'SecureString' - p['KeyId'] = 'a key' - client.put_parameter(**p) - - response = client.describe_parameters(Filters=[ - { - 'Key': 'Name', - 'Values': ['param-22'] - }, - ]) - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Name'].should.equal('param-22') - response['Parameters'][0]['Type'].should.equal('String') - ''.should.equal(response.get('NextToken', '')) - - -@mock_ssm -def test_describe_parameters_filter_type(): - client = boto3.client('ssm', region_name='us-east-1') - - for i in range(50): - p = { - 'Name': "param-%d" % i, - 'Value': "value-%d" % i, - 'Type': "String" - } - if i % 5 == 0: - p['Type'] = 'SecureString' - p['KeyId'] = 'a key' - client.put_parameter(**p) - - response = client.describe_parameters(Filters=[ - { - 'Key': 'Type', - 'Values': ['SecureString'] - }, - ]) - len(response['Parameters']).should.equal(10) - response['Parameters'][0]['Type'].should.equal('SecureString') - '10'.should.equal(response.get('NextToken', '')) - - -@mock_ssm -def test_describe_parameters_filter_keyid(): - client = boto3.client('ssm', region_name='us-east-1') - - for i in range(50): - p = { - 'Name': "param-%d" % i, - 'Value': "value-%d" % i, - 'Type': "String" - } - if i % 5 == 0: - p['Type'] = 'SecureString' - p['KeyId'] = "key:%d" % i - client.put_parameter(**p) - - response = client.describe_parameters(Filters=[ - { - 'Key': 'KeyId', - 'Values': ['key:10'] - }, - ]) - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Name'].should.equal('param-10') - response['Parameters'][0]['Type'].should.equal('SecureString') - ''.should.equal(response.get('NextToken', '')) - - -@mock_ssm -def test_describe_parameters_attributes(): - client = boto3.client('ssm', region_name='us-east-1') - - client.put_parameter( - Name='aa', - Value='11', - Type='String', - Description='my description' - ) - - client.put_parameter( - Name='bb', - Value='22', - 
Type='String' - ) - - response = client.describe_parameters() - len(response['Parameters']).should.equal(2) - - response['Parameters'][0]['Description'].should.equal('my description') - response['Parameters'][0]['Version'].should.equal(1) - response['Parameters'][0]['LastModifiedDate'].should.be.a(datetime.date) - response['Parameters'][0]['LastModifiedUser'].should.equal('N/A') - - response['Parameters'][1].get('Description').should.be.none - response['Parameters'][1]['Version'].should.equal(1) - - -@mock_ssm -def test_get_parameter_invalid(): - client = client = boto3.client('ssm', region_name='us-east-1') - response = client.get_parameters( - Names=[ - 'invalid' - ], - WithDecryption=False) - - len(response['Parameters']).should.equal(0) - len(response['InvalidParameters']).should.equal(1) - response['InvalidParameters'][0].should.equal('invalid') - - -@mock_ssm -def test_put_parameter_secure_default_kms(): - client = boto3.client('ssm', region_name='us-east-1') - - client.put_parameter( - Name='test', - Description='A test parameter', - Value='value', - Type='SecureString') - - response = client.get_parameters( - Names=[ - 'test' - ], - WithDecryption=False) - - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Name'].should.equal('test') - response['Parameters'][0]['Value'].should.equal('kms:default:value') - response['Parameters'][0]['Type'].should.equal('SecureString') - - response = client.get_parameters( - Names=[ - 'test' - ], - WithDecryption=True) - - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Name'].should.equal('test') - response['Parameters'][0]['Value'].should.equal('value') - response['Parameters'][0]['Type'].should.equal('SecureString') - - -@mock_ssm -def test_put_parameter_secure_custom_kms(): - client = boto3.client('ssm', region_name='us-east-1') - - client.put_parameter( - Name='test', - Description='A test parameter', - Value='value', - Type='SecureString', - KeyId='foo') - - response = client.get_parameters( - Names=[ - 'test' - ], - WithDecryption=False) - - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Name'].should.equal('test') - response['Parameters'][0]['Value'].should.equal('kms:foo:value') - response['Parameters'][0]['Type'].should.equal('SecureString') - - response = client.get_parameters( - Names=[ - 'test' - ], - WithDecryption=True) - - len(response['Parameters']).should.equal(1) - response['Parameters'][0]['Name'].should.equal('test') - response['Parameters'][0]['Value'].should.equal('value') - response['Parameters'][0]['Type'].should.equal('SecureString') - - -@mock_ssm -def test_add_remove_list_tags_for_resource(): - client = boto3.client('ssm', region_name='us-east-1') - - client.add_tags_to_resource( - ResourceId='test', - ResourceType='Parameter', - Tags=[{'Key': 'test-key', 'Value': 'test-value'}] - ) - - response = client.list_tags_for_resource( - ResourceId='test', - ResourceType='Parameter' - ) - len(response['TagList']).should.equal(1) - response['TagList'][0]['Key'].should.equal('test-key') - response['TagList'][0]['Value'].should.equal('test-value') - - client.remove_tags_from_resource( - ResourceId='test', - ResourceType='Parameter', - TagKeys=['test-key'] - ) - - response = client.list_tags_for_resource( - ResourceId='test', - ResourceType='Parameter' - ) - len(response['TagList']).should.equal(0) - - -@mock_ssm -def test_send_command(): - ssm_document = 'AWS-RunShellScript' - params = {'commands': ['#!/bin/bash\necho \'hello world\'']} - - client = 
boto3.client('ssm', region_name='us-east-1') - # note the timeout is determined server side, so this is a simpler check. - before = datetime.datetime.now() - - response = client.send_command( - InstanceIds=['i-123456'], - DocumentName=ssm_document, - Parameters=params, - OutputS3Region='us-east-2', - OutputS3BucketName='the-bucket', - OutputS3KeyPrefix='pref' - ) - cmd = response['Command'] - - cmd['CommandId'].should_not.be(None) - cmd['DocumentName'].should.equal(ssm_document) - cmd['Parameters'].should.equal(params) - - cmd['OutputS3Region'].should.equal('us-east-2') - cmd['OutputS3BucketName'].should.equal('the-bucket') - cmd['OutputS3KeyPrefix'].should.equal('pref') - - cmd['ExpiresAfter'].should.be.greater_than(before) - - # test sending a command without any optional parameters - response = client.send_command( - DocumentName=ssm_document) - - cmd = response['Command'] - - cmd['CommandId'].should_not.be(None) - cmd['DocumentName'].should.equal(ssm_document) - - -@mock_ssm -def test_list_commands(): - client = boto3.client('ssm', region_name='us-east-1') - - ssm_document = 'AWS-RunShellScript' - params = {'commands': ['#!/bin/bash\necho \'hello world\'']} - - response = client.send_command( - InstanceIds=['i-123456'], - DocumentName=ssm_document, - Parameters=params, - OutputS3Region='us-east-2', - OutputS3BucketName='the-bucket', - OutputS3KeyPrefix='pref') - - cmd = response['Command'] - cmd_id = cmd['CommandId'] - - # get the command by id - response = client.list_commands( - CommandId=cmd_id) - - cmds = response['Commands'] - len(cmds).should.equal(1) - cmds[0]['CommandId'].should.equal(cmd_id) - - # add another command with the same instance id to test listing by - # instance id - client.send_command( - InstanceIds=['i-123456'], - DocumentName=ssm_document) - - response = client.list_commands( - InstanceId='i-123456') - - cmds = response['Commands'] - len(cmds).should.equal(2) - - for cmd in cmds: - cmd['InstanceIds'].should.contain('i-123456') - - # test the error case for an invalid command id - with assert_raises(ClientError): - response = client.list_commands( - CommandId=str(uuid.uuid4())) - -@mock_ssm -def test_get_command_invocation(): - client = boto3.client('ssm', region_name='us-east-1') - - ssm_document = 'AWS-RunShellScript' - params = {'commands': ['#!/bin/bash\necho \'hello world\'']} - - response = client.send_command( - InstanceIds=['i-123456', 'i-234567', 'i-345678'], - DocumentName=ssm_document, - Parameters=params, - OutputS3Region='us-east-2', - OutputS3BucketName='the-bucket', - OutputS3KeyPrefix='pref') - - cmd = response['Command'] - cmd_id = cmd['CommandId'] - - instance_id = 'i-345678' - invocation_response = client.get_command_invocation( - CommandId=cmd_id, - InstanceId=instance_id, - PluginName='aws:runShellScript') - - invocation_response['CommandId'].should.equal(cmd_id) - invocation_response['InstanceId'].should.equal(instance_id) - - # test the error case for an invalid instance id - with assert_raises(ClientError): - invocation_response = client.get_command_invocation( - CommandId=cmd_id, - InstanceId='i-FAKE') - - # test the error case for an invalid plugin name - with assert_raises(ClientError): - invocation_response = client.get_command_invocation( - CommandId=cmd_id, - InstanceId=instance_id, - PluginName='FAKE') - -@mock_ssm -@mock_cloudformation -def test_get_command_invocations_from_stack(): - stack_template = { - "AWSTemplateFormatVersion": "2010-09-09", - "Description": "Test Stack", - "Resources": { - "EC2Instance1": { - "Type": 
"AWS::EC2::Instance", - "Properties": { - "ImageId": "ami-test-image-id", - "KeyName": "test", - "InstanceType": "t2.micro", - "Tags": [ - { - "Key": "Test Description", - "Value": "Test tag" - }, - { - "Key": "Test Name", - "Value": "Name tag for tests" - } - ] - } - } - }, - "Outputs": { - "test": { - "Description": "Test Output", - "Value": "Test output value", - "Export": { - "Name": "Test value to export" - } - }, - "PublicIP": { - "Value": "Test public ip" - } - } - } - - cloudformation_client = boto3.client( - 'cloudformation', - region_name='us-east-1') - - stack_template_str = json.dumps(stack_template) - - response = cloudformation_client.create_stack( - StackName='test_stack', - TemplateBody=stack_template_str, - Capabilities=('CAPABILITY_IAM', )) - - client = boto3.client('ssm', region_name='us-east-1') - - ssm_document = 'AWS-RunShellScript' - params = {'commands': ['#!/bin/bash\necho \'hello world\'']} - - response = client.send_command( - Targets=[{ - 'Key': 'tag:aws:cloudformation:stack-name', - 'Values': ('test_stack', )}], - DocumentName=ssm_document, - Parameters=params, - OutputS3Region='us-east-2', - OutputS3BucketName='the-bucket', - OutputS3KeyPrefix='pref') - - cmd = response['Command'] - cmd_id = cmd['CommandId'] - instance_ids = cmd['InstanceIds'] - - invocation_response = client.get_command_invocation( - CommandId=cmd_id, - InstanceId=instance_ids[0], - PluginName='aws:runShellScript') +from __future__ import unicode_literals + +import boto3 +import botocore.exceptions +import sure # noqa +import datetime +import uuid +import json + +from botocore.exceptions import ClientError +from nose.tools import assert_raises + +from moto import mock_ssm, mock_cloudformation + + +@mock_ssm +def test_delete_parameter(): + client = boto3.client('ssm', region_name='us-east-1') + + client.put_parameter( + Name='test', + Description='A test parameter', + Value='value', + Type='String') + + response = client.get_parameters(Names=['test']) + len(response['Parameters']).should.equal(1) + + client.delete_parameter(Name='test') + + response = client.get_parameters(Names=['test']) + len(response['Parameters']).should.equal(0) + + +@mock_ssm +def test_delete_parameters(): + client = boto3.client('ssm', region_name='us-east-1') + + client.put_parameter( + Name='test', + Description='A test parameter', + Value='value', + Type='String') + + response = client.get_parameters(Names=['test']) + len(response['Parameters']).should.equal(1) + + result = client.delete_parameters(Names=['test', 'invalid']) + len(result['DeletedParameters']).should.equal(1) + len(result['InvalidParameters']).should.equal(1) + + response = client.get_parameters(Names=['test']) + len(response['Parameters']).should.equal(0) + + +@mock_ssm +def test_get_parameters_by_path(): + client = boto3.client('ssm', region_name='us-east-1') + + client.put_parameter( + Name='/foo/name1', + Description='A test parameter', + Value='value1', + Type='String') + + client.put_parameter( + Name='/foo/name2', + Description='A test parameter', + Value='value2', + Type='String') + + client.put_parameter( + Name='/bar/name3', + Description='A test parameter', + Value='value3', + Type='String') + + client.put_parameter( + Name='/bar/name3/name4', + Description='A test parameter', + Value='value4', + Type='String') + + client.put_parameter( + Name='/baz/name1', + Description='A test parameter (list)', + Value='value1,value2,value3', + Type='StringList') + + client.put_parameter( + Name='/baz/name2', + Description='A test parameter', + 
Value='value1', + Type='String') + + client.put_parameter( + Name='/baz/pwd', + Description='A secure test parameter', + Value='my_secret', + Type='SecureString', + KeyId='alias/aws/ssm') + + client.put_parameter( + Name='foo', + Description='A test parameter', + Value='bar', + Type='String') + + client.put_parameter( + Name='baz', + Description='A test parameter', + Value='qux', + Type='String') + + response = client.get_parameters_by_path(Path='/', Recursive=False) + len(response['Parameters']).should.equal(2) + {p['Value'] for p in response['Parameters']}.should.equal( + set(['bar', 'qux']) + ) + + response = client.get_parameters_by_path(Path='/', Recursive=True) + len(response['Parameters']).should.equal(9) + + response = client.get_parameters_by_path(Path='/foo') + len(response['Parameters']).should.equal(2) + {p['Value'] for p in response['Parameters']}.should.equal( + set(['value1', 'value2']) + ) + + response = client.get_parameters_by_path(Path='/bar', Recursive=False) + len(response['Parameters']).should.equal(1) + response['Parameters'][0]['Value'].should.equal('value3') + + response = client.get_parameters_by_path(Path='/bar', Recursive=True) + len(response['Parameters']).should.equal(2) + {p['Value'] for p in response['Parameters']}.should.equal( + set(['value3', 'value4']) + ) + + response = client.get_parameters_by_path(Path='/baz') + len(response['Parameters']).should.equal(3) + + filters = [{ + 'Key': 'Type', + 'Option': 'Equals', + 'Values': ['StringList'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(1) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name1']) + ) + + # note: 'Option' is optional (default: 'Equals') + filters = [{ + 'Key': 'Type', + 'Values': ['StringList'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(1) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name1']) + ) + + filters = [{ + 'Key': 'Type', + 'Option': 'Equals', + 'Values': ['String'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(1) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name2']) + ) + + filters = [{ + 'Key': 'Type', + 'Option': 'Equals', + 'Values': ['String', 'SecureString'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(2) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name2', '/baz/pwd']) + ) + + filters = [{ + 'Key': 'Type', + 'Option': 'BeginsWith', + 'Values': ['String'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(2) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name1', '/baz/name2']) + ) + + filters = [{ + 'Key': 'KeyId', + 'Option': 'Equals', + 'Values': ['alias/aws/ssm'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(1) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/pwd']) + ) + + +@mock_ssm +def test_put_parameter(): + client = boto3.client('ssm', region_name='us-east-1') + + response = client.put_parameter( + Name='test', + Description='A test parameter', + Value='value', + Type='String') + + 
response['Version'].should.equal(1)
+
+    response = client.get_parameters(
+        Names=[
+            'test'
+        ],
+        WithDecryption=False)
+
+    len(response['Parameters']).should.equal(1)
+    response['Parameters'][0]['Name'].should.equal('test')
+    response['Parameters'][0]['Value'].should.equal('value')
+    response['Parameters'][0]['Type'].should.equal('String')
+    response['Parameters'][0]['Version'].should.equal(1)
+
+    try:
+        client.put_parameter(
+            Name='test',
+            Description='desc 2',
+            Value='value 2',
+            Type='String')
+        raise RuntimeError('Should fail')
+    except botocore.exceptions.ClientError as err:
+        err.operation_name.should.equal('PutParameter')
+        err.response['Error']['Message'].should.equal('Parameter test already exists.')
+
+    response = client.get_parameters(
+        Names=[
+            'test'
+        ],
+        WithDecryption=False)
+
+    # without overwrite nothing changes
+    len(response['Parameters']).should.equal(1)
+    response['Parameters'][0]['Name'].should.equal('test')
+    response['Parameters'][0]['Value'].should.equal('value')
+    response['Parameters'][0]['Type'].should.equal('String')
+    response['Parameters'][0]['Version'].should.equal(1)
+
+    response = client.put_parameter(
+        Name='test',
+        Description='desc 3',
+        Value='value 3',
+        Type='String',
+        Overwrite=True)
+
+    response['Version'].should.equal(2)
+
+    response = client.get_parameters(
+        Names=[
+            'test'
+        ],
+        WithDecryption=False)
+
+    # with overwrite the parameter is updated
+    len(response['Parameters']).should.equal(1)
+    response['Parameters'][0]['Name'].should.equal('test')
+    response['Parameters'][0]['Value'].should.equal('value 3')
+    response['Parameters'][0]['Type'].should.equal('String')
+    response['Parameters'][0]['Version'].should.equal(2)
+
+
+@mock_ssm
+def test_get_parameter():
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    client.put_parameter(
+        Name='test',
+        Description='A test parameter',
+        Value='value',
+        Type='String')
+
+    response = client.get_parameter(
+        Name='test',
+        WithDecryption=False)
+
+    response['Parameter']['Name'].should.equal('test')
+    response['Parameter']['Value'].should.equal('value')
+    response['Parameter']['Type'].should.equal('String')
+
+
+@mock_ssm
+def test_get_nonexistant_parameter():
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    try:
+        client.get_parameter(
+            Name='test_noexist',
+            WithDecryption=False)
+        raise RuntimeError('Should have failed')
+    except botocore.exceptions.ClientError as err:
+        err.operation_name.should.equal('GetParameter')
+        err.response['Error']['Message'].should.equal('Parameter test_noexist not found.')
+
+
+@mock_ssm
+def test_describe_parameters():
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    client.put_parameter(
+        Name='test',
+        Description='A test parameter',
+        Value='value',
+        Type='String')
+
+    response = client.describe_parameters()
+
+    len(response['Parameters']).should.equal(1)
+    response['Parameters'][0]['Name'].should.equal('test')
+    response['Parameters'][0]['Type'].should.equal('String')
+
+
+@mock_ssm
+def test_describe_parameters_paging():
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    for i in range(50):
+        client.put_parameter(
+            Name="param-%d" % i,
+            Value="value-%d" % i,
+            Type="String"
+        )
+
+    response = client.describe_parameters()
+    len(response['Parameters']).should.equal(10)
+    response['NextToken'].should.equal('10')
+
+    response = client.describe_parameters(NextToken=response['NextToken'])
+    len(response['Parameters']).should.equal(10)
+    response['NextToken'].should.equal('20')
+
+    response = client.describe_parameters(NextToken=response['NextToken'])
+    len(response['Parameters']).should.equal(10)
+    response['NextToken'].should.equal('30')
+
+    response = client.describe_parameters(NextToken=response['NextToken'])
+    len(response['Parameters']).should.equal(10)
+    response['NextToken'].should.equal('40')
+
+    response = client.describe_parameters(NextToken=response['NextToken'])
+    len(response['Parameters']).should.equal(10)
+    response['NextToken'].should.equal('50')
+
+    response = client.describe_parameters(NextToken=response['NextToken'])
+    len(response['Parameters']).should.equal(0)
+    ''.should.equal(response.get('NextToken', ''))
+
+
+@mock_ssm
+def test_describe_parameters_filter_names():
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    for i in range(50):
+        p = {
+            'Name': "param-%d" % i,
+            'Value': "value-%d" % i,
+            'Type': "String"
+        }
+        if i % 5 == 0:
+            p['Type'] = 'SecureString'
+            p['KeyId'] = 'a key'
+        client.put_parameter(**p)
+
+    response = client.describe_parameters(Filters=[
+        {
+            'Key': 'Name',
+            'Values': ['param-22']
+        },
+    ])
+    len(response['Parameters']).should.equal(1)
+    response['Parameters'][0]['Name'].should.equal('param-22')
+    response['Parameters'][0]['Type'].should.equal('String')
+    ''.should.equal(response.get('NextToken', ''))
+
+
+@mock_ssm
+def test_describe_parameters_filter_type():
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    for i in range(50):
+        p = {
+            'Name': "param-%d" % i,
+            'Value': "value-%d" % i,
+            'Type': "String"
+        }
+        if i % 5 == 0:
+            p['Type'] = 'SecureString'
+            p['KeyId'] = 'a key'
+        client.put_parameter(**p)
+
+    response = client.describe_parameters(Filters=[
+        {
+            'Key': 'Type',
+            'Values': ['SecureString']
+        },
+    ])
+    len(response['Parameters']).should.equal(10)
+    response['Parameters'][0]['Type'].should.equal('SecureString')
+    '10'.should.equal(response.get('NextToken', ''))
+
+
+@mock_ssm
+def test_describe_parameters_filter_keyid():
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    for i in range(50):
+        p = {
+            'Name': "param-%d" % i,
+            'Value': "value-%d" % i,
+            'Type': "String"
+        }
+        if i % 5 == 0:
+            p['Type'] = 'SecureString'
+            p['KeyId'] = "key:%d" % i
+        client.put_parameter(**p)
+
+    response = client.describe_parameters(Filters=[
+        {
+            'Key': 'KeyId',
+            'Values': ['key:10']
+        },
+    ])
+    len(response['Parameters']).should.equal(1)
+    response['Parameters'][0]['Name'].should.equal('param-10')
+    response['Parameters'][0]['Type'].should.equal('SecureString')
+    ''.should.equal(response.get('NextToken', ''))
+
+
+@mock_ssm
+def test_describe_parameters_attributes():
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    client.put_parameter(
+        Name='aa',
+        Value='11',
+        Type='String',
+        Description='my description'
+    )
+
+    client.put_parameter(
+        Name='bb',
+        Value='22',
+        Type='String'
+    )
+
+    response = client.describe_parameters()
+    len(response['Parameters']).should.equal(2)
+
+    response['Parameters'][0]['Description'].should.equal('my description')
+    response['Parameters'][0]['Version'].should.equal(1)
+    response['Parameters'][0]['LastModifiedDate'].should.be.a(datetime.date)
+    response['Parameters'][0]['LastModifiedUser'].should.equal('N/A')
+
+    response['Parameters'][1].get('Description').should.be.none
+    response['Parameters'][1]['Version'].should.equal(1)
+
+
+@mock_ssm
+def test_get_parameter_invalid():
+    client = boto3.client('ssm', region_name='us-east-1')
+    response = client.get_parameters(
+        Names=[
+            'invalid'
+        ],
+        WithDecryption=False)
+
+    len(response['Parameters']).should.equal(0)
+    len(response['InvalidParameters']).should.equal(1)
+    response['InvalidParameters'][0].should.equal('invalid')
+
+
+@mock_ssm
+def test_put_parameter_secure_default_kms():
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    client.put_parameter(
+        Name='test',
+        Description='A test parameter',
+        Value='value',
+        Type='SecureString')
+
+    response = client.get_parameters(
+        Names=[
+            'test'
+        ],
+        WithDecryption=False)
+
+    len(response['Parameters']).should.equal(1)
+    response['Parameters'][0]['Name'].should.equal('test')
+    response['Parameters'][0]['Value'].should.equal('kms:default:value')
+    response['Parameters'][0]['Type'].should.equal('SecureString')
+
+    response = client.get_parameters(
+        Names=[
+            'test'
+        ],
+        WithDecryption=True)
+
+    len(response['Parameters']).should.equal(1)
+    response['Parameters'][0]['Name'].should.equal('test')
+    response['Parameters'][0]['Value'].should.equal('value')
+    response['Parameters'][0]['Type'].should.equal('SecureString')
+
+
+@mock_ssm
+def test_put_parameter_secure_custom_kms():
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    client.put_parameter(
+        Name='test',
+        Description='A test parameter',
+        Value='value',
+        Type='SecureString',
+        KeyId='foo')
+
+    response = client.get_parameters(
+        Names=[
+            'test'
+        ],
+        WithDecryption=False)
+
+    len(response['Parameters']).should.equal(1)
+    response['Parameters'][0]['Name'].should.equal('test')
+    response['Parameters'][0]['Value'].should.equal('kms:foo:value')
+    response['Parameters'][0]['Type'].should.equal('SecureString')
+
+    response = client.get_parameters(
+        Names=[
+            'test'
+        ],
+        WithDecryption=True)
+
+    len(response['Parameters']).should.equal(1)
+    response['Parameters'][0]['Name'].should.equal('test')
+    response['Parameters'][0]['Value'].should.equal('value')
+    response['Parameters'][0]['Type'].should.equal('SecureString')
+
+
+@mock_ssm
+def test_add_remove_list_tags_for_resource():
+    client = boto3.client('ssm', region_name='us-east-1')
+
+    client.add_tags_to_resource(
+        ResourceId='test',
+        ResourceType='Parameter',
+        Tags=[{'Key': 'test-key', 'Value': 'test-value'}]
+    )
+
+    response = client.list_tags_for_resource(
+        ResourceId='test',
+        ResourceType='Parameter'
+    )
+    len(response['TagList']).should.equal(1)
+    response['TagList'][0]['Key'].should.equal('test-key')
+    response['TagList'][0]['Value'].should.equal('test-value')
+
+    client.remove_tags_from_resource(
+        ResourceId='test',
+        ResourceType='Parameter',
+        TagKeys=['test-key']
+    )
+
+    response = client.list_tags_for_resource(
+        ResourceId='test',
+        ResourceType='Parameter'
+    )
+    len(response['TagList']).should.equal(0)
+
+
+@mock_ssm
+def test_send_command():
+    ssm_document = 'AWS-RunShellScript'
+    params = {'commands': ['#!/bin/bash\necho \'hello world\'']}
+
+    client = boto3.client('ssm', region_name='us-east-1')
+    # note the timeout is determined server-side, so this is a simpler check.
+ before = datetime.datetime.now() + + response = client.send_command( + InstanceIds=['i-123456'], + DocumentName=ssm_document, + Parameters=params, + OutputS3Region='us-east-2', + OutputS3BucketName='the-bucket', + OutputS3KeyPrefix='pref' + ) + cmd = response['Command'] + + cmd['CommandId'].should_not.be(None) + cmd['DocumentName'].should.equal(ssm_document) + cmd['Parameters'].should.equal(params) + + cmd['OutputS3Region'].should.equal('us-east-2') + cmd['OutputS3BucketName'].should.equal('the-bucket') + cmd['OutputS3KeyPrefix'].should.equal('pref') + + cmd['ExpiresAfter'].should.be.greater_than(before) + + # test sending a command without any optional parameters + response = client.send_command( + DocumentName=ssm_document) + + cmd = response['Command'] + + cmd['CommandId'].should_not.be(None) + cmd['DocumentName'].should.equal(ssm_document) + + +@mock_ssm +def test_list_commands(): + client = boto3.client('ssm', region_name='us-east-1') + + ssm_document = 'AWS-RunShellScript' + params = {'commands': ['#!/bin/bash\necho \'hello world\'']} + + response = client.send_command( + InstanceIds=['i-123456'], + DocumentName=ssm_document, + Parameters=params, + OutputS3Region='us-east-2', + OutputS3BucketName='the-bucket', + OutputS3KeyPrefix='pref') + + cmd = response['Command'] + cmd_id = cmd['CommandId'] + + # get the command by id + response = client.list_commands( + CommandId=cmd_id) + + cmds = response['Commands'] + len(cmds).should.equal(1) + cmds[0]['CommandId'].should.equal(cmd_id) + + # add another command with the same instance id to test listing by + # instance id + client.send_command( + InstanceIds=['i-123456'], + DocumentName=ssm_document) + + response = client.list_commands( + InstanceId='i-123456') + + cmds = response['Commands'] + len(cmds).should.equal(2) + + for cmd in cmds: + cmd['InstanceIds'].should.contain('i-123456') + + # test the error case for an invalid command id + with assert_raises(ClientError): + response = client.list_commands( + CommandId=str(uuid.uuid4())) + +@mock_ssm +def test_get_command_invocation(): + client = boto3.client('ssm', region_name='us-east-1') + + ssm_document = 'AWS-RunShellScript' + params = {'commands': ['#!/bin/bash\necho \'hello world\'']} + + response = client.send_command( + InstanceIds=['i-123456', 'i-234567', 'i-345678'], + DocumentName=ssm_document, + Parameters=params, + OutputS3Region='us-east-2', + OutputS3BucketName='the-bucket', + OutputS3KeyPrefix='pref') + + cmd = response['Command'] + cmd_id = cmd['CommandId'] + + instance_id = 'i-345678' + invocation_response = client.get_command_invocation( + CommandId=cmd_id, + InstanceId=instance_id, + PluginName='aws:runShellScript') + + invocation_response['CommandId'].should.equal(cmd_id) + invocation_response['InstanceId'].should.equal(instance_id) + + # test the error case for an invalid instance id + with assert_raises(ClientError): + invocation_response = client.get_command_invocation( + CommandId=cmd_id, + InstanceId='i-FAKE') + + # test the error case for an invalid plugin name + with assert_raises(ClientError): + invocation_response = client.get_command_invocation( + CommandId=cmd_id, + InstanceId=instance_id, + PluginName='FAKE') + +@mock_ssm +@mock_cloudformation +def test_get_command_invocations_from_stack(): + stack_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Test Stack", + "Resources": { + "EC2Instance1": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-test-image-id", + "KeyName": "test", + "InstanceType": "t2.micro", + 
"Tags": [ + { + "Key": "Test Description", + "Value": "Test tag" + }, + { + "Key": "Test Name", + "Value": "Name tag for tests" + } + ] + } + } + }, + "Outputs": { + "test": { + "Description": "Test Output", + "Value": "Test output value", + "Export": { + "Name": "Test value to export" + } + }, + "PublicIP": { + "Value": "Test public ip" + } + } + } + + cloudformation_client = boto3.client( + 'cloudformation', + region_name='us-east-1') + + stack_template_str = json.dumps(stack_template) + + response = cloudformation_client.create_stack( + StackName='test_stack', + TemplateBody=stack_template_str, + Capabilities=('CAPABILITY_IAM', )) + + client = boto3.client('ssm', region_name='us-east-1') + + ssm_document = 'AWS-RunShellScript' + params = {'commands': ['#!/bin/bash\necho \'hello world\'']} + + response = client.send_command( + Targets=[{ + 'Key': 'tag:aws:cloudformation:stack-name', + 'Values': ('test_stack', )}], + DocumentName=ssm_document, + Parameters=params, + OutputS3Region='us-east-2', + OutputS3BucketName='the-bucket', + OutputS3KeyPrefix='pref') + + cmd = response['Command'] + cmd_id = cmd['CommandId'] + instance_ids = cmd['InstanceIds'] + + invocation_response = client.get_command_invocation( + CommandId=cmd_id, + InstanceId=instance_ids[0], + PluginName='aws:runShellScript') diff --git a/tests/test_sts/test_server.py b/tests/test_sts/test_server.py index 40260a49f..1cff6b0af 100644 --- a/tests/test_sts/test_server.py +++ b/tests/test_sts/test_server.py @@ -1,39 +1,39 @@ -from __future__ import unicode_literals -import sure # noqa - -import moto.server as server - -''' -Test the different server responses -''' - - -def test_sts_get_session_token(): - backend = server.create_backend_app("sts") - test_client = backend.test_client() - - res = test_client.get('/?Action=GetSessionToken') - res.status_code.should.equal(200) - res.data.should.contain(b"SessionToken") - res.data.should.contain(b"AccessKeyId") - - -def test_sts_get_federation_token(): - backend = server.create_backend_app("sts") - test_client = backend.test_client() - - res = test_client.get('/?Action=GetFederationToken&Name=Bob') - res.status_code.should.equal(200) - res.data.should.contain(b"SessionToken") - res.data.should.contain(b"AccessKeyId") - - -def test_sts_get_caller_identity(): - backend = server.create_backend_app("sts") - test_client = backend.test_client() - - res = test_client.get('/?Action=GetCallerIdentity') - res.status_code.should.equal(200) - res.data.should.contain(b"Arn") - res.data.should.contain(b"UserId") - res.data.should.contain(b"Account") +from __future__ import unicode_literals +import sure # noqa + +import moto.server as server + +''' +Test the different server responses +''' + + +def test_sts_get_session_token(): + backend = server.create_backend_app("sts") + test_client = backend.test_client() + + res = test_client.get('/?Action=GetSessionToken') + res.status_code.should.equal(200) + res.data.should.contain(b"SessionToken") + res.data.should.contain(b"AccessKeyId") + + +def test_sts_get_federation_token(): + backend = server.create_backend_app("sts") + test_client = backend.test_client() + + res = test_client.get('/?Action=GetFederationToken&Name=Bob') + res.status_code.should.equal(200) + res.data.should.contain(b"SessionToken") + res.data.should.contain(b"AccessKeyId") + + +def test_sts_get_caller_identity(): + backend = server.create_backend_app("sts") + test_client = backend.test_client() + + res = test_client.get('/?Action=GetCallerIdentity') + res.status_code.should.equal(200) + 
res.data.should.contain(b"Arn") + res.data.should.contain(b"UserId") + res.data.should.contain(b"Account") diff --git a/tests/test_sts/test_sts.py b/tests/test_sts/test_sts.py index 4e0e52606..61ab76a29 100644 --- a/tests/test_sts/test_sts.py +++ b/tests/test_sts/test_sts.py @@ -1,84 +1,84 @@ -from __future__ import unicode_literals -import json - -import boto -import boto3 -from freezegun import freeze_time -import sure # noqa - -from moto import mock_sts, mock_sts_deprecated - - -@freeze_time("2012-01-01 12:00:00") -@mock_sts_deprecated -def test_get_session_token(): - conn = boto.connect_sts() - token = conn.get_session_token(duration=123) - - token.expiration.should.equal('2012-01-01T12:02:03.000Z') - token.session_token.should.equal( - "AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE") - token.access_key.should.equal("AKIAIOSFODNN7EXAMPLE") - token.secret_key.should.equal("wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") - - -@freeze_time("2012-01-01 12:00:00") -@mock_sts_deprecated -def test_get_federation_token(): - conn = boto.connect_sts() - token = conn.get_federation_token(duration=123, name="Bob") - - token.credentials.expiration.should.equal('2012-01-01T12:02:03.000Z') - token.credentials.session_token.should.equal( - "AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQWLWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGdQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz+scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA==") - token.credentials.access_key.should.equal("AKIAIOSFODNN7EXAMPLE") - token.credentials.secret_key.should.equal( - "wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") - token.federated_user_arn.should.equal( - "arn:aws:sts::123456789012:federated-user/Bob") - token.federated_user_id.should.equal("123456789012:Bob") - - -@freeze_time("2012-01-01 12:00:00") -@mock_sts_deprecated -def test_assume_role(): - conn = boto.connect_sts() - - policy = json.dumps({ - "Statement": [ - { - "Sid": "Stmt13690092345534", - "Action": [ - "S3:ListBucket" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:s3:::foobar-tester" - ] - }, - ] - }) - s3_role = "arn:aws:iam::123456789012:role/test-role" - role = conn.assume_role(s3_role, "session-name", - policy, duration_seconds=123) - - credentials = role.credentials - credentials.expiration.should.equal('2012-01-01T12:02:03.000Z') - credentials.session_token.should.equal( - "BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE") - credentials.access_key.should.equal("AKIAIOSFODNN7EXAMPLE") - credentials.secret_key.should.equal( - "aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") - - role.user.arn.should.equal("arn:aws:iam::123456789012:role/test-role") - role.user.assume_role_id.should.contain("session-name") - - -@mock_sts -def test_get_caller_identity(): - identity = boto3.client( - "sts", region_name='us-east-1').get_caller_identity() - - identity['Arn'].should.equal('arn:aws:sts::123456789012:user/moto') - identity['UserId'].should.equal('AKIAIOSFODNN7EXAMPLE') - identity['Account'].should.equal('123456789012') +from __future__ import 
unicode_literals +import json + +import boto +import boto3 +from freezegun import freeze_time +import sure # noqa + +from moto import mock_sts, mock_sts_deprecated + + +@freeze_time("2012-01-01 12:00:00") +@mock_sts_deprecated +def test_get_session_token(): + conn = boto.connect_sts() + token = conn.get_session_token(duration=123) + + token.expiration.should.equal('2012-01-01T12:02:03.000Z') + token.session_token.should.equal( + "AQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE") + token.access_key.should.equal("AKIAIOSFODNN7EXAMPLE") + token.secret_key.should.equal("wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") + + +@freeze_time("2012-01-01 12:00:00") +@mock_sts_deprecated +def test_get_federation_token(): + conn = boto.connect_sts() + token = conn.get_federation_token(duration=123, name="Bob") + + token.credentials.expiration.should.equal('2012-01-01T12:02:03.000Z') + token.credentials.session_token.should.equal( + "AQoDYXdzEPT//////////wEXAMPLEtc764bNrC9SAPBSM22wDOk4x4HIZ8j4FZTwdQWLWsKWHGBuFqwAeMicRXmxfpSPfIeoIYRqTflfKD8YUuwthAx7mSEI/qkPpKPi/kMcGdQrmGdeehM4IC1NtBmUpp2wUE8phUZampKsburEDy0KPkyQDYwT7WZ0wq5VSXDvp75YU9HFvlRd8Tx6q6fE8YQcHNVXAkiY9q6d+xo0rKwT38xVqr7ZD0u0iPPkUL64lIZbqBAz+scqKmlzm8FDrypNC9Yjc8fPOLn9FX9KSYvKTr4rvx3iSIlTJabIQwj2ICCR/oLxBA==") + token.credentials.access_key.should.equal("AKIAIOSFODNN7EXAMPLE") + token.credentials.secret_key.should.equal( + "wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") + token.federated_user_arn.should.equal( + "arn:aws:sts::123456789012:federated-user/Bob") + token.federated_user_id.should.equal("123456789012:Bob") + + +@freeze_time("2012-01-01 12:00:00") +@mock_sts_deprecated +def test_assume_role(): + conn = boto.connect_sts() + + policy = json.dumps({ + "Statement": [ + { + "Sid": "Stmt13690092345534", + "Action": [ + "S3:ListBucket" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:s3:::foobar-tester" + ] + }, + ] + }) + s3_role = "arn:aws:iam::123456789012:role/test-role" + role = conn.assume_role(s3_role, "session-name", + policy, duration_seconds=123) + + credentials = role.credentials + credentials.expiration.should.equal('2012-01-01T12:02:03.000Z') + credentials.session_token.should.equal( + "BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4IgRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE") + credentials.access_key.should.equal("AKIAIOSFODNN7EXAMPLE") + credentials.secret_key.should.equal( + "aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY") + + role.user.arn.should.equal("arn:aws:iam::123456789012:role/test-role") + role.user.assume_role_id.should.contain("session-name") + + +@mock_sts +def test_get_caller_identity(): + identity = boto3.client( + "sts", region_name='us-east-1').get_caller_identity() + + identity['Arn'].should.equal('arn:aws:sts::123456789012:user/moto') + identity['UserId'].should.equal('AKIAIOSFODNN7EXAMPLE') + identity['Account'].should.equal('123456789012') diff --git a/tests/test_swf/models/test_activity_task.py b/tests/test_swf/models/test_activity_task.py index 41c88cafe..dfcaf9801 100644 --- a/tests/test_swf/models/test_activity_task.py +++ b/tests/test_swf/models/test_activity_task.py @@ -1,154 +1,154 @@ -from freezegun import freeze_time -import sure # noqa - -from moto.swf.exceptions import 
SWFWorkflowExecutionClosedError -from moto.swf.models import ( - ActivityTask, - ActivityType, - Timeout, -) - -from ..utils import ( - ACTIVITY_TASK_TIMEOUTS, - make_workflow_execution, - process_first_timeout, -) - - -def test_activity_task_creation(): - wfe = make_workflow_execution() - task = ActivityTask( - activity_id="my-activity-123", - activity_type="foo", - input="optional", - scheduled_event_id=117, - workflow_execution=wfe, - timeouts=ACTIVITY_TASK_TIMEOUTS, - ) - task.workflow_execution.should.equal(wfe) - task.state.should.equal("SCHEDULED") - task.task_token.should_not.be.empty - task.started_event_id.should.be.none - - task.start(123) - task.state.should.equal("STARTED") - task.started_event_id.should.equal(123) - - task.complete() - task.state.should.equal("COMPLETED") - - # NB: this doesn't make any sense for SWF, a task shouldn't go from a - # "COMPLETED" state to a "FAILED" one, but this is an internal state on our - # side and we don't care about invalid state transitions for now. - task.fail() - task.state.should.equal("FAILED") - - -def test_activity_task_full_dict_representation(): - wfe = make_workflow_execution() - at = ActivityTask( - activity_id="my-activity-123", - activity_type=ActivityType("foo", "v1.0"), - input="optional", - scheduled_event_id=117, - timeouts=ACTIVITY_TASK_TIMEOUTS, - workflow_execution=wfe, - ) - at.start(1234) - - fd = at.to_full_dict() - fd["activityId"].should.equal("my-activity-123") - fd["activityType"]["version"].should.equal("v1.0") - fd["input"].should.equal("optional") - fd["startedEventId"].should.equal(1234) - fd.should.contain("taskToken") - fd["workflowExecution"].should.equal(wfe.to_short_dict()) - - at.start(1234) - fd = at.to_full_dict() - fd["startedEventId"].should.equal(1234) - - -def test_activity_task_reset_heartbeat_clock(): - wfe = make_workflow_execution() - - with freeze_time("2015-01-01 12:00:00"): - task = ActivityTask( - activity_id="my-activity-123", - activity_type="foo", - input="optional", - scheduled_event_id=117, - timeouts=ACTIVITY_TASK_TIMEOUTS, - workflow_execution=wfe, - ) - - task.last_heartbeat_timestamp.should.equal(1420113600.0) - - with freeze_time("2015-01-01 13:00:00"): - task.reset_heartbeat_clock() - - task.last_heartbeat_timestamp.should.equal(1420117200.0) - - -def test_activity_task_first_timeout(): - wfe = make_workflow_execution() - - with freeze_time("2015-01-01 12:00:00"): - task = ActivityTask( - activity_id="my-activity-123", - activity_type="foo", - input="optional", - scheduled_event_id=117, - timeouts=ACTIVITY_TASK_TIMEOUTS, - workflow_execution=wfe, - ) - task.first_timeout().should.be.none - - # activity task timeout is 300s == 5mins - with freeze_time("2015-01-01 12:06:00"): - task.first_timeout().should.be.a(Timeout) - process_first_timeout(task) - task.state.should.equal("TIMED_OUT") - task.timeout_type.should.equal("HEARTBEAT") - - -def test_activity_task_cannot_timeout_on_closed_workflow_execution(): - with freeze_time("2015-01-01 12:00:00"): - wfe = make_workflow_execution() - wfe.start() - - with freeze_time("2015-01-01 13:58:00"): - task = ActivityTask( - activity_id="my-activity-123", - activity_type="foo", - input="optional", - scheduled_event_id=117, - timeouts=ACTIVITY_TASK_TIMEOUTS, - workflow_execution=wfe, - ) - - with freeze_time("2015-01-01 14:10:00"): - task.first_timeout().should.be.a(Timeout) - wfe.first_timeout().should.be.a(Timeout) - process_first_timeout(wfe) - task.first_timeout().should.be.none - - -def 
test_activity_task_cannot_change_state_on_closed_workflow_execution(): - wfe = make_workflow_execution() - wfe.start() - - task = ActivityTask( - activity_id="my-activity-123", - activity_type="foo", - input="optional", - scheduled_event_id=117, - timeouts=ACTIVITY_TASK_TIMEOUTS, - workflow_execution=wfe, - ) - wfe.complete(123) - - task.timeout.when.called_with(Timeout(task, 0, "foo")).should.throw( - SWFWorkflowExecutionClosedError) - task.complete.when.called_with().should.throw(SWFWorkflowExecutionClosedError) - task.fail.when.called_with().should.throw(SWFWorkflowExecutionClosedError) +from freezegun import freeze_time +import sure # noqa + +from moto.swf.exceptions import SWFWorkflowExecutionClosedError +from moto.swf.models import ( + ActivityTask, + ActivityType, + Timeout, +) + +from ..utils import ( + ACTIVITY_TASK_TIMEOUTS, + make_workflow_execution, + process_first_timeout, +) + + +def test_activity_task_creation(): + wfe = make_workflow_execution() + task = ActivityTask( + activity_id="my-activity-123", + activity_type="foo", + input="optional", + scheduled_event_id=117, + workflow_execution=wfe, + timeouts=ACTIVITY_TASK_TIMEOUTS, + ) + task.workflow_execution.should.equal(wfe) + task.state.should.equal("SCHEDULED") + task.task_token.should_not.be.empty + task.started_event_id.should.be.none + + task.start(123) + task.state.should.equal("STARTED") + task.started_event_id.should.equal(123) + + task.complete() + task.state.should.equal("COMPLETED") + + # NB: this doesn't make any sense for SWF, a task shouldn't go from a + # "COMPLETED" state to a "FAILED" one, but this is an internal state on our + # side and we don't care about invalid state transitions for now. + task.fail() + task.state.should.equal("FAILED") + + +def test_activity_task_full_dict_representation(): + wfe = make_workflow_execution() + at = ActivityTask( + activity_id="my-activity-123", + activity_type=ActivityType("foo", "v1.0"), + input="optional", + scheduled_event_id=117, + timeouts=ACTIVITY_TASK_TIMEOUTS, + workflow_execution=wfe, + ) + at.start(1234) + + fd = at.to_full_dict() + fd["activityId"].should.equal("my-activity-123") + fd["activityType"]["version"].should.equal("v1.0") + fd["input"].should.equal("optional") + fd["startedEventId"].should.equal(1234) + fd.should.contain("taskToken") + fd["workflowExecution"].should.equal(wfe.to_short_dict()) + + at.start(1234) + fd = at.to_full_dict() + fd["startedEventId"].should.equal(1234) + + +def test_activity_task_reset_heartbeat_clock(): + wfe = make_workflow_execution() + + with freeze_time("2015-01-01 12:00:00"): + task = ActivityTask( + activity_id="my-activity-123", + activity_type="foo", + input="optional", + scheduled_event_id=117, + timeouts=ACTIVITY_TASK_TIMEOUTS, + workflow_execution=wfe, + ) + + task.last_heartbeat_timestamp.should.equal(1420113600.0) + + with freeze_time("2015-01-01 13:00:00"): + task.reset_heartbeat_clock() + + task.last_heartbeat_timestamp.should.equal(1420117200.0) + + +def test_activity_task_first_timeout(): + wfe = make_workflow_execution() + + with freeze_time("2015-01-01 12:00:00"): + task = ActivityTask( + activity_id="my-activity-123", + activity_type="foo", + input="optional", + scheduled_event_id=117, + timeouts=ACTIVITY_TASK_TIMEOUTS, + workflow_execution=wfe, + ) + task.first_timeout().should.be.none + + # activity task timeout is 300s == 5mins + with freeze_time("2015-01-01 12:06:00"): + task.first_timeout().should.be.a(Timeout) + process_first_timeout(task) + task.state.should.equal("TIMED_OUT") + 
task.timeout_type.should.equal("HEARTBEAT") + + +def test_activity_task_cannot_timeout_on_closed_workflow_execution(): + with freeze_time("2015-01-01 12:00:00"): + wfe = make_workflow_execution() + wfe.start() + + with freeze_time("2015-01-01 13:58:00"): + task = ActivityTask( + activity_id="my-activity-123", + activity_type="foo", + input="optional", + scheduled_event_id=117, + timeouts=ACTIVITY_TASK_TIMEOUTS, + workflow_execution=wfe, + ) + + with freeze_time("2015-01-01 14:10:00"): + task.first_timeout().should.be.a(Timeout) + wfe.first_timeout().should.be.a(Timeout) + process_first_timeout(wfe) + task.first_timeout().should.be.none + + +def test_activity_task_cannot_change_state_on_closed_workflow_execution(): + wfe = make_workflow_execution() + wfe.start() + + task = ActivityTask( + activity_id="my-activity-123", + activity_type="foo", + input="optional", + scheduled_event_id=117, + timeouts=ACTIVITY_TASK_TIMEOUTS, + workflow_execution=wfe, + ) + wfe.complete(123) + + task.timeout.when.called_with(Timeout(task, 0, "foo")).should.throw( + SWFWorkflowExecutionClosedError) + task.complete.when.called_with().should.throw(SWFWorkflowExecutionClosedError) + task.fail.when.called_with().should.throw(SWFWorkflowExecutionClosedError) diff --git a/tests/test_swf/models/test_decision_task.py b/tests/test_swf/models/test_decision_task.py index b5e23eaca..b593db5ff 100644 --- a/tests/test_swf/models/test_decision_task.py +++ b/tests/test_swf/models/test_decision_task.py @@ -1,80 +1,80 @@ -from boto.swf.exceptions import SWFResponseError -from freezegun import freeze_time -from sure import expect - -from moto.swf.models import DecisionTask, Timeout -from moto.swf.exceptions import SWFWorkflowExecutionClosedError - -from ..utils import make_workflow_execution, process_first_timeout - - -def test_decision_task_creation(): - wfe = make_workflow_execution() - dt = DecisionTask(wfe, 123) - dt.workflow_execution.should.equal(wfe) - dt.state.should.equal("SCHEDULED") - dt.task_token.should_not.be.empty - dt.started_event_id.should.be.none - - -def test_decision_task_full_dict_representation(): - wfe = make_workflow_execution() - wft = wfe.workflow_type - dt = DecisionTask(wfe, 123) - - fd = dt.to_full_dict() - fd["events"].should.be.a("list") - fd["previousStartedEventId"].should.equal(0) - fd.should_not.contain("startedEventId") - fd.should.contain("taskToken") - fd["workflowExecution"].should.equal(wfe.to_short_dict()) - fd["workflowType"].should.equal(wft.to_short_dict()) - - dt.start(1234) - fd = dt.to_full_dict() - fd["startedEventId"].should.equal(1234) - - -def test_decision_task_first_timeout(): - wfe = make_workflow_execution() - dt = DecisionTask(wfe, 123) - dt.first_timeout().should.be.none - - with freeze_time("2015-01-01 12:00:00"): - dt.start(1234) - dt.first_timeout().should.be.none - - # activity task timeout is 300s == 5mins - with freeze_time("2015-01-01 12:06:00"): - dt.first_timeout().should.be.a(Timeout) - - dt.complete() - dt.first_timeout().should.be.none - - -def test_decision_task_cannot_timeout_on_closed_workflow_execution(): - with freeze_time("2015-01-01 12:00:00"): - wfe = make_workflow_execution() - wfe.start() - - with freeze_time("2015-01-01 13:55:00"): - dt = DecisionTask(wfe, 123) - dt.start(1234) - - with freeze_time("2015-01-01 14:10:00"): - dt.first_timeout().should.be.a(Timeout) - wfe.first_timeout().should.be.a(Timeout) - process_first_timeout(wfe) - dt.first_timeout().should.be.none - - -def test_decision_task_cannot_change_state_on_closed_workflow_execution(): - 
wfe = make_workflow_execution()
-    wfe.start()
-    task = DecisionTask(wfe, 123)
-
-    wfe.complete(123)
-
-    task.timeout.when.called_with(Timeout(task, 0, "foo")).should.throw(
-        SWFWorkflowExecutionClosedError)
-    task.complete.when.called_with().should.throw(SWFWorkflowExecutionClosedError)
+from boto.swf.exceptions import SWFResponseError
+from freezegun import freeze_time
+from sure import expect
+
+from moto.swf.models import DecisionTask, Timeout
+from moto.swf.exceptions import SWFWorkflowExecutionClosedError
+
+from ..utils import make_workflow_execution, process_first_timeout
+
+
+def test_decision_task_creation():
+    wfe = make_workflow_execution()
+    dt = DecisionTask(wfe, 123)
+    dt.workflow_execution.should.equal(wfe)
+    dt.state.should.equal("SCHEDULED")
+    dt.task_token.should_not.be.empty
+    dt.started_event_id.should.be.none
+
+
+def test_decision_task_full_dict_representation():
+    wfe = make_workflow_execution()
+    wft = wfe.workflow_type
+    dt = DecisionTask(wfe, 123)
+
+    fd = dt.to_full_dict()
+    fd["events"].should.be.a("list")
+    fd["previousStartedEventId"].should.equal(0)
+    fd.should_not.contain("startedEventId")
+    fd.should.contain("taskToken")
+    fd["workflowExecution"].should.equal(wfe.to_short_dict())
+    fd["workflowType"].should.equal(wft.to_short_dict())
+
+    dt.start(1234)
+    fd = dt.to_full_dict()
+    fd["startedEventId"].should.equal(1234)
+
+
+def test_decision_task_first_timeout():
+    wfe = make_workflow_execution()
+    dt = DecisionTask(wfe, 123)
+    dt.first_timeout().should.be.none
+
+    with freeze_time("2015-01-01 12:00:00"):
+        dt.start(1234)
+        dt.first_timeout().should.be.none
+
+    # decision task timeout is 300s == 5mins
+    with freeze_time("2015-01-01 12:06:00"):
+        dt.first_timeout().should.be.a(Timeout)
+
+    dt.complete()
+    dt.first_timeout().should.be.none
+
+
+def test_decision_task_cannot_timeout_on_closed_workflow_execution():
+    with freeze_time("2015-01-01 12:00:00"):
+        wfe = make_workflow_execution()
+        wfe.start()
+
+    with freeze_time("2015-01-01 13:55:00"):
+        dt = DecisionTask(wfe, 123)
+        dt.start(1234)
+
+    with freeze_time("2015-01-01 14:10:00"):
+        dt.first_timeout().should.be.a(Timeout)
+        wfe.first_timeout().should.be.a(Timeout)
+        process_first_timeout(wfe)
+        dt.first_timeout().should.be.none
+
+
+def test_decision_task_cannot_change_state_on_closed_workflow_execution():
+    wfe = make_workflow_execution()
+    wfe.start()
+    task = DecisionTask(wfe, 123)
+
+    wfe.complete(123)
+
+    task.timeout.when.called_with(Timeout(task, 0, "foo")).should.throw(
+        SWFWorkflowExecutionClosedError)
+    task.complete.when.called_with().should.throw(SWFWorkflowExecutionClosedError)
diff --git a/tests/test_swf/models/test_domain.py b/tests/test_swf/models/test_domain.py
index 1a8a1268d..1dc5cec65 100644
--- a/tests/test_swf/models/test_domain.py
+++ b/tests/test_swf/models/test_domain.py
@@ -1,119 +1,119 @@
-from collections import namedtuple
-import sure  # noqa
-
-from moto.swf.exceptions import SWFUnknownResourceFault
-from moto.swf.models import Domain
-
-# Ensure 'assert_raises' context manager support for Python 2.6
-import tests.backport_assert_raises  # noqa
-
-# Fake WorkflowExecution for tests purposes
-WorkflowExecution = namedtuple(
-    "WorkflowExecution",
-    ["workflow_id", "run_id", "execution_status", "open"]
-)
-
-
-def test_domain_short_dict_representation():
-    domain = Domain("foo", "52")
-    domain.to_short_dict().should.equal(
-        {"name": "foo", "status": "REGISTERED"})
-
-    domain.description = "foo bar"
-    domain.to_short_dict()["description"].should.equal("foo bar")
-
-
-def test_domain_full_dict_representation():
-    domain = Domain("foo", "52")
-
-    domain.to_full_dict()["domainInfo"].should.equal(domain.to_short_dict())
-    _config = domain.to_full_dict()["configuration"]
-    _config["workflowExecutionRetentionPeriodInDays"].should.equal("52")
-
-
-def test_domain_string_representation():
-    domain = Domain("my-domain", "60")
-    str(domain).should.equal("Domain(name: my-domain, status: REGISTERED)")
-
-
-def test_domain_add_to_activity_task_list():
-    domain = Domain("my-domain", "60")
-    domain.add_to_activity_task_list("foo", "bar")
-    domain.activity_task_lists.should.equal({
-        "foo": ["bar"]
-    })
-
-
-def test_domain_activity_tasks():
-    domain = Domain("my-domain", "60")
-    domain.add_to_activity_task_list("foo", "bar")
-    domain.add_to_activity_task_list("other", "baz")
-    sorted(domain.activity_tasks).should.equal(["bar", "baz"])
-
-
-def test_domain_add_to_decision_task_list():
-    domain = Domain("my-domain", "60")
-    domain.add_to_decision_task_list("foo", "bar")
-    domain.decision_task_lists.should.equal({
-        "foo": ["bar"]
-    })
-
-
-def test_domain_decision_tasks():
-    domain = Domain("my-domain", "60")
-    domain.add_to_decision_task_list("foo", "bar")
-    domain.add_to_decision_task_list("other", "baz")
-    sorted(domain.decision_tasks).should.equal(["bar", "baz"])
-
-
-def test_domain_get_workflow_execution():
-    domain = Domain("my-domain", "60")
-
-    wfe1 = WorkflowExecution(
-        workflow_id="wf-id-1", run_id="run-id-1", execution_status="OPEN", open=True)
-    wfe2 = WorkflowExecution(
-        workflow_id="wf-id-1", run_id="run-id-2", execution_status="CLOSED", open=False)
-    wfe3 = WorkflowExecution(
-        workflow_id="wf-id-2", run_id="run-id-3", execution_status="OPEN", open=True)
-    wfe4 = WorkflowExecution(
-        workflow_id="wf-id-3", run_id="run-id-4", execution_status="CLOSED", open=False)
-    domain.workflow_executions = [wfe1, wfe2, wfe3, wfe4]
-
-    # get workflow execution through workflow_id and run_id
-    domain.get_workflow_execution(
-        "wf-id-1", run_id="run-id-1").should.equal(wfe1)
-    domain.get_workflow_execution(
-        "wf-id-1", run_id="run-id-2").should.equal(wfe2)
-    domain.get_workflow_execution(
-        "wf-id-3", run_id="run-id-4").should.equal(wfe4)
-
-    domain.get_workflow_execution.when.called_with(
-        "wf-id-1", run_id="non-existent"
-    ).should.throw(
-        SWFUnknownResourceFault,
-    )
-
-    # get OPEN workflow execution by default if no run_id
-    domain.get_workflow_execution("wf-id-1").should.equal(wfe1)
-    domain.get_workflow_execution.when.called_with(
-        "wf-id-3"
-    ).should.throw(
-        SWFUnknownResourceFault
-    )
-    domain.get_workflow_execution.when.called_with(
-        "wf-id-non-existent"
-    ).should.throw(
-        SWFUnknownResourceFault
-    )
-
-    # raise_if_closed attribute
-    domain.get_workflow_execution(
-        "wf-id-1", run_id="run-id-1", raise_if_closed=True).should.equal(wfe1)
-    domain.get_workflow_execution.when.called_with(
-        "wf-id-3", run_id="run-id-4", raise_if_closed=True
-    ).should.throw(
-        SWFUnknownResourceFault
-    )
-
-    # raise_if_none attribute
-    domain.get_workflow_execution("foo", raise_if_none=False).should.be.none
+from collections import namedtuple
+import sure  # noqa
+
+from moto.swf.exceptions import SWFUnknownResourceFault
+from moto.swf.models import Domain
+
+# Ensure 'assert_raises' context manager support for Python 2.6
+import tests.backport_assert_raises  # noqa
+
+# Fake WorkflowExecution for test purposes
+WorkflowExecution = namedtuple(
+    "WorkflowExecution",
+    ["workflow_id", "run_id", "execution_status", "open"]
+)
+
+
+def test_domain_short_dict_representation():
+    
domain = Domain("foo", "52") + domain.to_short_dict().should.equal( + {"name": "foo", "status": "REGISTERED"}) + + domain.description = "foo bar" + domain.to_short_dict()["description"].should.equal("foo bar") + + +def test_domain_full_dict_representation(): + domain = Domain("foo", "52") + + domain.to_full_dict()["domainInfo"].should.equal(domain.to_short_dict()) + _config = domain.to_full_dict()["configuration"] + _config["workflowExecutionRetentionPeriodInDays"].should.equal("52") + + +def test_domain_string_representation(): + domain = Domain("my-domain", "60") + str(domain).should.equal("Domain(name: my-domain, status: REGISTERED)") + + +def test_domain_add_to_activity_task_list(): + domain = Domain("my-domain", "60") + domain.add_to_activity_task_list("foo", "bar") + domain.activity_task_lists.should.equal({ + "foo": ["bar"] + }) + + +def test_domain_activity_tasks(): + domain = Domain("my-domain", "60") + domain.add_to_activity_task_list("foo", "bar") + domain.add_to_activity_task_list("other", "baz") + sorted(domain.activity_tasks).should.equal(["bar", "baz"]) + + +def test_domain_add_to_decision_task_list(): + domain = Domain("my-domain", "60") + domain.add_to_decision_task_list("foo", "bar") + domain.decision_task_lists.should.equal({ + "foo": ["bar"] + }) + + +def test_domain_decision_tasks(): + domain = Domain("my-domain", "60") + domain.add_to_decision_task_list("foo", "bar") + domain.add_to_decision_task_list("other", "baz") + sorted(domain.decision_tasks).should.equal(["bar", "baz"]) + + +def test_domain_get_workflow_execution(): + domain = Domain("my-domain", "60") + + wfe1 = WorkflowExecution( + workflow_id="wf-id-1", run_id="run-id-1", execution_status="OPEN", open=True) + wfe2 = WorkflowExecution( + workflow_id="wf-id-1", run_id="run-id-2", execution_status="CLOSED", open=False) + wfe3 = WorkflowExecution( + workflow_id="wf-id-2", run_id="run-id-3", execution_status="OPEN", open=True) + wfe4 = WorkflowExecution( + workflow_id="wf-id-3", run_id="run-id-4", execution_status="CLOSED", open=False) + domain.workflow_executions = [wfe1, wfe2, wfe3, wfe4] + + # get workflow execution through workflow_id and run_id + domain.get_workflow_execution( + "wf-id-1", run_id="run-id-1").should.equal(wfe1) + domain.get_workflow_execution( + "wf-id-1", run_id="run-id-2").should.equal(wfe2) + domain.get_workflow_execution( + "wf-id-3", run_id="run-id-4").should.equal(wfe4) + + domain.get_workflow_execution.when.called_with( + "wf-id-1", run_id="non-existent" + ).should.throw( + SWFUnknownResourceFault, + ) + + # get OPEN workflow execution by default if no run_id + domain.get_workflow_execution("wf-id-1").should.equal(wfe1) + domain.get_workflow_execution.when.called_with( + "wf-id-3" + ).should.throw( + SWFUnknownResourceFault + ) + domain.get_workflow_execution.when.called_with( + "wf-id-non-existent" + ).should.throw( + SWFUnknownResourceFault + ) + + # raise_if_closed attribute + domain.get_workflow_execution( + "wf-id-1", run_id="run-id-1", raise_if_closed=True).should.equal(wfe1) + domain.get_workflow_execution.when.called_with( + "wf-id-3", run_id="run-id-4", raise_if_closed=True + ).should.throw( + SWFUnknownResourceFault + ) + + # raise_if_none attribute + domain.get_workflow_execution("foo", raise_if_none=False).should.be.none diff --git a/tests/test_swf/models/test_generic_type.py b/tests/test_swf/models/test_generic_type.py index 294df9f84..bea07ce1c 100644 --- a/tests/test_swf/models/test_generic_type.py +++ b/tests/test_swf/models/test_generic_type.py @@ -1,58 +1,58 @@ -from 
moto.swf.models import GenericType -import sure # noqa - - -# Tests for GenericType (ActivityType, WorkflowType) -class FooType(GenericType): - - @property - def kind(self): - return "foo" - - @property - def _configuration_keys(self): - return ["justAnExampleTimeout"] - - -def test_type_short_dict_representation(): - _type = FooType("test-foo", "v1.0") - _type.to_short_dict().should.equal({"name": "test-foo", "version": "v1.0"}) - - -def test_type_medium_dict_representation(): - _type = FooType("test-foo", "v1.0") - _type.to_medium_dict()["fooType"].should.equal(_type.to_short_dict()) - _type.to_medium_dict()["status"].should.equal("REGISTERED") - _type.to_medium_dict().should.contain("creationDate") - _type.to_medium_dict().should_not.contain("deprecationDate") - _type.to_medium_dict().should_not.contain("description") - - _type.description = "foo bar" - _type.to_medium_dict()["description"].should.equal("foo bar") - - _type.status = "DEPRECATED" - _type.to_medium_dict().should.contain("deprecationDate") - - -def test_type_full_dict_representation(): - _type = FooType("test-foo", "v1.0") - _type.to_full_dict()["typeInfo"].should.equal(_type.to_medium_dict()) - _type.to_full_dict()["configuration"].should.equal({}) - - _type.task_list = "foo" - _type.to_full_dict()["configuration"][ - "defaultTaskList"].should.equal({"name": "foo"}) - - _type.just_an_example_timeout = "60" - _type.to_full_dict()["configuration"][ - "justAnExampleTimeout"].should.equal("60") - - _type.non_whitelisted_property = "34" - keys = _type.to_full_dict()["configuration"].keys() - sorted(keys).should.equal(["defaultTaskList", "justAnExampleTimeout"]) - - -def test_type_string_representation(): - _type = FooType("test-foo", "v1.0") - str(_type).should.equal( - "FooType(name: test-foo, version: v1.0, status: REGISTERED)") +from moto.swf.models import GenericType +import sure # noqa + + +# Tests for GenericType (ActivityType, WorkflowType) +class FooType(GenericType): + + @property + def kind(self): + return "foo" + + @property + def _configuration_keys(self): + return ["justAnExampleTimeout"] + + +def test_type_short_dict_representation(): + _type = FooType("test-foo", "v1.0") + _type.to_short_dict().should.equal({"name": "test-foo", "version": "v1.0"}) + + +def test_type_medium_dict_representation(): + _type = FooType("test-foo", "v1.0") + _type.to_medium_dict()["fooType"].should.equal(_type.to_short_dict()) + _type.to_medium_dict()["status"].should.equal("REGISTERED") + _type.to_medium_dict().should.contain("creationDate") + _type.to_medium_dict().should_not.contain("deprecationDate") + _type.to_medium_dict().should_not.contain("description") + + _type.description = "foo bar" + _type.to_medium_dict()["description"].should.equal("foo bar") + + _type.status = "DEPRECATED" + _type.to_medium_dict().should.contain("deprecationDate") + + +def test_type_full_dict_representation(): + _type = FooType("test-foo", "v1.0") + _type.to_full_dict()["typeInfo"].should.equal(_type.to_medium_dict()) + _type.to_full_dict()["configuration"].should.equal({}) + + _type.task_list = "foo" + _type.to_full_dict()["configuration"][ + "defaultTaskList"].should.equal({"name": "foo"}) + + _type.just_an_example_timeout = "60" + _type.to_full_dict()["configuration"][ + "justAnExampleTimeout"].should.equal("60") + + _type.non_whitelisted_property = "34" + keys = _type.to_full_dict()["configuration"].keys() + sorted(keys).should.equal(["defaultTaskList", "justAnExampleTimeout"]) + + +def test_type_string_representation(): + _type = 
FooType("test-foo", "v1.0") + str(_type).should.equal( + "FooType(name: test-foo, version: v1.0, status: REGISTERED)") diff --git a/tests/test_swf/models/test_history_event.py b/tests/test_swf/models/test_history_event.py index b869408ce..fcf4a4a55 100644 --- a/tests/test_swf/models/test_history_event.py +++ b/tests/test_swf/models/test_history_event.py @@ -1,31 +1,31 @@ -from freezegun import freeze_time -import sure # noqa - -from moto.swf.models import HistoryEvent - - -@freeze_time("2015-01-01 12:00:00") -def test_history_event_creation(): - he = HistoryEvent(123, "DecisionTaskStarted", scheduled_event_id=2) - he.event_id.should.equal(123) - he.event_type.should.equal("DecisionTaskStarted") - he.event_timestamp.should.equal(1420113600.0) - - -@freeze_time("2015-01-01 12:00:00") -def test_history_event_to_dict_representation(): - he = HistoryEvent(123, "DecisionTaskStarted", scheduled_event_id=2) - he.to_dict().should.equal({ - "eventId": 123, - "eventType": "DecisionTaskStarted", - "eventTimestamp": 1420113600.0, - "decisionTaskStartedEventAttributes": { - "scheduledEventId": 2 - } - }) - - -def test_history_event_breaks_on_initialization_if_not_implemented(): - HistoryEvent.when.called_with( - 123, "UnknownHistoryEvent" - ).should.throw(NotImplementedError) +from freezegun import freeze_time +import sure # noqa + +from moto.swf.models import HistoryEvent + + +@freeze_time("2015-01-01 12:00:00") +def test_history_event_creation(): + he = HistoryEvent(123, "DecisionTaskStarted", scheduled_event_id=2) + he.event_id.should.equal(123) + he.event_type.should.equal("DecisionTaskStarted") + he.event_timestamp.should.equal(1420113600.0) + + +@freeze_time("2015-01-01 12:00:00") +def test_history_event_to_dict_representation(): + he = HistoryEvent(123, "DecisionTaskStarted", scheduled_event_id=2) + he.to_dict().should.equal({ + "eventId": 123, + "eventType": "DecisionTaskStarted", + "eventTimestamp": 1420113600.0, + "decisionTaskStartedEventAttributes": { + "scheduledEventId": 2 + } + }) + + +def test_history_event_breaks_on_initialization_if_not_implemented(): + HistoryEvent.when.called_with( + 123, "UnknownHistoryEvent" + ).should.throw(NotImplementedError) diff --git a/tests/test_swf/models/test_timeout.py b/tests/test_swf/models/test_timeout.py index fb52652fd..0ee059065 100644 --- a/tests/test_swf/models/test_timeout.py +++ b/tests/test_swf/models/test_timeout.py @@ -1,19 +1,19 @@ -from freezegun import freeze_time -import sure # noqa - -from moto.swf.models import Timeout - -from ..utils import make_workflow_execution - - -def test_timeout_creation(): - wfe = make_workflow_execution() - - # epoch 1420113600 == "2015-01-01 13:00:00" - timeout = Timeout(wfe, 1420117200, "START_TO_CLOSE") - - with freeze_time("2015-01-01 12:00:00"): - timeout.reached.should.be.falsy - - with freeze_time("2015-01-01 13:00:00"): - timeout.reached.should.be.truthy +from freezegun import freeze_time +import sure # noqa + +from moto.swf.models import Timeout + +from ..utils import make_workflow_execution + + +def test_timeout_creation(): + wfe = make_workflow_execution() + + # epoch 1420113600 == "2015-01-01 13:00:00" + timeout = Timeout(wfe, 1420117200, "START_TO_CLOSE") + + with freeze_time("2015-01-01 12:00:00"): + timeout.reached.should.be.falsy + + with freeze_time("2015-01-01 13:00:00"): + timeout.reached.should.be.truthy diff --git a/tests/test_swf/models/test_workflow_execution.py b/tests/test_swf/models/test_workflow_execution.py index 45b91c86a..7271cca7f 100644 --- 
a/tests/test_swf/models/test_workflow_execution.py +++ b/tests/test_swf/models/test_workflow_execution.py @@ -1,501 +1,501 @@ -from freezegun import freeze_time -import sure # noqa - -from moto.swf.models import ( - ActivityType, - Timeout, - WorkflowType, - WorkflowExecution, -) -from moto.swf.exceptions import SWFDefaultUndefinedFault -from ..utils import ( - auto_start_decision_tasks, - get_basic_domain, - get_basic_workflow_type, - make_workflow_execution, -) - - -VALID_ACTIVITY_TASK_ATTRIBUTES = { - "activityId": "my-activity-001", - "activityType": {"name": "test-activity", "version": "v1.1"}, - "taskList": {"name": "task-list-name"}, - "scheduleToStartTimeout": "600", - "scheduleToCloseTimeout": "600", - "startToCloseTimeout": "600", - "heartbeatTimeout": "300", -} - - -def test_workflow_execution_creation(): - domain = get_basic_domain() - wft = get_basic_workflow_type() - wfe = WorkflowExecution(domain, wft, "ab1234", child_policy="TERMINATE") - - wfe.domain.should.equal(domain) - wfe.workflow_type.should.equal(wft) - wfe.child_policy.should.equal("TERMINATE") - - -def test_workflow_execution_creation_child_policy_logic(): - domain = get_basic_domain() - - WorkflowExecution( - domain, - WorkflowType( - "test-workflow", "v1.0", - task_list="queue", default_child_policy="ABANDON", - default_execution_start_to_close_timeout="300", - default_task_start_to_close_timeout="300", - ), - "ab1234" - ).child_policy.should.equal("ABANDON") - - WorkflowExecution( - domain, - WorkflowType( - "test-workflow", "v1.0", task_list="queue", - default_execution_start_to_close_timeout="300", - default_task_start_to_close_timeout="300", - ), - "ab1234", - child_policy="REQUEST_CANCEL" - ).child_policy.should.equal("REQUEST_CANCEL") - - WorkflowExecution.when.called_with( - domain, - WorkflowType("test-workflow", "v1.0"), "ab1234" - ).should.throw(SWFDefaultUndefinedFault) - - -def test_workflow_execution_string_representation(): - wfe = make_workflow_execution(child_policy="TERMINATE") - str(wfe).should.match(r"^WorkflowExecution\(run_id: .*\)") - - -def test_workflow_execution_generates_a_random_run_id(): - domain = get_basic_domain() - wft = get_basic_workflow_type() - wfe1 = WorkflowExecution(domain, wft, "ab1234", child_policy="TERMINATE") - wfe2 = WorkflowExecution(domain, wft, "ab1235", child_policy="TERMINATE") - wfe1.run_id.should_not.equal(wfe2.run_id) - - -def test_workflow_execution_short_dict_representation(): - domain = get_basic_domain() - wf_type = WorkflowType( - "test-workflow", "v1.0", - task_list="queue", default_child_policy="ABANDON", - default_execution_start_to_close_timeout="300", - default_task_start_to_close_timeout="300", - ) - wfe = WorkflowExecution(domain, wf_type, "ab1234") - - sd = wfe.to_short_dict() - sd["workflowId"].should.equal("ab1234") - sd.should.contain("runId") - - -def test_workflow_execution_medium_dict_representation(): - domain = get_basic_domain() - wf_type = WorkflowType( - "test-workflow", "v1.0", - task_list="queue", default_child_policy="ABANDON", - default_execution_start_to_close_timeout="300", - default_task_start_to_close_timeout="300", - ) - wfe = WorkflowExecution(domain, wf_type, "ab1234") - - md = wfe.to_medium_dict() - md["execution"].should.equal(wfe.to_short_dict()) - md["workflowType"].should.equal(wf_type.to_short_dict()) - md["startTimestamp"].should.be.a('float') - md["executionStatus"].should.equal("OPEN") - md["cancelRequested"].should.be.falsy - md.should_not.contain("tagList") - - wfe.tag_list = ["foo", "bar", "baz"] - md = 
wfe.to_medium_dict() - md["tagList"].should.equal(["foo", "bar", "baz"]) - - -def test_workflow_execution_full_dict_representation(): - domain = get_basic_domain() - wf_type = WorkflowType( - "test-workflow", "v1.0", - task_list="queue", default_child_policy="ABANDON", - default_execution_start_to_close_timeout="300", - default_task_start_to_close_timeout="300", - ) - wfe = WorkflowExecution(domain, wf_type, "ab1234") - - fd = wfe.to_full_dict() - fd["executionInfo"].should.equal(wfe.to_medium_dict()) - fd["openCounts"]["openTimers"].should.equal(0) - fd["openCounts"]["openDecisionTasks"].should.equal(0) - fd["openCounts"]["openActivityTasks"].should.equal(0) - fd["executionConfiguration"].should.equal({ - "childPolicy": "ABANDON", - "executionStartToCloseTimeout": "300", - "taskList": {"name": "queue"}, - "taskStartToCloseTimeout": "300", - }) - - -def test_workflow_execution_list_dict_representation(): - domain = get_basic_domain() - wf_type = WorkflowType( - 'test-workflow', 'v1.0', - task_list='queue', default_child_policy='ABANDON', - default_execution_start_to_close_timeout='300', - default_task_start_to_close_timeout='300', - ) - wfe = WorkflowExecution(domain, wf_type, 'ab1234') - - ld = wfe.to_list_dict() - ld['workflowType']['version'].should.equal('v1.0') - ld['workflowType']['name'].should.equal('test-workflow') - ld['executionStatus'].should.equal('OPEN') - ld['execution']['workflowId'].should.equal('ab1234') - ld['execution'].should.contain('runId') - ld['cancelRequested'].should.be.false - ld.should.contain('startTimestamp') - - -def test_workflow_execution_schedule_decision_task(): - wfe = make_workflow_execution() - wfe.open_counts["openDecisionTasks"].should.equal(0) - wfe.schedule_decision_task() - wfe.open_counts["openDecisionTasks"].should.equal(1) - - -def test_workflow_execution_start_decision_task(): - wfe = make_workflow_execution() - wfe.schedule_decision_task() - dt = wfe.decision_tasks[0] - wfe.start_decision_task(dt.task_token, identity="srv01") - dt = wfe.decision_tasks[0] - dt.state.should.equal("STARTED") - wfe.events()[-1].event_type.should.equal("DecisionTaskStarted") - wfe.events()[-1].event_attributes["identity"].should.equal("srv01") - - -def test_workflow_execution_history_events_ids(): - wfe = make_workflow_execution() - wfe._add_event("WorkflowExecutionStarted") - wfe._add_event("DecisionTaskScheduled") - wfe._add_event("DecisionTaskStarted") - ids = [evt.event_id for evt in wfe.events()] - ids.should.equal([1, 2, 3]) - - -@freeze_time("2015-01-01 12:00:00") -def test_workflow_execution_start(): - wfe = make_workflow_execution() - wfe.events().should.equal([]) - - wfe.start() - wfe.start_timestamp.should.equal(1420113600.0) - wfe.events().should.have.length_of(2) - wfe.events()[0].event_type.should.equal("WorkflowExecutionStarted") - wfe.events()[1].event_type.should.equal("DecisionTaskScheduled") - - -@freeze_time("2015-01-02 12:00:00") -def test_workflow_execution_complete(): - wfe = make_workflow_execution() - wfe.complete(123, result="foo") - - wfe.execution_status.should.equal("CLOSED") - wfe.close_status.should.equal("COMPLETED") - wfe.close_timestamp.should.equal(1420200000.0) - wfe.events()[-1].event_type.should.equal("WorkflowExecutionCompleted") - wfe.events()[-1].event_attributes["decisionTaskCompletedEventId"].should.equal(123) - wfe.events()[-1].event_attributes["result"].should.equal("foo") - - -@freeze_time("2015-01-02 12:00:00") -def test_workflow_execution_fail(): - wfe = make_workflow_execution() - wfe.fail(123, details="some 
details", reason="my rules") - - wfe.execution_status.should.equal("CLOSED") - wfe.close_status.should.equal("FAILED") - wfe.close_timestamp.should.equal(1420200000.0) - wfe.events()[-1].event_type.should.equal("WorkflowExecutionFailed") - wfe.events()[-1].event_attributes["decisionTaskCompletedEventId"].should.equal(123) - wfe.events()[-1].event_attributes["details"].should.equal("some details") - wfe.events()[-1].event_attributes["reason"].should.equal("my rules") - - -@freeze_time("2015-01-01 12:00:00") -def test_workflow_execution_schedule_activity_task(): - wfe = make_workflow_execution() - wfe.latest_activity_task_timestamp.should.be.none - - wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES) - - wfe.latest_activity_task_timestamp.should.equal(1420113600.0) - - wfe.open_counts["openActivityTasks"].should.equal(1) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ActivityTaskScheduled") - last_event.event_attributes[ - "decisionTaskCompletedEventId"].should.equal(123) - last_event.event_attributes["taskList"][ - "name"].should.equal("task-list-name") - - wfe.activity_tasks.should.have.length_of(1) - task = wfe.activity_tasks[0] - task.activity_id.should.equal("my-activity-001") - task.activity_type.name.should.equal("test-activity") - wfe.domain.activity_task_lists["task-list-name"].should.contain(task) - - -def test_workflow_execution_schedule_activity_task_without_task_list_should_take_default(): - wfe = make_workflow_execution() - wfe.domain.add_type( - ActivityType("test-activity", "v1.2", task_list="foobar") - ) - wfe.schedule_activity_task(123, { - "activityId": "my-activity-001", - "activityType": {"name": "test-activity", "version": "v1.2"}, - "scheduleToStartTimeout": "600", - "scheduleToCloseTimeout": "600", - "startToCloseTimeout": "600", - "heartbeatTimeout": "300", - }) - - wfe.open_counts["openActivityTasks"].should.equal(1) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ActivityTaskScheduled") - last_event.event_attributes["taskList"]["name"].should.equal("foobar") - - task = wfe.activity_tasks[0] - wfe.domain.activity_task_lists["foobar"].should.contain(task) - - -def test_workflow_execution_schedule_activity_task_should_fail_if_wrong_attributes(): - wfe = make_workflow_execution() - at = ActivityType("test-activity", "v1.1") - at.status = "DEPRECATED" - wfe.domain.add_type(at) - wfe.domain.add_type(ActivityType("test-activity", "v1.2")) - - hsh = { - "activityId": "my-activity-001", - "activityType": {"name": "test-activity-does-not-exists", "version": "v1.1"}, - } - - wfe.schedule_activity_task(123, hsh) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal( - "ACTIVITY_TYPE_DOES_NOT_EXIST") - - hsh["activityType"]["name"] = "test-activity" - wfe.schedule_activity_task(123, hsh) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal( - "ACTIVITY_TYPE_DEPRECATED") - - hsh["activityType"]["version"] = "v1.2" - wfe.schedule_activity_task(123, hsh) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal( - "DEFAULT_TASK_LIST_UNDEFINED") - - hsh["taskList"] = {"name": "foobar"} - wfe.schedule_activity_task(123, hsh) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ScheduleActivityTaskFailed") - 
last_event.event_attributes["cause"].should.equal( - "DEFAULT_SCHEDULE_TO_START_TIMEOUT_UNDEFINED") - - hsh["scheduleToStartTimeout"] = "600" - wfe.schedule_activity_task(123, hsh) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal( - "DEFAULT_SCHEDULE_TO_CLOSE_TIMEOUT_UNDEFINED") - - hsh["scheduleToCloseTimeout"] = "600" - wfe.schedule_activity_task(123, hsh) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal( - "DEFAULT_START_TO_CLOSE_TIMEOUT_UNDEFINED") - - hsh["startToCloseTimeout"] = "600" - wfe.schedule_activity_task(123, hsh) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal( - "DEFAULT_HEARTBEAT_TIMEOUT_UNDEFINED") - - wfe.open_counts["openActivityTasks"].should.equal(0) - wfe.activity_tasks.should.have.length_of(0) - wfe.domain.activity_task_lists.should.have.length_of(0) - - hsh["heartbeatTimeout"] = "300" - wfe.schedule_activity_task(123, hsh) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ActivityTaskScheduled") - - task = wfe.activity_tasks[0] - wfe.domain.activity_task_lists["foobar"].should.contain(task) - wfe.open_counts["openDecisionTasks"].should.equal(0) - wfe.open_counts["openActivityTasks"].should.equal(1) - - -def test_workflow_execution_schedule_activity_task_failure_triggers_new_decision(): - wfe = make_workflow_execution() - wfe.start() - task_token = wfe.decision_tasks[-1].task_token - wfe.start_decision_task(task_token) - wfe.complete_decision_task( - task_token, - execution_context="free-form execution context", - decisions=[ - { - "decisionType": "ScheduleActivityTask", - "scheduleActivityTaskDecisionAttributes": { - "activityId": "my-activity-001", - "activityType": { - "name": "test-activity-does-not-exist", - "version": "v1.2" - }, - } - }, - { - "decisionType": "ScheduleActivityTask", - "scheduleActivityTaskDecisionAttributes": { - "activityId": "my-activity-001", - "activityType": { - "name": "test-activity-does-not-exist", - "version": "v1.2" - }, - } - }, - ]) - - wfe.latest_execution_context.should.equal("free-form execution context") - wfe.open_counts["openActivityTasks"].should.equal(0) - wfe.open_counts["openDecisionTasks"].should.equal(1) - last_events = wfe.events()[-3:] - last_events[0].event_type.should.equal("ScheduleActivityTaskFailed") - last_events[1].event_type.should.equal("ScheduleActivityTaskFailed") - last_events[2].event_type.should.equal("DecisionTaskScheduled") - - -def test_workflow_execution_schedule_activity_task_with_same_activity_id(): - wfe = make_workflow_execution() - - wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES) - wfe.open_counts["openActivityTasks"].should.equal(1) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ActivityTaskScheduled") - - wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES) - wfe.open_counts["openActivityTasks"].should.equal(1) - last_event = wfe.events()[-1] - last_event.event_type.should.equal("ScheduleActivityTaskFailed") - last_event.event_attributes["cause"].should.equal( - "ACTIVITY_ID_ALREADY_IN_USE") - - -def test_workflow_execution_start_activity_task(): - wfe = make_workflow_execution() - wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES) - task_token = wfe.activity_tasks[-1].task_token - 
wfe.start_activity_task(task_token, identity="worker01") - task = wfe.activity_tasks[-1] - task.state.should.equal("STARTED") - wfe.events()[-1].event_type.should.equal("ActivityTaskStarted") - wfe.events()[-1].event_attributes["identity"].should.equal("worker01") - - -def test_complete_activity_task(): - wfe = make_workflow_execution() - wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES) - task_token = wfe.activity_tasks[-1].task_token - - wfe.open_counts["openActivityTasks"].should.equal(1) - wfe.open_counts["openDecisionTasks"].should.equal(0) - - wfe.start_activity_task(task_token, identity="worker01") - wfe.complete_activity_task(task_token, result="a superb result") - - task = wfe.activity_tasks[-1] - task.state.should.equal("COMPLETED") - wfe.events()[-2].event_type.should.equal("ActivityTaskCompleted") - wfe.events()[-1].event_type.should.equal("DecisionTaskScheduled") - - wfe.open_counts["openActivityTasks"].should.equal(0) - wfe.open_counts["openDecisionTasks"].should.equal(1) - - -def test_terminate(): - wfe = make_workflow_execution() - wfe.schedule_decision_task() - wfe.terminate() - - wfe.execution_status.should.equal("CLOSED") - wfe.close_status.should.equal("TERMINATED") - wfe.close_cause.should.equal("OPERATOR_INITIATED") - wfe.open_counts["openDecisionTasks"].should.equal(1) - - last_event = wfe.events()[-1] - last_event.event_type.should.equal("WorkflowExecutionTerminated") - # take default child_policy if not provided (as here) - last_event.event_attributes["childPolicy"].should.equal("ABANDON") - - -def test_first_timeout(): - wfe = make_workflow_execution() - wfe.first_timeout().should.be.none - - with freeze_time("2015-01-01 12:00:00"): - wfe.start() - wfe.first_timeout().should.be.none - - with freeze_time("2015-01-01 14:01"): - # 2 hours timeout reached - wfe.first_timeout().should.be.a(Timeout) - - -# See moto/swf/models/workflow_execution.py "_process_timeouts()" for more -# details -def test_timeouts_are_processed_in_order_and_reevaluated(): - # Let's make a Workflow Execution with the following properties: - # - execution start to close timeout of 8 mins - # - (decision) task start to close timeout of 5 mins - # - # Now start the workflow execution, and look at the history 15 mins later: - # - a first decision task is fired just after workflow execution start - # - the first decision task should have timed out after 5 mins - # - that fires a new decision task (which we hack to start automatically) - # - then the workflow timeouts after 8 mins (shows gradual reevaluation) - # - but the last scheduled decision task should *not* timeout (workflow closed) - with freeze_time("2015-01-01 12:00:00"): - wfe = make_workflow_execution( - execution_start_to_close_timeout=8 * 60, - task_start_to_close_timeout=5 * 60, - ) - # decision will automatically start - wfe = auto_start_decision_tasks(wfe) - wfe.start() - event_idx = len(wfe.events()) - - with freeze_time("2015-01-01 12:08:00"): - wfe._process_timeouts() - - event_types = [e.event_type for e in wfe.events()[event_idx:]] - event_types.should.equal([ - "DecisionTaskTimedOut", - "DecisionTaskScheduled", - "DecisionTaskStarted", - "WorkflowExecutionTimedOut", - ]) +from freezegun import freeze_time +import sure # noqa + +from moto.swf.models import ( + ActivityType, + Timeout, + WorkflowType, + WorkflowExecution, +) +from moto.swf.exceptions import SWFDefaultUndefinedFault +from ..utils import ( + auto_start_decision_tasks, + get_basic_domain, + get_basic_workflow_type, + make_workflow_execution, +) + + 
+VALID_ACTIVITY_TASK_ATTRIBUTES = { + "activityId": "my-activity-001", + "activityType": {"name": "test-activity", "version": "v1.1"}, + "taskList": {"name": "task-list-name"}, + "scheduleToStartTimeout": "600", + "scheduleToCloseTimeout": "600", + "startToCloseTimeout": "600", + "heartbeatTimeout": "300", +} + + +def test_workflow_execution_creation(): + domain = get_basic_domain() + wft = get_basic_workflow_type() + wfe = WorkflowExecution(domain, wft, "ab1234", child_policy="TERMINATE") + + wfe.domain.should.equal(domain) + wfe.workflow_type.should.equal(wft) + wfe.child_policy.should.equal("TERMINATE") + + +def test_workflow_execution_creation_child_policy_logic(): + domain = get_basic_domain() + + WorkflowExecution( + domain, + WorkflowType( + "test-workflow", "v1.0", + task_list="queue", default_child_policy="ABANDON", + default_execution_start_to_close_timeout="300", + default_task_start_to_close_timeout="300", + ), + "ab1234" + ).child_policy.should.equal("ABANDON") + + WorkflowExecution( + domain, + WorkflowType( + "test-workflow", "v1.0", task_list="queue", + default_execution_start_to_close_timeout="300", + default_task_start_to_close_timeout="300", + ), + "ab1234", + child_policy="REQUEST_CANCEL" + ).child_policy.should.equal("REQUEST_CANCEL") + + WorkflowExecution.when.called_with( + domain, + WorkflowType("test-workflow", "v1.0"), "ab1234" + ).should.throw(SWFDefaultUndefinedFault) + + +def test_workflow_execution_string_representation(): + wfe = make_workflow_execution(child_policy="TERMINATE") + str(wfe).should.match(r"^WorkflowExecution\(run_id: .*\)") + + +def test_workflow_execution_generates_a_random_run_id(): + domain = get_basic_domain() + wft = get_basic_workflow_type() + wfe1 = WorkflowExecution(domain, wft, "ab1234", child_policy="TERMINATE") + wfe2 = WorkflowExecution(domain, wft, "ab1235", child_policy="TERMINATE") + wfe1.run_id.should_not.equal(wfe2.run_id) + + +def test_workflow_execution_short_dict_representation(): + domain = get_basic_domain() + wf_type = WorkflowType( + "test-workflow", "v1.0", + task_list="queue", default_child_policy="ABANDON", + default_execution_start_to_close_timeout="300", + default_task_start_to_close_timeout="300", + ) + wfe = WorkflowExecution(domain, wf_type, "ab1234") + + sd = wfe.to_short_dict() + sd["workflowId"].should.equal("ab1234") + sd.should.contain("runId") + + +def test_workflow_execution_medium_dict_representation(): + domain = get_basic_domain() + wf_type = WorkflowType( + "test-workflow", "v1.0", + task_list="queue", default_child_policy="ABANDON", + default_execution_start_to_close_timeout="300", + default_task_start_to_close_timeout="300", + ) + wfe = WorkflowExecution(domain, wf_type, "ab1234") + + md = wfe.to_medium_dict() + md["execution"].should.equal(wfe.to_short_dict()) + md["workflowType"].should.equal(wf_type.to_short_dict()) + md["startTimestamp"].should.be.a('float') + md["executionStatus"].should.equal("OPEN") + md["cancelRequested"].should.be.falsy + md.should_not.contain("tagList") + + wfe.tag_list = ["foo", "bar", "baz"] + md = wfe.to_medium_dict() + md["tagList"].should.equal(["foo", "bar", "baz"]) + + +def test_workflow_execution_full_dict_representation(): + domain = get_basic_domain() + wf_type = WorkflowType( + "test-workflow", "v1.0", + task_list="queue", default_child_policy="ABANDON", + default_execution_start_to_close_timeout="300", + default_task_start_to_close_timeout="300", + ) + wfe = WorkflowExecution(domain, wf_type, "ab1234") + + fd = wfe.to_full_dict() + 
fd["executionInfo"].should.equal(wfe.to_medium_dict()) + fd["openCounts"]["openTimers"].should.equal(0) + fd["openCounts"]["openDecisionTasks"].should.equal(0) + fd["openCounts"]["openActivityTasks"].should.equal(0) + fd["executionConfiguration"].should.equal({ + "childPolicy": "ABANDON", + "executionStartToCloseTimeout": "300", + "taskList": {"name": "queue"}, + "taskStartToCloseTimeout": "300", + }) + + +def test_workflow_execution_list_dict_representation(): + domain = get_basic_domain() + wf_type = WorkflowType( + 'test-workflow', 'v1.0', + task_list='queue', default_child_policy='ABANDON', + default_execution_start_to_close_timeout='300', + default_task_start_to_close_timeout='300', + ) + wfe = WorkflowExecution(domain, wf_type, 'ab1234') + + ld = wfe.to_list_dict() + ld['workflowType']['version'].should.equal('v1.0') + ld['workflowType']['name'].should.equal('test-workflow') + ld['executionStatus'].should.equal('OPEN') + ld['execution']['workflowId'].should.equal('ab1234') + ld['execution'].should.contain('runId') + ld['cancelRequested'].should.be.false + ld.should.contain('startTimestamp') + + +def test_workflow_execution_schedule_decision_task(): + wfe = make_workflow_execution() + wfe.open_counts["openDecisionTasks"].should.equal(0) + wfe.schedule_decision_task() + wfe.open_counts["openDecisionTasks"].should.equal(1) + + +def test_workflow_execution_start_decision_task(): + wfe = make_workflow_execution() + wfe.schedule_decision_task() + dt = wfe.decision_tasks[0] + wfe.start_decision_task(dt.task_token, identity="srv01") + dt = wfe.decision_tasks[0] + dt.state.should.equal("STARTED") + wfe.events()[-1].event_type.should.equal("DecisionTaskStarted") + wfe.events()[-1].event_attributes["identity"].should.equal("srv01") + + +def test_workflow_execution_history_events_ids(): + wfe = make_workflow_execution() + wfe._add_event("WorkflowExecutionStarted") + wfe._add_event("DecisionTaskScheduled") + wfe._add_event("DecisionTaskStarted") + ids = [evt.event_id for evt in wfe.events()] + ids.should.equal([1, 2, 3]) + + +@freeze_time("2015-01-01 12:00:00") +def test_workflow_execution_start(): + wfe = make_workflow_execution() + wfe.events().should.equal([]) + + wfe.start() + wfe.start_timestamp.should.equal(1420113600.0) + wfe.events().should.have.length_of(2) + wfe.events()[0].event_type.should.equal("WorkflowExecutionStarted") + wfe.events()[1].event_type.should.equal("DecisionTaskScheduled") + + +@freeze_time("2015-01-02 12:00:00") +def test_workflow_execution_complete(): + wfe = make_workflow_execution() + wfe.complete(123, result="foo") + + wfe.execution_status.should.equal("CLOSED") + wfe.close_status.should.equal("COMPLETED") + wfe.close_timestamp.should.equal(1420200000.0) + wfe.events()[-1].event_type.should.equal("WorkflowExecutionCompleted") + wfe.events()[-1].event_attributes["decisionTaskCompletedEventId"].should.equal(123) + wfe.events()[-1].event_attributes["result"].should.equal("foo") + + +@freeze_time("2015-01-02 12:00:00") +def test_workflow_execution_fail(): + wfe = make_workflow_execution() + wfe.fail(123, details="some details", reason="my rules") + + wfe.execution_status.should.equal("CLOSED") + wfe.close_status.should.equal("FAILED") + wfe.close_timestamp.should.equal(1420200000.0) + wfe.events()[-1].event_type.should.equal("WorkflowExecutionFailed") + wfe.events()[-1].event_attributes["decisionTaskCompletedEventId"].should.equal(123) + wfe.events()[-1].event_attributes["details"].should.equal("some details") + 
wfe.events()[-1].event_attributes["reason"].should.equal("my rules") + + +@freeze_time("2015-01-01 12:00:00") +def test_workflow_execution_schedule_activity_task(): + wfe = make_workflow_execution() + wfe.latest_activity_task_timestamp.should.be.none + + wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES) + + wfe.latest_activity_task_timestamp.should.equal(1420113600.0) + + wfe.open_counts["openActivityTasks"].should.equal(1) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ActivityTaskScheduled") + last_event.event_attributes[ + "decisionTaskCompletedEventId"].should.equal(123) + last_event.event_attributes["taskList"][ + "name"].should.equal("task-list-name") + + wfe.activity_tasks.should.have.length_of(1) + task = wfe.activity_tasks[0] + task.activity_id.should.equal("my-activity-001") + task.activity_type.name.should.equal("test-activity") + wfe.domain.activity_task_lists["task-list-name"].should.contain(task) + + +def test_workflow_execution_schedule_activity_task_without_task_list_should_take_default(): + wfe = make_workflow_execution() + wfe.domain.add_type( + ActivityType("test-activity", "v1.2", task_list="foobar") + ) + wfe.schedule_activity_task(123, { + "activityId": "my-activity-001", + "activityType": {"name": "test-activity", "version": "v1.2"}, + "scheduleToStartTimeout": "600", + "scheduleToCloseTimeout": "600", + "startToCloseTimeout": "600", + "heartbeatTimeout": "300", + }) + + wfe.open_counts["openActivityTasks"].should.equal(1) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ActivityTaskScheduled") + last_event.event_attributes["taskList"]["name"].should.equal("foobar") + + task = wfe.activity_tasks[0] + wfe.domain.activity_task_lists["foobar"].should.contain(task) + + +def test_workflow_execution_schedule_activity_task_should_fail_if_wrong_attributes(): + wfe = make_workflow_execution() + at = ActivityType("test-activity", "v1.1") + at.status = "DEPRECATED" + wfe.domain.add_type(at) + wfe.domain.add_type(ActivityType("test-activity", "v1.2")) + + hsh = { + "activityId": "my-activity-001", + "activityType": {"name": "test-activity-does-not-exists", "version": "v1.1"}, + } + + wfe.schedule_activity_task(123, hsh) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ScheduleActivityTaskFailed") + last_event.event_attributes["cause"].should.equal( + "ACTIVITY_TYPE_DOES_NOT_EXIST") + + hsh["activityType"]["name"] = "test-activity" + wfe.schedule_activity_task(123, hsh) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ScheduleActivityTaskFailed") + last_event.event_attributes["cause"].should.equal( + "ACTIVITY_TYPE_DEPRECATED") + + hsh["activityType"]["version"] = "v1.2" + wfe.schedule_activity_task(123, hsh) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ScheduleActivityTaskFailed") + last_event.event_attributes["cause"].should.equal( + "DEFAULT_TASK_LIST_UNDEFINED") + + hsh["taskList"] = {"name": "foobar"} + wfe.schedule_activity_task(123, hsh) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ScheduleActivityTaskFailed") + last_event.event_attributes["cause"].should.equal( + "DEFAULT_SCHEDULE_TO_START_TIMEOUT_UNDEFINED") + + hsh["scheduleToStartTimeout"] = "600" + wfe.schedule_activity_task(123, hsh) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ScheduleActivityTaskFailed") + last_event.event_attributes["cause"].should.equal( + "DEFAULT_SCHEDULE_TO_CLOSE_TIMEOUT_UNDEFINED") + + hsh["scheduleToCloseTimeout"] = 
"600" + wfe.schedule_activity_task(123, hsh) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ScheduleActivityTaskFailed") + last_event.event_attributes["cause"].should.equal( + "DEFAULT_START_TO_CLOSE_TIMEOUT_UNDEFINED") + + hsh["startToCloseTimeout"] = "600" + wfe.schedule_activity_task(123, hsh) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ScheduleActivityTaskFailed") + last_event.event_attributes["cause"].should.equal( + "DEFAULT_HEARTBEAT_TIMEOUT_UNDEFINED") + + wfe.open_counts["openActivityTasks"].should.equal(0) + wfe.activity_tasks.should.have.length_of(0) + wfe.domain.activity_task_lists.should.have.length_of(0) + + hsh["heartbeatTimeout"] = "300" + wfe.schedule_activity_task(123, hsh) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ActivityTaskScheduled") + + task = wfe.activity_tasks[0] + wfe.domain.activity_task_lists["foobar"].should.contain(task) + wfe.open_counts["openDecisionTasks"].should.equal(0) + wfe.open_counts["openActivityTasks"].should.equal(1) + + +def test_workflow_execution_schedule_activity_task_failure_triggers_new_decision(): + wfe = make_workflow_execution() + wfe.start() + task_token = wfe.decision_tasks[-1].task_token + wfe.start_decision_task(task_token) + wfe.complete_decision_task( + task_token, + execution_context="free-form execution context", + decisions=[ + { + "decisionType": "ScheduleActivityTask", + "scheduleActivityTaskDecisionAttributes": { + "activityId": "my-activity-001", + "activityType": { + "name": "test-activity-does-not-exist", + "version": "v1.2" + }, + } + }, + { + "decisionType": "ScheduleActivityTask", + "scheduleActivityTaskDecisionAttributes": { + "activityId": "my-activity-001", + "activityType": { + "name": "test-activity-does-not-exist", + "version": "v1.2" + }, + } + }, + ]) + + wfe.latest_execution_context.should.equal("free-form execution context") + wfe.open_counts["openActivityTasks"].should.equal(0) + wfe.open_counts["openDecisionTasks"].should.equal(1) + last_events = wfe.events()[-3:] + last_events[0].event_type.should.equal("ScheduleActivityTaskFailed") + last_events[1].event_type.should.equal("ScheduleActivityTaskFailed") + last_events[2].event_type.should.equal("DecisionTaskScheduled") + + +def test_workflow_execution_schedule_activity_task_with_same_activity_id(): + wfe = make_workflow_execution() + + wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES) + wfe.open_counts["openActivityTasks"].should.equal(1) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ActivityTaskScheduled") + + wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES) + wfe.open_counts["openActivityTasks"].should.equal(1) + last_event = wfe.events()[-1] + last_event.event_type.should.equal("ScheduleActivityTaskFailed") + last_event.event_attributes["cause"].should.equal( + "ACTIVITY_ID_ALREADY_IN_USE") + + +def test_workflow_execution_start_activity_task(): + wfe = make_workflow_execution() + wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES) + task_token = wfe.activity_tasks[-1].task_token + wfe.start_activity_task(task_token, identity="worker01") + task = wfe.activity_tasks[-1] + task.state.should.equal("STARTED") + wfe.events()[-1].event_type.should.equal("ActivityTaskStarted") + wfe.events()[-1].event_attributes["identity"].should.equal("worker01") + + +def test_complete_activity_task(): + wfe = make_workflow_execution() + wfe.schedule_activity_task(123, VALID_ACTIVITY_TASK_ATTRIBUTES) + task_token = 
wfe.activity_tasks[-1].task_token + + wfe.open_counts["openActivityTasks"].should.equal(1) + wfe.open_counts["openDecisionTasks"].should.equal(0) + + wfe.start_activity_task(task_token, identity="worker01") + wfe.complete_activity_task(task_token, result="a superb result") + + task = wfe.activity_tasks[-1] + task.state.should.equal("COMPLETED") + wfe.events()[-2].event_type.should.equal("ActivityTaskCompleted") + wfe.events()[-1].event_type.should.equal("DecisionTaskScheduled") + + wfe.open_counts["openActivityTasks"].should.equal(0) + wfe.open_counts["openDecisionTasks"].should.equal(1) + + +def test_terminate(): + wfe = make_workflow_execution() + wfe.schedule_decision_task() + wfe.terminate() + + wfe.execution_status.should.equal("CLOSED") + wfe.close_status.should.equal("TERMINATED") + wfe.close_cause.should.equal("OPERATOR_INITIATED") + wfe.open_counts["openDecisionTasks"].should.equal(1) + + last_event = wfe.events()[-1] + last_event.event_type.should.equal("WorkflowExecutionTerminated") + # take default child_policy if not provided (as here) + last_event.event_attributes["childPolicy"].should.equal("ABANDON") + + +def test_first_timeout(): + wfe = make_workflow_execution() + wfe.first_timeout().should.be.none + + with freeze_time("2015-01-01 12:00:00"): + wfe.start() + wfe.first_timeout().should.be.none + + with freeze_time("2015-01-01 14:01"): + # 2 hours timeout reached + wfe.first_timeout().should.be.a(Timeout) + + +# See moto/swf/models/workflow_execution.py "_process_timeouts()" for more +# details +def test_timeouts_are_processed_in_order_and_reevaluated(): + # Let's make a Workflow Execution with the following properties: + # - execution start to close timeout of 8 mins + # - (decision) task start to close timeout of 5 mins + # + # Now start the workflow execution, and look at the history 15 mins later: + # - a first decision task is fired just after workflow execution start + # - the first decision task should have timed out after 5 mins + # - that fires a new decision task (which we hack to start automatically) + # - then the workflow timeouts after 8 mins (shows gradual reevaluation) + # - but the last scheduled decision task should *not* timeout (workflow closed) + with freeze_time("2015-01-01 12:00:00"): + wfe = make_workflow_execution( + execution_start_to_close_timeout=8 * 60, + task_start_to_close_timeout=5 * 60, + ) + # decision will automatically start + wfe = auto_start_decision_tasks(wfe) + wfe.start() + event_idx = len(wfe.events()) + + with freeze_time("2015-01-01 12:08:00"): + wfe._process_timeouts() + + event_types = [e.event_type for e in wfe.events()[event_idx:]] + event_types.should.equal([ + "DecisionTaskTimedOut", + "DecisionTaskScheduled", + "DecisionTaskStarted", + "WorkflowExecutionTimedOut", + ]) diff --git a/tests/test_swf/responses/test_activity_tasks.py b/tests/test_swf/responses/test_activity_tasks.py index c0b8897b9..e67013f6b 100644 --- a/tests/test_swf/responses/test_activity_tasks.py +++ b/tests/test_swf/responses/test_activity_tasks.py @@ -1,228 +1,228 @@ -from boto.swf.exceptions import SWFResponseError -from freezegun import freeze_time -import sure # noqa - -from moto import mock_swf_deprecated -from moto.swf import swf_backend - -from ..utils import setup_workflow, SCHEDULE_ACTIVITY_TASK_DECISION - - -# PollForActivityTask endpoint -@mock_swf_deprecated -def test_poll_for_activity_task_when_one(): - conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - 
conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - resp = conn.poll_for_activity_task( - "test-domain", "activity-task-list", identity="surprise") - resp["activityId"].should.equal("my-activity-001") - resp["taskToken"].should_not.be.none - - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - resp["events"][-1]["eventType"].should.equal("ActivityTaskStarted") - resp["events"][-1]["activityTaskStartedEventAttributes"].should.equal( - {"identity": "surprise", "scheduledEventId": 5} - ) - - -@mock_swf_deprecated -def test_poll_for_activity_task_when_none(): - conn = setup_workflow() - resp = conn.poll_for_activity_task("test-domain", "activity-task-list") - resp.should.equal({"startedEventId": 0}) - - -@mock_swf_deprecated -def test_poll_for_activity_task_on_non_existent_queue(): - conn = setup_workflow() - resp = conn.poll_for_activity_task("test-domain", "non-existent-queue") - resp.should.equal({"startedEventId": 0}) - - -# CountPendingActivityTasks endpoint -@mock_swf_deprecated -def test_count_pending_activity_tasks(): - conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - - resp = conn.count_pending_activity_tasks( - "test-domain", "activity-task-list") - resp.should.equal({"count": 1, "truncated": False}) - - -@mock_swf_deprecated -def test_count_pending_decision_tasks_on_non_existent_task_list(): - conn = setup_workflow() - resp = conn.count_pending_activity_tasks("test-domain", "non-existent") - resp.should.equal({"count": 0, "truncated": False}) - - -# RespondActivityTaskCompleted endpoint -@mock_swf_deprecated -def test_respond_activity_task_completed(): - conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - activity_token = conn.poll_for_activity_task( - "test-domain", "activity-task-list")["taskToken"] - - resp = conn.respond_activity_task_completed( - activity_token, result="result of the task") - resp.should.be.none - - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - resp["events"][-2]["eventType"].should.equal("ActivityTaskCompleted") - resp["events"][-2]["activityTaskCompletedEventAttributes"].should.equal( - {"result": "result of the task", "scheduledEventId": 5, "startedEventId": 6} - ) - - -@mock_swf_deprecated -def test_respond_activity_task_completed_on_closed_workflow_execution(): - conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - activity_token = conn.poll_for_activity_task( - "test-domain", "activity-task-list")["taskToken"] - - # bad: we're closing workflow execution manually, but endpoints are not - # coded for now.. 
- wfe = swf_backend.domains[0].workflow_executions[-1] - wfe.execution_status = "CLOSED" - # /bad - - conn.respond_activity_task_completed.when.called_with( - activity_token - ).should.throw(SWFResponseError, "WorkflowExecution=") - - -@mock_swf_deprecated -def test_respond_activity_task_completed_with_task_already_completed(): - conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - activity_token = conn.poll_for_activity_task( - "test-domain", "activity-task-list")["taskToken"] - - conn.respond_activity_task_completed(activity_token) - - conn.respond_activity_task_completed.when.called_with( - activity_token - ).should.throw(SWFResponseError, "Unknown activity, scheduledEventId = 5") - - -# RespondActivityTaskFailed endpoint -@mock_swf_deprecated -def test_respond_activity_task_failed(): - conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - activity_token = conn.poll_for_activity_task( - "test-domain", "activity-task-list")["taskToken"] - - resp = conn.respond_activity_task_failed(activity_token, - reason="short reason", - details="long details") - resp.should.be.none - - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - resp["events"][-2]["eventType"].should.equal("ActivityTaskFailed") - resp["events"][-2]["activityTaskFailedEventAttributes"].should.equal( - {"reason": "short reason", "details": "long details", - "scheduledEventId": 5, "startedEventId": 6} - ) - - -@mock_swf_deprecated -def test_respond_activity_task_completed_with_wrong_token(): - # NB: we just test ONE failure case for RespondActivityTaskFailed - # because the safeguards are shared with RespondActivityTaskCompleted, so - # no need to retest everything end-to-end. 
- conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - conn.poll_for_activity_task("test-domain", "activity-task-list") - conn.respond_activity_task_failed.when.called_with( - "not-a-correct-token" - ).should.throw(SWFResponseError, "Invalid token") - - -# RecordActivityTaskHeartbeat endpoint -@mock_swf_deprecated -def test_record_activity_task_heartbeat(): - conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - activity_token = conn.poll_for_activity_task( - "test-domain", "activity-task-list")["taskToken"] - - resp = conn.record_activity_task_heartbeat(activity_token) - resp.should.equal({"cancelRequested": False}) - - -@mock_swf_deprecated -def test_record_activity_task_heartbeat_with_wrong_token(): - conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - conn.poll_for_activity_task( - "test-domain", "activity-task-list")["taskToken"] - - conn.record_activity_task_heartbeat.when.called_with( - "bad-token", details="some progress details" - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_record_activity_task_heartbeat_sets_details_in_case_of_timeout(): - conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - with freeze_time("2015-01-01 12:00:00"): - activity_token = conn.poll_for_activity_task( - "test-domain", "activity-task-list")["taskToken"] - conn.record_activity_task_heartbeat( - activity_token, details="some progress details") - - with freeze_time("2015-01-01 12:05:30"): - # => Activity Task Heartbeat timeout reached!! 
- resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - resp["events"][-2]["eventType"].should.equal("ActivityTaskTimedOut") - attrs = resp["events"][-2]["activityTaskTimedOutEventAttributes"] - attrs["details"].should.equal("some progress details") +from boto.swf.exceptions import SWFResponseError +from freezegun import freeze_time +import sure # noqa + +from moto import mock_swf_deprecated +from moto.swf import swf_backend + +from ..utils import setup_workflow, SCHEDULE_ACTIVITY_TASK_DECISION + + +# PollForActivityTask endpoint +@mock_swf_deprecated +def test_poll_for_activity_task_when_one(): + conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + resp = conn.poll_for_activity_task( + "test-domain", "activity-task-list", identity="surprise") + resp["activityId"].should.equal("my-activity-001") + resp["taskToken"].should_not.be.none + + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + resp["events"][-1]["eventType"].should.equal("ActivityTaskStarted") + resp["events"][-1]["activityTaskStartedEventAttributes"].should.equal( + {"identity": "surprise", "scheduledEventId": 5} + ) + + +@mock_swf_deprecated +def test_poll_for_activity_task_when_none(): + conn = setup_workflow() + resp = conn.poll_for_activity_task("test-domain", "activity-task-list") + resp.should.equal({"startedEventId": 0}) + + +@mock_swf_deprecated +def test_poll_for_activity_task_on_non_existent_queue(): + conn = setup_workflow() + resp = conn.poll_for_activity_task("test-domain", "non-existent-queue") + resp.should.equal({"startedEventId": 0}) + + +# CountPendingActivityTasks endpoint +@mock_swf_deprecated +def test_count_pending_activity_tasks(): + conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + + resp = conn.count_pending_activity_tasks( + "test-domain", "activity-task-list") + resp.should.equal({"count": 1, "truncated": False}) + + +@mock_swf_deprecated +def test_count_pending_decision_tasks_on_non_existent_task_list(): + conn = setup_workflow() + resp = conn.count_pending_activity_tasks("test-domain", "non-existent") + resp.should.equal({"count": 0, "truncated": False}) + + +# RespondActivityTaskCompleted endpoint +@mock_swf_deprecated +def test_respond_activity_task_completed(): + conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] + + resp = conn.respond_activity_task_completed( + activity_token, result="result of the task") + resp.should.be.none + + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + resp["events"][-2]["eventType"].should.equal("ActivityTaskCompleted") + resp["events"][-2]["activityTaskCompletedEventAttributes"].should.equal( + {"result": "result of the task", "scheduledEventId": 5, "startedEventId": 6} + ) + + +@mock_swf_deprecated +def test_respond_activity_task_completed_on_closed_workflow_execution(): + conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + 
"test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] + + # bad: we're closing workflow execution manually, but endpoints are not + # coded for now.. + wfe = swf_backend.domains[0].workflow_executions[-1] + wfe.execution_status = "CLOSED" + # /bad + + conn.respond_activity_task_completed.when.called_with( + activity_token + ).should.throw(SWFResponseError, "WorkflowExecution=") + + +@mock_swf_deprecated +def test_respond_activity_task_completed_with_task_already_completed(): + conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] + + conn.respond_activity_task_completed(activity_token) + + conn.respond_activity_task_completed.when.called_with( + activity_token + ).should.throw(SWFResponseError, "Unknown activity, scheduledEventId = 5") + + +# RespondActivityTaskFailed endpoint +@mock_swf_deprecated +def test_respond_activity_task_failed(): + conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] + + resp = conn.respond_activity_task_failed(activity_token, + reason="short reason", + details="long details") + resp.should.be.none + + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + resp["events"][-2]["eventType"].should.equal("ActivityTaskFailed") + resp["events"][-2]["activityTaskFailedEventAttributes"].should.equal( + {"reason": "short reason", "details": "long details", + "scheduledEventId": 5, "startedEventId": 6} + ) + + +@mock_swf_deprecated +def test_respond_activity_task_completed_with_wrong_token(): + # NB: we just test ONE failure case for RespondActivityTaskFailed + # because the safeguards are shared with RespondActivityTaskCompleted, so + # no need to retest everything end-to-end. 
+ conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + conn.poll_for_activity_task("test-domain", "activity-task-list") + conn.respond_activity_task_failed.when.called_with( + "not-a-correct-token" + ).should.throw(SWFResponseError, "Invalid token") + + +# RecordActivityTaskHeartbeat endpoint +@mock_swf_deprecated +def test_record_activity_task_heartbeat(): + conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] + + resp = conn.record_activity_task_heartbeat(activity_token) + resp.should.equal({"cancelRequested": False}) + + +@mock_swf_deprecated +def test_record_activity_task_heartbeat_with_wrong_token(): + conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] + + conn.record_activity_task_heartbeat.when.called_with( + "bad-token", details="some progress details" + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_record_activity_task_heartbeat_sets_details_in_case_of_timeout(): + conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + with freeze_time("2015-01-01 12:00:00"): + activity_token = conn.poll_for_activity_task( + "test-domain", "activity-task-list")["taskToken"] + conn.record_activity_task_heartbeat( + activity_token, details="some progress details") + + with freeze_time("2015-01-01 12:05:30"): + # => Activity Task Heartbeat timeout reached!! 
+ resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + resp["events"][-2]["eventType"].should.equal("ActivityTaskTimedOut") + attrs = resp["events"][-2]["activityTaskTimedOutEventAttributes"] + attrs["details"].should.equal("some progress details") diff --git a/tests/test_swf/responses/test_activity_types.py b/tests/test_swf/responses/test_activity_types.py index 95d8a3733..7bb66ac32 100644 --- a/tests/test_swf/responses/test_activity_types.py +++ b/tests/test_swf/responses/test_activity_types.py @@ -1,134 +1,134 @@ -import boto -from boto.swf.exceptions import SWFResponseError -import sure # noqa - -from moto import mock_swf_deprecated - - -# RegisterActivityType endpoint -@mock_swf_deprecated -def test_register_activity_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_activity_type("test-domain", "test-activity", "v1.0") - - types = conn.list_activity_types("test-domain", "REGISTERED") - actype = types["typeInfos"][0] - actype["activityType"]["name"].should.equal("test-activity") - actype["activityType"]["version"].should.equal("v1.0") - - -@mock_swf_deprecated -def test_register_already_existing_activity_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_activity_type("test-domain", "test-activity", "v1.0") - - conn.register_activity_type.when.called_with( - "test-domain", "test-activity", "v1.0" - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_register_with_wrong_parameter_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - - conn.register_activity_type.when.called_with( - "test-domain", "test-activity", 12 - ).should.throw(SWFResponseError) - - -# ListActivityTypes endpoint -@mock_swf_deprecated -def test_list_activity_types(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_activity_type("test-domain", "b-test-activity", "v1.0") - conn.register_activity_type("test-domain", "a-test-activity", "v1.0") - conn.register_activity_type("test-domain", "c-test-activity", "v1.0") - - all_activity_types = conn.list_activity_types("test-domain", "REGISTERED") - names = [activity_type["activityType"]["name"] - for activity_type in all_activity_types["typeInfos"]] - names.should.equal( - ["a-test-activity", "b-test-activity", "c-test-activity"]) - - -@mock_swf_deprecated -def test_list_activity_types_reverse_order(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_activity_type("test-domain", "b-test-activity", "v1.0") - conn.register_activity_type("test-domain", "a-test-activity", "v1.0") - conn.register_activity_type("test-domain", "c-test-activity", "v1.0") - - all_activity_types = conn.list_activity_types("test-domain", "REGISTERED", - reverse_order=True) - names = [activity_type["activityType"]["name"] - for activity_type in all_activity_types["typeInfos"]] - names.should.equal( - ["c-test-activity", "b-test-activity", "a-test-activity"]) - - -# DeprecateActivityType endpoint -@mock_swf_deprecated -def test_deprecate_activity_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_activity_type("test-domain", "test-activity", "v1.0") - conn.deprecate_activity_type("test-domain", "test-activity", "v1.0") - - actypes = conn.list_activity_types("test-domain", 
"DEPRECATED") - actype = actypes["typeInfos"][0] - actype["activityType"]["name"].should.equal("test-activity") - actype["activityType"]["version"].should.equal("v1.0") - - -@mock_swf_deprecated -def test_deprecate_already_deprecated_activity_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_activity_type("test-domain", "test-activity", "v1.0") - conn.deprecate_activity_type("test-domain", "test-activity", "v1.0") - - conn.deprecate_activity_type.when.called_with( - "test-domain", "test-activity", "v1.0" - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_deprecate_non_existent_activity_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - - conn.deprecate_activity_type.when.called_with( - "test-domain", "non-existent", "v1.0" - ).should.throw(SWFResponseError) - - -# DescribeActivityType endpoint -@mock_swf_deprecated -def test_describe_activity_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_activity_type("test-domain", "test-activity", "v1.0", - task_list="foo", default_task_heartbeat_timeout="32") - - actype = conn.describe_activity_type( - "test-domain", "test-activity", "v1.0") - actype["configuration"]["defaultTaskList"]["name"].should.equal("foo") - infos = actype["typeInfo"] - infos["activityType"]["name"].should.equal("test-activity") - infos["activityType"]["version"].should.equal("v1.0") - infos["status"].should.equal("REGISTERED") - - -@mock_swf_deprecated -def test_describe_non_existent_activity_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - - conn.describe_activity_type.when.called_with( - "test-domain", "non-existent", "v1.0" - ).should.throw(SWFResponseError) +import boto +from boto.swf.exceptions import SWFResponseError +import sure # noqa + +from moto import mock_swf_deprecated + + +# RegisterActivityType endpoint +@mock_swf_deprecated +def test_register_activity_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_activity_type("test-domain", "test-activity", "v1.0") + + types = conn.list_activity_types("test-domain", "REGISTERED") + actype = types["typeInfos"][0] + actype["activityType"]["name"].should.equal("test-activity") + actype["activityType"]["version"].should.equal("v1.0") + + +@mock_swf_deprecated +def test_register_already_existing_activity_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_activity_type("test-domain", "test-activity", "v1.0") + + conn.register_activity_type.when.called_with( + "test-domain", "test-activity", "v1.0" + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_register_with_wrong_parameter_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + + conn.register_activity_type.when.called_with( + "test-domain", "test-activity", 12 + ).should.throw(SWFResponseError) + + +# ListActivityTypes endpoint +@mock_swf_deprecated +def test_list_activity_types(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_activity_type("test-domain", "b-test-activity", "v1.0") + conn.register_activity_type("test-domain", "a-test-activity", "v1.0") + conn.register_activity_type("test-domain", "c-test-activity", "v1.0") + + 
all_activity_types = conn.list_activity_types("test-domain", "REGISTERED") + names = [activity_type["activityType"]["name"] + for activity_type in all_activity_types["typeInfos"]] + names.should.equal( + ["a-test-activity", "b-test-activity", "c-test-activity"]) + + +@mock_swf_deprecated +def test_list_activity_types_reverse_order(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_activity_type("test-domain", "b-test-activity", "v1.0") + conn.register_activity_type("test-domain", "a-test-activity", "v1.0") + conn.register_activity_type("test-domain", "c-test-activity", "v1.0") + + all_activity_types = conn.list_activity_types("test-domain", "REGISTERED", + reverse_order=True) + names = [activity_type["activityType"]["name"] + for activity_type in all_activity_types["typeInfos"]] + names.should.equal( + ["c-test-activity", "b-test-activity", "a-test-activity"]) + + +# DeprecateActivityType endpoint +@mock_swf_deprecated +def test_deprecate_activity_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_activity_type("test-domain", "test-activity", "v1.0") + conn.deprecate_activity_type("test-domain", "test-activity", "v1.0") + + actypes = conn.list_activity_types("test-domain", "DEPRECATED") + actype = actypes["typeInfos"][0] + actype["activityType"]["name"].should.equal("test-activity") + actype["activityType"]["version"].should.equal("v1.0") + + +@mock_swf_deprecated +def test_deprecate_already_deprecated_activity_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_activity_type("test-domain", "test-activity", "v1.0") + conn.deprecate_activity_type("test-domain", "test-activity", "v1.0") + + conn.deprecate_activity_type.when.called_with( + "test-domain", "test-activity", "v1.0" + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_deprecate_non_existent_activity_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + + conn.deprecate_activity_type.when.called_with( + "test-domain", "non-existent", "v1.0" + ).should.throw(SWFResponseError) + + +# DescribeActivityType endpoint +@mock_swf_deprecated +def test_describe_activity_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_activity_type("test-domain", "test-activity", "v1.0", + task_list="foo", default_task_heartbeat_timeout="32") + + actype = conn.describe_activity_type( + "test-domain", "test-activity", "v1.0") + actype["configuration"]["defaultTaskList"]["name"].should.equal("foo") + infos = actype["typeInfo"] + infos["activityType"]["name"].should.equal("test-activity") + infos["activityType"]["version"].should.equal("v1.0") + infos["status"].should.equal("REGISTERED") + + +@mock_swf_deprecated +def test_describe_non_existent_activity_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + + conn.describe_activity_type.when.called_with( + "test-domain", "non-existent", "v1.0" + ).should.throw(SWFResponseError) diff --git a/tests/test_swf/responses/test_decision_tasks.py b/tests/test_swf/responses/test_decision_tasks.py index 972b1053b..ecb3c3117 100644 --- a/tests/test_swf/responses/test_decision_tasks.py +++ b/tests/test_swf/responses/test_decision_tasks.py @@ -1,342 +1,342 @@ -from boto.swf.exceptions import SWFResponseError -from freezegun import freeze_time 
-import sure # noqa - -from moto import mock_swf_deprecated -from moto.swf import swf_backend - -from ..utils import setup_workflow - - -# PollForDecisionTask endpoint -@mock_swf_deprecated -def test_poll_for_decision_task_when_one(): - conn = setup_workflow() - - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - types = [evt["eventType"] for evt in resp["events"]] - types.should.equal(["WorkflowExecutionStarted", "DecisionTaskScheduled"]) - - resp = conn.poll_for_decision_task( - "test-domain", "queue", identity="srv01") - types = [evt["eventType"] for evt in resp["events"]] - types.should.equal(["WorkflowExecutionStarted", - "DecisionTaskScheduled", "DecisionTaskStarted"]) - - resp[ - "events"][-1]["decisionTaskStartedEventAttributes"]["identity"].should.equal("srv01") - - -@mock_swf_deprecated -def test_poll_for_decision_task_when_none(): - conn = setup_workflow() - conn.poll_for_decision_task("test-domain", "queue") - - resp = conn.poll_for_decision_task("test-domain", "queue") - # this is the DecisionTask representation you get from the real SWF - # after waiting 60s when there's no decision to be taken - resp.should.equal({"previousStartedEventId": 0, "startedEventId": 0}) - - -@mock_swf_deprecated -def test_poll_for_decision_task_on_non_existent_queue(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "non-existent-queue") - resp.should.equal({"previousStartedEventId": 0, "startedEventId": 0}) - - -@mock_swf_deprecated -def test_poll_for_decision_task_with_reverse_order(): - conn = setup_workflow() - resp = conn.poll_for_decision_task( - "test-domain", "queue", reverse_order=True) - types = [evt["eventType"] for evt in resp["events"]] - types.should.equal( - ["DecisionTaskStarted", "DecisionTaskScheduled", "WorkflowExecutionStarted"]) - - -# CountPendingDecisionTasks endpoint -@mock_swf_deprecated -def test_count_pending_decision_tasks(): - conn = setup_workflow() - conn.poll_for_decision_task("test-domain", "queue") - resp = conn.count_pending_decision_tasks("test-domain", "queue") - resp.should.equal({"count": 1, "truncated": False}) - - -@mock_swf_deprecated -def test_count_pending_decision_tasks_on_non_existent_task_list(): - conn = setup_workflow() - resp = conn.count_pending_decision_tasks("test-domain", "non-existent") - resp.should.equal({"count": 0, "truncated": False}) - - -@mock_swf_deprecated -def test_count_pending_decision_tasks_after_decision_completes(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue") - conn.respond_decision_task_completed(resp["taskToken"]) - - resp = conn.count_pending_decision_tasks("test-domain", "queue") - resp.should.equal({"count": 0, "truncated": False}) - - -# RespondDecisionTaskCompleted endpoint -@mock_swf_deprecated -def test_respond_decision_task_completed_with_no_decision(): - conn = setup_workflow() - - resp = conn.poll_for_decision_task("test-domain", "queue") - task_token = resp["taskToken"] - - resp = conn.respond_decision_task_completed( - task_token, - execution_context="free-form context", - ) - resp.should.be.none - - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - types = [evt["eventType"] for evt in resp["events"]] - types.should.equal([ - "WorkflowExecutionStarted", - "DecisionTaskScheduled", - "DecisionTaskStarted", - "DecisionTaskCompleted", - ]) - evt = resp["events"][-1] - evt["decisionTaskCompletedEventAttributes"].should.equal({ - "executionContext": "free-form 
context", - "scheduledEventId": 2, - "startedEventId": 3, - }) - - resp = conn.describe_workflow_execution( - "test-domain", conn.run_id, "uid-abcd1234") - resp["latestExecutionContext"].should.equal("free-form context") - - -@mock_swf_deprecated -def test_respond_decision_task_completed_with_wrong_token(): - conn = setup_workflow() - conn.poll_for_decision_task("test-domain", "queue") - conn.respond_decision_task_completed.when.called_with( - "not-a-correct-token" - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_respond_decision_task_completed_on_close_workflow_execution(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue") - task_token = resp["taskToken"] - - # bad: we're closing workflow execution manually, but endpoints are not - # coded for now.. - wfe = swf_backend.domains[0].workflow_executions[-1] - wfe.execution_status = "CLOSED" - # /bad - - conn.respond_decision_task_completed.when.called_with( - task_token - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_respond_decision_task_completed_with_task_already_completed(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue") - task_token = resp["taskToken"] - conn.respond_decision_task_completed(task_token) - - conn.respond_decision_task_completed.when.called_with( - task_token - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_respond_decision_task_completed_with_complete_workflow_execution(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue") - task_token = resp["taskToken"] - - decisions = [{ - "decisionType": "CompleteWorkflowExecution", - "completeWorkflowExecutionDecisionAttributes": {"result": "foo bar"} - }] - resp = conn.respond_decision_task_completed( - task_token, decisions=decisions) - resp.should.be.none - - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - types = [evt["eventType"] for evt in resp["events"]] - types.should.equal([ - "WorkflowExecutionStarted", - "DecisionTaskScheduled", - "DecisionTaskStarted", - "DecisionTaskCompleted", - "WorkflowExecutionCompleted", - ]) - resp["events"][-1]["workflowExecutionCompletedEventAttributes"][ - "result"].should.equal("foo bar") - - -@mock_swf_deprecated -def test_respond_decision_task_completed_with_close_decision_not_last(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue") - task_token = resp["taskToken"] - - decisions = [ - {"decisionType": "CompleteWorkflowExecution"}, - {"decisionType": "WeDontCare"}, - ] - - conn.respond_decision_task_completed.when.called_with( - task_token, decisions=decisions - ).should.throw(SWFResponseError, r"Close must be last decision in list") - - -@mock_swf_deprecated -def test_respond_decision_task_completed_with_invalid_decision_type(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue") - task_token = resp["taskToken"] - - decisions = [ - {"decisionType": "BadDecisionType"}, - {"decisionType": "CompleteWorkflowExecution"}, - ] - - conn.respond_decision_task_completed.when.called_with( - task_token, decisions=decisions).should.throw( - SWFResponseError, - r"Value 'BadDecisionType' at 'decisions.1.member.decisionType'" - ) - - -@mock_swf_deprecated -def test_respond_decision_task_completed_with_missing_attributes(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue") - task_token = resp["taskToken"] - - decisions = [ - { - 
"decisionType": "should trigger even with incorrect decision type", - "startTimerDecisionAttributes": {} - }, - ] - - conn.respond_decision_task_completed.when.called_with( - task_token, decisions=decisions - ).should.throw( - SWFResponseError, - r"Value null at 'decisions.1.member.startTimerDecisionAttributes.timerId' " - r"failed to satisfy constraint: Member must not be null" - ) - - -@mock_swf_deprecated -def test_respond_decision_task_completed_with_missing_attributes_totally(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue") - task_token = resp["taskToken"] - - decisions = [ - {"decisionType": "StartTimer"}, - ] - - conn.respond_decision_task_completed.when.called_with( - task_token, decisions=decisions - ).should.throw( - SWFResponseError, - r"Value null at 'decisions.1.member.startTimerDecisionAttributes.timerId' " - r"failed to satisfy constraint: Member must not be null" - ) - - -@mock_swf_deprecated -def test_respond_decision_task_completed_with_fail_workflow_execution(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue") - task_token = resp["taskToken"] - - decisions = [{ - "decisionType": "FailWorkflowExecution", - "failWorkflowExecutionDecisionAttributes": {"reason": "my rules", "details": "foo"} - }] - resp = conn.respond_decision_task_completed( - task_token, decisions=decisions) - resp.should.be.none - - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - types = [evt["eventType"] for evt in resp["events"]] - types.should.equal([ - "WorkflowExecutionStarted", - "DecisionTaskScheduled", - "DecisionTaskStarted", - "DecisionTaskCompleted", - "WorkflowExecutionFailed", - ]) - attrs = resp["events"][-1]["workflowExecutionFailedEventAttributes"] - attrs["reason"].should.equal("my rules") - attrs["details"].should.equal("foo") - - -@mock_swf_deprecated -@freeze_time("2015-01-01 12:00:00") -def test_respond_decision_task_completed_with_schedule_activity_task(): - conn = setup_workflow() - resp = conn.poll_for_decision_task("test-domain", "queue") - task_token = resp["taskToken"] - - decisions = [{ - "decisionType": "ScheduleActivityTask", - "scheduleActivityTaskDecisionAttributes": { - "activityId": "my-activity-001", - "activityType": { - "name": "test-activity", - "version": "v1.1" - }, - "heartbeatTimeout": "60", - "input": "123", - "taskList": { - "name": "my-task-list" - }, - } - }] - resp = conn.respond_decision_task_completed( - task_token, decisions=decisions) - resp.should.be.none - - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - types = [evt["eventType"] for evt in resp["events"]] - types.should.equal([ - "WorkflowExecutionStarted", - "DecisionTaskScheduled", - "DecisionTaskStarted", - "DecisionTaskCompleted", - "ActivityTaskScheduled", - ]) - resp["events"][-1]["activityTaskScheduledEventAttributes"].should.equal({ - "decisionTaskCompletedEventId": 4, - "activityId": "my-activity-001", - "activityType": { - "name": "test-activity", - "version": "v1.1", - }, - "heartbeatTimeout": "60", - "input": "123", - "taskList": { - "name": "my-task-list" - }, - }) - - resp = conn.describe_workflow_execution( - "test-domain", conn.run_id, "uid-abcd1234") - resp["latestActivityTaskTimestamp"].should.equal(1420113600.0) +from boto.swf.exceptions import SWFResponseError +from freezegun import freeze_time +import sure # noqa + +from moto import mock_swf_deprecated +from moto.swf import swf_backend + +from ..utils import 
setup_workflow + + +# PollForDecisionTask endpoint +@mock_swf_deprecated +def test_poll_for_decision_task_when_one(): + conn = setup_workflow() + + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + types = [evt["eventType"] for evt in resp["events"]] + types.should.equal(["WorkflowExecutionStarted", "DecisionTaskScheduled"]) + + resp = conn.poll_for_decision_task( + "test-domain", "queue", identity="srv01") + types = [evt["eventType"] for evt in resp["events"]] + types.should.equal(["WorkflowExecutionStarted", + "DecisionTaskScheduled", "DecisionTaskStarted"]) + + resp[ + "events"][-1]["decisionTaskStartedEventAttributes"]["identity"].should.equal("srv01") + + +@mock_swf_deprecated +def test_poll_for_decision_task_when_none(): + conn = setup_workflow() + conn.poll_for_decision_task("test-domain", "queue") + + resp = conn.poll_for_decision_task("test-domain", "queue") + # this is the DecisionTask representation you get from the real SWF + # after waiting 60s when there's no decision to be taken + resp.should.equal({"previousStartedEventId": 0, "startedEventId": 0}) + + +@mock_swf_deprecated +def test_poll_for_decision_task_on_non_existent_queue(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "non-existent-queue") + resp.should.equal({"previousStartedEventId": 0, "startedEventId": 0}) + + +@mock_swf_deprecated +def test_poll_for_decision_task_with_reverse_order(): + conn = setup_workflow() + resp = conn.poll_for_decision_task( + "test-domain", "queue", reverse_order=True) + types = [evt["eventType"] for evt in resp["events"]] + types.should.equal( + ["DecisionTaskStarted", "DecisionTaskScheduled", "WorkflowExecutionStarted"]) + + +# CountPendingDecisionTasks endpoint +@mock_swf_deprecated +def test_count_pending_decision_tasks(): + conn = setup_workflow() + conn.poll_for_decision_task("test-domain", "queue") + resp = conn.count_pending_decision_tasks("test-domain", "queue") + resp.should.equal({"count": 1, "truncated": False}) + + +@mock_swf_deprecated +def test_count_pending_decision_tasks_on_non_existent_task_list(): + conn = setup_workflow() + resp = conn.count_pending_decision_tasks("test-domain", "non-existent") + resp.should.equal({"count": 0, "truncated": False}) + + +@mock_swf_deprecated +def test_count_pending_decision_tasks_after_decision_completes(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "queue") + conn.respond_decision_task_completed(resp["taskToken"]) + + resp = conn.count_pending_decision_tasks("test-domain", "queue") + resp.should.equal({"count": 0, "truncated": False}) + + +# RespondDecisionTaskCompleted endpoint +@mock_swf_deprecated +def test_respond_decision_task_completed_with_no_decision(): + conn = setup_workflow() + + resp = conn.poll_for_decision_task("test-domain", "queue") + task_token = resp["taskToken"] + + resp = conn.respond_decision_task_completed( + task_token, + execution_context="free-form context", + ) + resp.should.be.none + + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + types = [evt["eventType"] for evt in resp["events"]] + types.should.equal([ + "WorkflowExecutionStarted", + "DecisionTaskScheduled", + "DecisionTaskStarted", + "DecisionTaskCompleted", + ]) + evt = resp["events"][-1] + evt["decisionTaskCompletedEventAttributes"].should.equal({ + "executionContext": "free-form context", + "scheduledEventId": 2, + "startedEventId": 3, + }) + + resp = conn.describe_workflow_execution( + 
"test-domain", conn.run_id, "uid-abcd1234") + resp["latestExecutionContext"].should.equal("free-form context") + + +@mock_swf_deprecated +def test_respond_decision_task_completed_with_wrong_token(): + conn = setup_workflow() + conn.poll_for_decision_task("test-domain", "queue") + conn.respond_decision_task_completed.when.called_with( + "not-a-correct-token" + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_respond_decision_task_completed_on_close_workflow_execution(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "queue") + task_token = resp["taskToken"] + + # bad: we're closing workflow execution manually, but endpoints are not + # coded for now.. + wfe = swf_backend.domains[0].workflow_executions[-1] + wfe.execution_status = "CLOSED" + # /bad + + conn.respond_decision_task_completed.when.called_with( + task_token + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_respond_decision_task_completed_with_task_already_completed(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "queue") + task_token = resp["taskToken"] + conn.respond_decision_task_completed(task_token) + + conn.respond_decision_task_completed.when.called_with( + task_token + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_respond_decision_task_completed_with_complete_workflow_execution(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "queue") + task_token = resp["taskToken"] + + decisions = [{ + "decisionType": "CompleteWorkflowExecution", + "completeWorkflowExecutionDecisionAttributes": {"result": "foo bar"} + }] + resp = conn.respond_decision_task_completed( + task_token, decisions=decisions) + resp.should.be.none + + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + types = [evt["eventType"] for evt in resp["events"]] + types.should.equal([ + "WorkflowExecutionStarted", + "DecisionTaskScheduled", + "DecisionTaskStarted", + "DecisionTaskCompleted", + "WorkflowExecutionCompleted", + ]) + resp["events"][-1]["workflowExecutionCompletedEventAttributes"][ + "result"].should.equal("foo bar") + + +@mock_swf_deprecated +def test_respond_decision_task_completed_with_close_decision_not_last(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "queue") + task_token = resp["taskToken"] + + decisions = [ + {"decisionType": "CompleteWorkflowExecution"}, + {"decisionType": "WeDontCare"}, + ] + + conn.respond_decision_task_completed.when.called_with( + task_token, decisions=decisions + ).should.throw(SWFResponseError, r"Close must be last decision in list") + + +@mock_swf_deprecated +def test_respond_decision_task_completed_with_invalid_decision_type(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "queue") + task_token = resp["taskToken"] + + decisions = [ + {"decisionType": "BadDecisionType"}, + {"decisionType": "CompleteWorkflowExecution"}, + ] + + conn.respond_decision_task_completed.when.called_with( + task_token, decisions=decisions).should.throw( + SWFResponseError, + r"Value 'BadDecisionType' at 'decisions.1.member.decisionType'" + ) + + +@mock_swf_deprecated +def test_respond_decision_task_completed_with_missing_attributes(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "queue") + task_token = resp["taskToken"] + + decisions = [ + { + "decisionType": "should trigger even with incorrect decision type", + "startTimerDecisionAttributes": {} + }, + ] 
+ + conn.respond_decision_task_completed.when.called_with( + task_token, decisions=decisions + ).should.throw( + SWFResponseError, + r"Value null at 'decisions.1.member.startTimerDecisionAttributes.timerId' " + r"failed to satisfy constraint: Member must not be null" + ) + + +@mock_swf_deprecated +def test_respond_decision_task_completed_with_missing_attributes_totally(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "queue") + task_token = resp["taskToken"] + + decisions = [ + {"decisionType": "StartTimer"}, + ] + + conn.respond_decision_task_completed.when.called_with( + task_token, decisions=decisions + ).should.throw( + SWFResponseError, + r"Value null at 'decisions.1.member.startTimerDecisionAttributes.timerId' " + r"failed to satisfy constraint: Member must not be null" + ) + + +@mock_swf_deprecated +def test_respond_decision_task_completed_with_fail_workflow_execution(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "queue") + task_token = resp["taskToken"] + + decisions = [{ + "decisionType": "FailWorkflowExecution", + "failWorkflowExecutionDecisionAttributes": {"reason": "my rules", "details": "foo"} + }] + resp = conn.respond_decision_task_completed( + task_token, decisions=decisions) + resp.should.be.none + + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + types = [evt["eventType"] for evt in resp["events"]] + types.should.equal([ + "WorkflowExecutionStarted", + "DecisionTaskScheduled", + "DecisionTaskStarted", + "DecisionTaskCompleted", + "WorkflowExecutionFailed", + ]) + attrs = resp["events"][-1]["workflowExecutionFailedEventAttributes"] + attrs["reason"].should.equal("my rules") + attrs["details"].should.equal("foo") + + +@mock_swf_deprecated +@freeze_time("2015-01-01 12:00:00") +def test_respond_decision_task_completed_with_schedule_activity_task(): + conn = setup_workflow() + resp = conn.poll_for_decision_task("test-domain", "queue") + task_token = resp["taskToken"] + + decisions = [{ + "decisionType": "ScheduleActivityTask", + "scheduleActivityTaskDecisionAttributes": { + "activityId": "my-activity-001", + "activityType": { + "name": "test-activity", + "version": "v1.1" + }, + "heartbeatTimeout": "60", + "input": "123", + "taskList": { + "name": "my-task-list" + }, + } + }] + resp = conn.respond_decision_task_completed( + task_token, decisions=decisions) + resp.should.be.none + + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + types = [evt["eventType"] for evt in resp["events"]] + types.should.equal([ + "WorkflowExecutionStarted", + "DecisionTaskScheduled", + "DecisionTaskStarted", + "DecisionTaskCompleted", + "ActivityTaskScheduled", + ]) + resp["events"][-1]["activityTaskScheduledEventAttributes"].should.equal({ + "decisionTaskCompletedEventId": 4, + "activityId": "my-activity-001", + "activityType": { + "name": "test-activity", + "version": "v1.1", + }, + "heartbeatTimeout": "60", + "input": "123", + "taskList": { + "name": "my-task-list" + }, + }) + + resp = conn.describe_workflow_execution( + "test-domain", conn.run_id, "uid-abcd1234") + resp["latestActivityTaskTimestamp"].should.equal(1420113600.0) diff --git a/tests/test_swf/responses/test_domains.py b/tests/test_swf/responses/test_domains.py index 8edc76432..4004496ed 100644 --- a/tests/test_swf/responses/test_domains.py +++ b/tests/test_swf/responses/test_domains.py @@ -1,119 +1,119 @@ -import boto -from boto.swf.exceptions import SWFResponseError -import sure # 
noqa - -from moto import mock_swf_deprecated - - -# RegisterDomain endpoint -@mock_swf_deprecated -def test_register_domain(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60", description="A test domain") - - all_domains = conn.list_domains("REGISTERED") - domain = all_domains["domainInfos"][0] - - domain["name"].should.equal("test-domain") - domain["status"].should.equal("REGISTERED") - domain["description"].should.equal("A test domain") - - -@mock_swf_deprecated -def test_register_already_existing_domain(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60", description="A test domain") - - conn.register_domain.when.called_with( - "test-domain", "60", description="A test domain" - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_register_with_wrong_parameter_type(): - conn = boto.connect_swf("the_key", "the_secret") - - conn.register_domain.when.called_with( - "test-domain", 60, description="A test domain" - ).should.throw(SWFResponseError) - - -# ListDomains endpoint -@mock_swf_deprecated -def test_list_domains_order(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("b-test-domain", "60") - conn.register_domain("a-test-domain", "60") - conn.register_domain("c-test-domain", "60") - - all_domains = conn.list_domains("REGISTERED") - names = [domain["name"] for domain in all_domains["domainInfos"]] - names.should.equal(["a-test-domain", "b-test-domain", "c-test-domain"]) - - -@mock_swf_deprecated -def test_list_domains_reverse_order(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("b-test-domain", "60") - conn.register_domain("a-test-domain", "60") - conn.register_domain("c-test-domain", "60") - - all_domains = conn.list_domains("REGISTERED", reverse_order=True) - names = [domain["name"] for domain in all_domains["domainInfos"]] - names.should.equal(["c-test-domain", "b-test-domain", "a-test-domain"]) - - -# DeprecateDomain endpoint -@mock_swf_deprecated -def test_deprecate_domain(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60", description="A test domain") - conn.deprecate_domain("test-domain") - - all_domains = conn.list_domains("DEPRECATED") - domain = all_domains["domainInfos"][0] - - domain["name"].should.equal("test-domain") - - -@mock_swf_deprecated -def test_deprecate_already_deprecated_domain(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60", description="A test domain") - conn.deprecate_domain("test-domain") - - conn.deprecate_domain.when.called_with( - "test-domain" - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_deprecate_non_existent_domain(): - conn = boto.connect_swf("the_key", "the_secret") - - conn.deprecate_domain.when.called_with( - "non-existent" - ).should.throw(SWFResponseError) - - -# DescribeDomain endpoint -@mock_swf_deprecated -def test_describe_domain(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60", description="A test domain") - - domain = conn.describe_domain("test-domain") - domain["configuration"][ - "workflowExecutionRetentionPeriodInDays"].should.equal("60") - domain["domainInfo"]["description"].should.equal("A test domain") - domain["domainInfo"]["name"].should.equal("test-domain") - domain["domainInfo"]["status"].should.equal("REGISTERED") - - -@mock_swf_deprecated -def test_describe_non_existent_domain(): - conn = 
boto.connect_swf("the_key", "the_secret") - - conn.describe_domain.when.called_with( - "non-existent" - ).should.throw(SWFResponseError) +import boto +from boto.swf.exceptions import SWFResponseError +import sure # noqa + +from moto import mock_swf_deprecated + + +# RegisterDomain endpoint +@mock_swf_deprecated +def test_register_domain(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60", description="A test domain") + + all_domains = conn.list_domains("REGISTERED") + domain = all_domains["domainInfos"][0] + + domain["name"].should.equal("test-domain") + domain["status"].should.equal("REGISTERED") + domain["description"].should.equal("A test domain") + + +@mock_swf_deprecated +def test_register_already_existing_domain(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60", description="A test domain") + + conn.register_domain.when.called_with( + "test-domain", "60", description="A test domain" + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_register_with_wrong_parameter_type(): + conn = boto.connect_swf("the_key", "the_secret") + + conn.register_domain.when.called_with( + "test-domain", 60, description="A test domain" + ).should.throw(SWFResponseError) + + +# ListDomains endpoint +@mock_swf_deprecated +def test_list_domains_order(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("b-test-domain", "60") + conn.register_domain("a-test-domain", "60") + conn.register_domain("c-test-domain", "60") + + all_domains = conn.list_domains("REGISTERED") + names = [domain["name"] for domain in all_domains["domainInfos"]] + names.should.equal(["a-test-domain", "b-test-domain", "c-test-domain"]) + + +@mock_swf_deprecated +def test_list_domains_reverse_order(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("b-test-domain", "60") + conn.register_domain("a-test-domain", "60") + conn.register_domain("c-test-domain", "60") + + all_domains = conn.list_domains("REGISTERED", reverse_order=True) + names = [domain["name"] for domain in all_domains["domainInfos"]] + names.should.equal(["c-test-domain", "b-test-domain", "a-test-domain"]) + + +# DeprecateDomain endpoint +@mock_swf_deprecated +def test_deprecate_domain(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60", description="A test domain") + conn.deprecate_domain("test-domain") + + all_domains = conn.list_domains("DEPRECATED") + domain = all_domains["domainInfos"][0] + + domain["name"].should.equal("test-domain") + + +@mock_swf_deprecated +def test_deprecate_already_deprecated_domain(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60", description="A test domain") + conn.deprecate_domain("test-domain") + + conn.deprecate_domain.when.called_with( + "test-domain" + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_deprecate_non_existent_domain(): + conn = boto.connect_swf("the_key", "the_secret") + + conn.deprecate_domain.when.called_with( + "non-existent" + ).should.throw(SWFResponseError) + + +# DescribeDomain endpoint +@mock_swf_deprecated +def test_describe_domain(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60", description="A test domain") + + domain = conn.describe_domain("test-domain") + domain["configuration"][ + "workflowExecutionRetentionPeriodInDays"].should.equal("60") + domain["domainInfo"]["description"].should.equal("A test 
domain") + domain["domainInfo"]["name"].should.equal("test-domain") + domain["domainInfo"]["status"].should.equal("REGISTERED") + + +@mock_swf_deprecated +def test_describe_non_existent_domain(): + conn = boto.connect_swf("the_key", "the_secret") + + conn.describe_domain.when.called_with( + "non-existent" + ).should.throw(SWFResponseError) diff --git a/tests/test_swf/responses/test_timeouts.py b/tests/test_swf/responses/test_timeouts.py index f49c597a4..95d956f99 100644 --- a/tests/test_swf/responses/test_timeouts.py +++ b/tests/test_swf/responses/test_timeouts.py @@ -1,110 +1,110 @@ -from freezegun import freeze_time -import sure # noqa - -from moto import mock_swf_deprecated - -from ..utils import setup_workflow, SCHEDULE_ACTIVITY_TASK_DECISION - - -# Activity Task Heartbeat timeout -# Default value in workflow helpers: 5 mins -@mock_swf_deprecated -def test_activity_task_heartbeat_timeout(): - with freeze_time("2015-01-01 12:00:00"): - conn = setup_workflow() - decision_token = conn.poll_for_decision_task( - "test-domain", "queue")["taskToken"] - conn.respond_decision_task_completed(decision_token, decisions=[ - SCHEDULE_ACTIVITY_TASK_DECISION - ]) - conn.poll_for_activity_task( - "test-domain", "activity-task-list", identity="surprise") - - with freeze_time("2015-01-01 12:04:30"): - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - resp["events"][-1]["eventType"].should.equal("ActivityTaskStarted") - - with freeze_time("2015-01-01 12:05:30"): - # => Activity Task Heartbeat timeout reached!! - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - - resp["events"][-2]["eventType"].should.equal("ActivityTaskTimedOut") - attrs = resp["events"][-2]["activityTaskTimedOutEventAttributes"] - attrs["timeoutType"].should.equal("HEARTBEAT") - # checks that event has been emitted at 12:05:00, not 12:05:30 - resp["events"][-2]["eventTimestamp"].should.equal(1420113900.0) - - resp["events"][-1]["eventType"].should.equal("DecisionTaskScheduled") - - -# Decision Task Start to Close timeout -# Default value in workflow helpers: 5 mins -@mock_swf_deprecated -def test_decision_task_start_to_close_timeout(): - pass - with freeze_time("2015-01-01 12:00:00"): - conn = setup_workflow() - conn.poll_for_decision_task("test-domain", "queue")["taskToken"] - - with freeze_time("2015-01-01 12:04:30"): - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - - event_types = [evt["eventType"] for evt in resp["events"]] - event_types.should.equal( - ["WorkflowExecutionStarted", "DecisionTaskScheduled", "DecisionTaskStarted"] - ) - - with freeze_time("2015-01-01 12:05:30"): - # => Decision Task Start to Close timeout reached!! 
- resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - - event_types = [evt["eventType"] for evt in resp["events"]] - event_types.should.equal( - ["WorkflowExecutionStarted", "DecisionTaskScheduled", "DecisionTaskStarted", - "DecisionTaskTimedOut", "DecisionTaskScheduled"] - ) - attrs = resp["events"][-2]["decisionTaskTimedOutEventAttributes"] - attrs.should.equal({ - "scheduledEventId": 2, "startedEventId": 3, "timeoutType": "START_TO_CLOSE" - }) - # checks that event has been emitted at 12:05:00, not 12:05:30 - resp["events"][-2]["eventTimestamp"].should.equal(1420113900.0) - - -# Workflow Execution Start to Close timeout -# Default value in workflow helpers: 2 hours -@mock_swf_deprecated -def test_workflow_execution_start_to_close_timeout(): - pass - with freeze_time("2015-01-01 12:00:00"): - conn = setup_workflow() - - with freeze_time("2015-01-01 13:59:30"): - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - - event_types = [evt["eventType"] for evt in resp["events"]] - event_types.should.equal( - ["WorkflowExecutionStarted", "DecisionTaskScheduled"] - ) - - with freeze_time("2015-01-01 14:00:30"): - # => Workflow Execution Start to Close timeout reached!! - resp = conn.get_workflow_execution_history( - "test-domain", conn.run_id, "uid-abcd1234") - - event_types = [evt["eventType"] for evt in resp["events"]] - event_types.should.equal( - ["WorkflowExecutionStarted", "DecisionTaskScheduled", - "WorkflowExecutionTimedOut"] - ) - attrs = resp["events"][-1]["workflowExecutionTimedOutEventAttributes"] - attrs.should.equal({ - "childPolicy": "ABANDON", "timeoutType": "START_TO_CLOSE" - }) - # checks that event has been emitted at 14:00:00, not 14:00:30 - resp["events"][-1]["eventTimestamp"].should.equal(1420120800.0) +from freezegun import freeze_time +import sure # noqa + +from moto import mock_swf_deprecated + +from ..utils import setup_workflow, SCHEDULE_ACTIVITY_TASK_DECISION + + +# Activity Task Heartbeat timeout +# Default value in workflow helpers: 5 mins +@mock_swf_deprecated +def test_activity_task_heartbeat_timeout(): + with freeze_time("2015-01-01 12:00:00"): + conn = setup_workflow() + decision_token = conn.poll_for_decision_task( + "test-domain", "queue")["taskToken"] + conn.respond_decision_task_completed(decision_token, decisions=[ + SCHEDULE_ACTIVITY_TASK_DECISION + ]) + conn.poll_for_activity_task( + "test-domain", "activity-task-list", identity="surprise") + + with freeze_time("2015-01-01 12:04:30"): + resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + resp["events"][-1]["eventType"].should.equal("ActivityTaskStarted") + + with freeze_time("2015-01-01 12:05:30"): + # => Activity Task Heartbeat timeout reached!! 
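+        # 12:00:00 start + 5 min default heartbeat timeout = 12:05:00
+        # deadline, so by 12:05:30 the timeout event must be in history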
+        resp = conn.get_workflow_execution_history(
+            "test-domain", conn.run_id, "uid-abcd1234")
+
+        resp["events"][-2]["eventType"].should.equal("ActivityTaskTimedOut")
+        attrs = resp["events"][-2]["activityTaskTimedOutEventAttributes"]
+        attrs["timeoutType"].should.equal("HEARTBEAT")
+        # checks that event has been emitted at 12:05:00, not 12:05:30
+        resp["events"][-2]["eventTimestamp"].should.equal(1420113900.0)
+
+        resp["events"][-1]["eventType"].should.equal("DecisionTaskScheduled")
+
+
+# Decision Task Start to Close timeout
+# Default value in workflow helpers: 5 mins
+@mock_swf_deprecated
+def test_decision_task_start_to_close_timeout():
+    with freeze_time("2015-01-01 12:00:00"):
+        conn = setup_workflow()
+        conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
+
+    with freeze_time("2015-01-01 12:04:30"):
+        resp = conn.get_workflow_execution_history(
+            "test-domain", conn.run_id, "uid-abcd1234")
+
+        event_types = [evt["eventType"] for evt in resp["events"]]
+        event_types.should.equal(
+            ["WorkflowExecutionStarted", "DecisionTaskScheduled", "DecisionTaskStarted"]
+        )
+
+    with freeze_time("2015-01-01 12:05:30"):
+        # => Decision Task Start to Close timeout reached!!
+        resp = conn.get_workflow_execution_history(
+            "test-domain", conn.run_id, "uid-abcd1234")
+
+        event_types = [evt["eventType"] for evt in resp["events"]]
+        event_types.should.equal(
+            ["WorkflowExecutionStarted", "DecisionTaskScheduled", "DecisionTaskStarted",
+             "DecisionTaskTimedOut", "DecisionTaskScheduled"]
+        )
+        attrs = resp["events"][-2]["decisionTaskTimedOutEventAttributes"]
+        attrs.should.equal({
+            "scheduledEventId": 2, "startedEventId": 3, "timeoutType": "START_TO_CLOSE"
+        })
+        # checks that event has been emitted at 12:05:00, not 12:05:30
+        resp["events"][-2]["eventTimestamp"].should.equal(1420113900.0)
+
+
+# Workflow Execution Start to Close timeout
+# Default value in workflow helpers: 2 hours
+@mock_swf_deprecated
+def test_workflow_execution_start_to_close_timeout():
+    with freeze_time("2015-01-01 12:00:00"):
+        conn = setup_workflow()
+
+    with freeze_time("2015-01-01 13:59:30"):
+        resp = conn.get_workflow_execution_history(
+            "test-domain", conn.run_id, "uid-abcd1234")
+
+        event_types = [evt["eventType"] for evt in resp["events"]]
+        event_types.should.equal(
+            ["WorkflowExecutionStarted", "DecisionTaskScheduled"]
+        )
+
+    with freeze_time("2015-01-01 14:00:30"):
+        # => Workflow Execution Start to Close timeout reached!!
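+        # 12:00:00 start + 2 h default execution timeout = 14:00:00
+        # deadline, so by 14:00:30 the execution must have timed out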
+ resp = conn.get_workflow_execution_history( + "test-domain", conn.run_id, "uid-abcd1234") + + event_types = [evt["eventType"] for evt in resp["events"]] + event_types.should.equal( + ["WorkflowExecutionStarted", "DecisionTaskScheduled", + "WorkflowExecutionTimedOut"] + ) + attrs = resp["events"][-1]["workflowExecutionTimedOutEventAttributes"] + attrs.should.equal({ + "childPolicy": "ABANDON", "timeoutType": "START_TO_CLOSE" + }) + # checks that event has been emitted at 14:00:00, not 14:00:30 + resp["events"][-1]["eventTimestamp"].should.equal(1420120800.0) diff --git a/tests/test_swf/responses/test_workflow_executions.py b/tests/test_swf/responses/test_workflow_executions.py index 88e3caa75..2cb092260 100644 --- a/tests/test_swf/responses/test_workflow_executions.py +++ b/tests/test_swf/responses/test_workflow_executions.py @@ -1,262 +1,262 @@ -import boto -from boto.swf.exceptions import SWFResponseError -from datetime import datetime, timedelta - -import sure # noqa -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa - -from moto import mock_swf_deprecated -from moto.core.utils import unix_time - - -# Utils -@mock_swf_deprecated -def setup_swf_environment(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60", description="A test domain") - conn.register_workflow_type( - "test-domain", "test-workflow", "v1.0", - task_list="queue", default_child_policy="TERMINATE", - default_execution_start_to_close_timeout="300", - default_task_start_to_close_timeout="300", - ) - conn.register_activity_type("test-domain", "test-activity", "v1.1") - return conn - - -# StartWorkflowExecution endpoint -@mock_swf_deprecated -def test_start_workflow_execution(): - conn = setup_swf_environment() - - wf = conn.start_workflow_execution( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0") - wf.should.contain("runId") - -@mock_swf_deprecated -def test_signal_workflow_execution(): - conn = setup_swf_environment() - hsh = conn.start_workflow_execution( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0") - run_id = hsh["runId"] - - wfe = conn.signal_workflow_execution( - "test-domain", "my_signal", "uid-abcd1234", "my_input", run_id) - - wfe = conn.describe_workflow_execution( - "test-domain", run_id, "uid-abcd1234") - - wfe["openCounts"]["openDecisionTasks"].should.equal(2) - -@mock_swf_deprecated -def test_start_already_started_workflow_execution(): - conn = setup_swf_environment() - conn.start_workflow_execution( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0") - - conn.start_workflow_execution.when.called_with( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0" - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_start_workflow_execution_on_deprecated_type(): - conn = setup_swf_environment() - conn.deprecate_workflow_type("test-domain", "test-workflow", "v1.0") - - conn.start_workflow_execution.when.called_with( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0" - ).should.throw(SWFResponseError) - - -# DescribeWorkflowExecution endpoint -@mock_swf_deprecated -def test_describe_workflow_execution(): - conn = setup_swf_environment() - hsh = conn.start_workflow_execution( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0") - run_id = hsh["runId"] - - wfe = conn.describe_workflow_execution( - "test-domain", run_id, "uid-abcd1234") - wfe["executionInfo"]["execution"][ - "workflowId"].should.equal("uid-abcd1234") - 
wfe["executionInfo"]["executionStatus"].should.equal("OPEN") - - -@mock_swf_deprecated -def test_describe_non_existent_workflow_execution(): - conn = setup_swf_environment() - - conn.describe_workflow_execution.when.called_with( - "test-domain", "wrong-run-id", "wrong-workflow-id" - ).should.throw(SWFResponseError) - - -# GetWorkflowExecutionHistory endpoint -@mock_swf_deprecated -def test_get_workflow_execution_history(): - conn = setup_swf_environment() - hsh = conn.start_workflow_execution( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0") - run_id = hsh["runId"] - - resp = conn.get_workflow_execution_history( - "test-domain", run_id, "uid-abcd1234") - types = [evt["eventType"] for evt in resp["events"]] - types.should.equal(["WorkflowExecutionStarted", "DecisionTaskScheduled"]) - - -@mock_swf_deprecated -def test_get_workflow_execution_history_with_reverse_order(): - conn = setup_swf_environment() - hsh = conn.start_workflow_execution( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0") - run_id = hsh["runId"] - - resp = conn.get_workflow_execution_history("test-domain", run_id, "uid-abcd1234", - reverse_order=True) - types = [evt["eventType"] for evt in resp["events"]] - types.should.equal(["DecisionTaskScheduled", "WorkflowExecutionStarted"]) - - -@mock_swf_deprecated -def test_get_workflow_execution_history_on_non_existent_workflow_execution(): - conn = setup_swf_environment() - - conn.get_workflow_execution_history.when.called_with( - "test-domain", "wrong-run-id", "wrong-workflow-id" - ).should.throw(SWFResponseError) - - -# ListOpenWorkflowExecutions endpoint -@mock_swf_deprecated -def test_list_open_workflow_executions(): - conn = setup_swf_environment() - # One open workflow execution - conn.start_workflow_execution( - 'test-domain', 'uid-abcd1234', 'test-workflow', 'v1.0' - ) - # One closed workflow execution to make sure it isn't displayed - run_id = conn.start_workflow_execution( - 'test-domain', 'uid-abcd12345', 'test-workflow', 'v1.0' - )['runId'] - conn.terminate_workflow_execution('test-domain', 'uid-abcd12345', - details='some details', - reason='a more complete reason', - run_id=run_id) - - yesterday = datetime.utcnow() - timedelta(days=1) - oldest_date = unix_time(yesterday) - response = conn.list_open_workflow_executions('test-domain', - oldest_date, - workflow_id='test-workflow') - execution_infos = response['executionInfos'] - len(execution_infos).should.equal(1) - open_workflow = execution_infos[0] - open_workflow['workflowType'].should.equal({'version': 'v1.0', - 'name': 'test-workflow'}) - open_workflow.should.contain('startTimestamp') - open_workflow['execution']['workflowId'].should.equal('uid-abcd1234') - open_workflow['execution'].should.contain('runId') - open_workflow['cancelRequested'].should.be(False) - open_workflow['executionStatus'].should.equal('OPEN') - - -# ListClosedWorkflowExecutions endpoint -@mock_swf_deprecated -def test_list_closed_workflow_executions(): - conn = setup_swf_environment() - # Leave one workflow execution open to make sure it isn't displayed - conn.start_workflow_execution( - 'test-domain', 'uid-abcd1234', 'test-workflow', 'v1.0' - ) - # One closed workflow execution - run_id = conn.start_workflow_execution( - 'test-domain', 'uid-abcd12345', 'test-workflow', 'v1.0' - )['runId'] - conn.terminate_workflow_execution('test-domain', 'uid-abcd12345', - details='some details', - reason='a more complete reason', - run_id=run_id) - - yesterday = datetime.utcnow() - timedelta(days=1) - oldest_date = unix_time(yesterday) - 
response = conn.list_closed_workflow_executions( - 'test-domain', - start_oldest_date=oldest_date, - workflow_id='test-workflow') - execution_infos = response['executionInfos'] - len(execution_infos).should.equal(1) - open_workflow = execution_infos[0] - open_workflow['workflowType'].should.equal({'version': 'v1.0', - 'name': 'test-workflow'}) - open_workflow.should.contain('startTimestamp') - open_workflow['execution']['workflowId'].should.equal('uid-abcd12345') - open_workflow['execution'].should.contain('runId') - open_workflow['cancelRequested'].should.be(False) - open_workflow['executionStatus'].should.equal('CLOSED') - - -# TerminateWorkflowExecution endpoint -@mock_swf_deprecated -def test_terminate_workflow_execution(): - conn = setup_swf_environment() - run_id = conn.start_workflow_execution( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0" - )["runId"] - - resp = conn.terminate_workflow_execution("test-domain", "uid-abcd1234", - details="some details", - reason="a more complete reason", - run_id=run_id) - resp.should.be.none - - resp = conn.get_workflow_execution_history( - "test-domain", run_id, "uid-abcd1234") - evt = resp["events"][-1] - evt["eventType"].should.equal("WorkflowExecutionTerminated") - attrs = evt["workflowExecutionTerminatedEventAttributes"] - attrs["details"].should.equal("some details") - attrs["reason"].should.equal("a more complete reason") - attrs["cause"].should.equal("OPERATOR_INITIATED") - - -@mock_swf_deprecated -def test_terminate_workflow_execution_with_wrong_workflow_or_run_id(): - conn = setup_swf_environment() - run_id = conn.start_workflow_execution( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0" - )["runId"] - - # terminate workflow execution - conn.terminate_workflow_execution("test-domain", "uid-abcd1234") - - # already closed, with run_id - conn.terminate_workflow_execution.when.called_with( - "test-domain", "uid-abcd1234", run_id=run_id - ).should.throw( - SWFResponseError, "WorkflowExecution=[workflowId=uid-abcd1234, runId=" - ) - - # already closed, without run_id - conn.terminate_workflow_execution.when.called_with( - "test-domain", "uid-abcd1234" - ).should.throw( - SWFResponseError, "Unknown execution, workflowId = uid-abcd1234" - ) - - # wrong workflow id - conn.terminate_workflow_execution.when.called_with( - "test-domain", "uid-non-existent" - ).should.throw( - SWFResponseError, "Unknown execution, workflowId = uid-non-existent" - ) - - # wrong run_id - conn.terminate_workflow_execution.when.called_with( - "test-domain", "uid-abcd1234", run_id="foo" - ).should.throw( - SWFResponseError, "WorkflowExecution=[workflowId=uid-abcd1234, runId=" - ) +import boto +from boto.swf.exceptions import SWFResponseError +from datetime import datetime, timedelta + +import sure # noqa +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises # noqa + +from moto import mock_swf_deprecated +from moto.core.utils import unix_time + + +# Utils +@mock_swf_deprecated +def setup_swf_environment(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60", description="A test domain") + conn.register_workflow_type( + "test-domain", "test-workflow", "v1.0", + task_list="queue", default_child_policy="TERMINATE", + default_execution_start_to_close_timeout="300", + default_task_start_to_close_timeout="300", + ) + conn.register_activity_type("test-domain", "test-activity", "v1.1") + return conn + + +# StartWorkflowExecution endpoint +@mock_swf_deprecated +def 
test_start_workflow_execution(): + conn = setup_swf_environment() + + wf = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") + wf.should.contain("runId") + +@mock_swf_deprecated +def test_signal_workflow_execution(): + conn = setup_swf_environment() + hsh = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") + run_id = hsh["runId"] + + wfe = conn.signal_workflow_execution( + "test-domain", "my_signal", "uid-abcd1234", "my_input", run_id) + + wfe = conn.describe_workflow_execution( + "test-domain", run_id, "uid-abcd1234") + + wfe["openCounts"]["openDecisionTasks"].should.equal(2) + +@mock_swf_deprecated +def test_start_already_started_workflow_execution(): + conn = setup_swf_environment() + conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") + + conn.start_workflow_execution.when.called_with( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0" + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_start_workflow_execution_on_deprecated_type(): + conn = setup_swf_environment() + conn.deprecate_workflow_type("test-domain", "test-workflow", "v1.0") + + conn.start_workflow_execution.when.called_with( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0" + ).should.throw(SWFResponseError) + + +# DescribeWorkflowExecution endpoint +@mock_swf_deprecated +def test_describe_workflow_execution(): + conn = setup_swf_environment() + hsh = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") + run_id = hsh["runId"] + + wfe = conn.describe_workflow_execution( + "test-domain", run_id, "uid-abcd1234") + wfe["executionInfo"]["execution"][ + "workflowId"].should.equal("uid-abcd1234") + wfe["executionInfo"]["executionStatus"].should.equal("OPEN") + + +@mock_swf_deprecated +def test_describe_non_existent_workflow_execution(): + conn = setup_swf_environment() + + conn.describe_workflow_execution.when.called_with( + "test-domain", "wrong-run-id", "wrong-workflow-id" + ).should.throw(SWFResponseError) + + +# GetWorkflowExecutionHistory endpoint +@mock_swf_deprecated +def test_get_workflow_execution_history(): + conn = setup_swf_environment() + hsh = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") + run_id = hsh["runId"] + + resp = conn.get_workflow_execution_history( + "test-domain", run_id, "uid-abcd1234") + types = [evt["eventType"] for evt in resp["events"]] + types.should.equal(["WorkflowExecutionStarted", "DecisionTaskScheduled"]) + + +@mock_swf_deprecated +def test_get_workflow_execution_history_with_reverse_order(): + conn = setup_swf_environment() + hsh = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") + run_id = hsh["runId"] + + resp = conn.get_workflow_execution_history("test-domain", run_id, "uid-abcd1234", + reverse_order=True) + types = [evt["eventType"] for evt in resp["events"]] + types.should.equal(["DecisionTaskScheduled", "WorkflowExecutionStarted"]) + + +@mock_swf_deprecated +def test_get_workflow_execution_history_on_non_existent_workflow_execution(): + conn = setup_swf_environment() + + conn.get_workflow_execution_history.when.called_with( + "test-domain", "wrong-run-id", "wrong-workflow-id" + ).should.throw(SWFResponseError) + + +# ListOpenWorkflowExecutions endpoint +@mock_swf_deprecated +def test_list_open_workflow_executions(): + conn = setup_swf_environment() + # One open workflow execution + conn.start_workflow_execution( + 
'test-domain', 'uid-abcd1234', 'test-workflow', 'v1.0' + ) + # One closed workflow execution to make sure it isn't displayed + run_id = conn.start_workflow_execution( + 'test-domain', 'uid-abcd12345', 'test-workflow', 'v1.0' + )['runId'] + conn.terminate_workflow_execution('test-domain', 'uid-abcd12345', + details='some details', + reason='a more complete reason', + run_id=run_id) + + yesterday = datetime.utcnow() - timedelta(days=1) + oldest_date = unix_time(yesterday) + response = conn.list_open_workflow_executions('test-domain', + oldest_date, + workflow_id='test-workflow') + execution_infos = response['executionInfos'] + len(execution_infos).should.equal(1) + open_workflow = execution_infos[0] + open_workflow['workflowType'].should.equal({'version': 'v1.0', + 'name': 'test-workflow'}) + open_workflow.should.contain('startTimestamp') + open_workflow['execution']['workflowId'].should.equal('uid-abcd1234') + open_workflow['execution'].should.contain('runId') + open_workflow['cancelRequested'].should.be(False) + open_workflow['executionStatus'].should.equal('OPEN') + + +# ListClosedWorkflowExecutions endpoint +@mock_swf_deprecated +def test_list_closed_workflow_executions(): + conn = setup_swf_environment() + # Leave one workflow execution open to make sure it isn't displayed + conn.start_workflow_execution( + 'test-domain', 'uid-abcd1234', 'test-workflow', 'v1.0' + ) + # One closed workflow execution + run_id = conn.start_workflow_execution( + 'test-domain', 'uid-abcd12345', 'test-workflow', 'v1.0' + )['runId'] + conn.terminate_workflow_execution('test-domain', 'uid-abcd12345', + details='some details', + reason='a more complete reason', + run_id=run_id) + + yesterday = datetime.utcnow() - timedelta(days=1) + oldest_date = unix_time(yesterday) + response = conn.list_closed_workflow_executions( + 'test-domain', + start_oldest_date=oldest_date, + workflow_id='test-workflow') + execution_infos = response['executionInfos'] + len(execution_infos).should.equal(1) + open_workflow = execution_infos[0] + open_workflow['workflowType'].should.equal({'version': 'v1.0', + 'name': 'test-workflow'}) + open_workflow.should.contain('startTimestamp') + open_workflow['execution']['workflowId'].should.equal('uid-abcd12345') + open_workflow['execution'].should.contain('runId') + open_workflow['cancelRequested'].should.be(False) + open_workflow['executionStatus'].should.equal('CLOSED') + + +# TerminateWorkflowExecution endpoint +@mock_swf_deprecated +def test_terminate_workflow_execution(): + conn = setup_swf_environment() + run_id = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0" + )["runId"] + + resp = conn.terminate_workflow_execution("test-domain", "uid-abcd1234", + details="some details", + reason="a more complete reason", + run_id=run_id) + resp.should.be.none + + resp = conn.get_workflow_execution_history( + "test-domain", run_id, "uid-abcd1234") + evt = resp["events"][-1] + evt["eventType"].should.equal("WorkflowExecutionTerminated") + attrs = evt["workflowExecutionTerminatedEventAttributes"] + attrs["details"].should.equal("some details") + attrs["reason"].should.equal("a more complete reason") + attrs["cause"].should.equal("OPERATOR_INITIATED") + + +@mock_swf_deprecated +def test_terminate_workflow_execution_with_wrong_workflow_or_run_id(): + conn = setup_swf_environment() + run_id = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0" + )["runId"] + + # terminate workflow execution + 
conn.terminate_workflow_execution("test-domain", "uid-abcd1234") + + # already closed, with run_id + conn.terminate_workflow_execution.when.called_with( + "test-domain", "uid-abcd1234", run_id=run_id + ).should.throw( + SWFResponseError, "WorkflowExecution=[workflowId=uid-abcd1234, runId=" + ) + + # already closed, without run_id + conn.terminate_workflow_execution.when.called_with( + "test-domain", "uid-abcd1234" + ).should.throw( + SWFResponseError, "Unknown execution, workflowId = uid-abcd1234" + ) + + # wrong workflow id + conn.terminate_workflow_execution.when.called_with( + "test-domain", "uid-non-existent" + ).should.throw( + SWFResponseError, "Unknown execution, workflowId = uid-non-existent" + ) + + # wrong run_id + conn.terminate_workflow_execution.when.called_with( + "test-domain", "uid-abcd1234", run_id="foo" + ).should.throw( + SWFResponseError, "WorkflowExecution=[workflowId=uid-abcd1234, runId=" + ) diff --git a/tests/test_swf/responses/test_workflow_types.py b/tests/test_swf/responses/test_workflow_types.py index 9e097a873..f0b39e7ad 100644 --- a/tests/test_swf/responses/test_workflow_types.py +++ b/tests/test_swf/responses/test_workflow_types.py @@ -1,137 +1,137 @@ -import sure -import boto - -from moto import mock_swf_deprecated -from boto.swf.exceptions import SWFResponseError - - -# RegisterWorkflowType endpoint -@mock_swf_deprecated -def test_register_workflow_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_workflow_type("test-domain", "test-workflow", "v1.0") - - types = conn.list_workflow_types("test-domain", "REGISTERED") - actype = types["typeInfos"][0] - actype["workflowType"]["name"].should.equal("test-workflow") - actype["workflowType"]["version"].should.equal("v1.0") - - -@mock_swf_deprecated -def test_register_already_existing_workflow_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_workflow_type("test-domain", "test-workflow", "v1.0") - - conn.register_workflow_type.when.called_with( - "test-domain", "test-workflow", "v1.0" - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_register_with_wrong_parameter_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - - conn.register_workflow_type.when.called_with( - "test-domain", "test-workflow", 12 - ).should.throw(SWFResponseError) - - -# ListWorkflowTypes endpoint -@mock_swf_deprecated -def test_list_workflow_types(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_workflow_type("test-domain", "b-test-workflow", "v1.0") - conn.register_workflow_type("test-domain", "a-test-workflow", "v1.0") - conn.register_workflow_type("test-domain", "c-test-workflow", "v1.0") - - all_workflow_types = conn.list_workflow_types("test-domain", "REGISTERED") - names = [activity_type["workflowType"]["name"] - for activity_type in all_workflow_types["typeInfos"]] - names.should.equal( - ["a-test-workflow", "b-test-workflow", "c-test-workflow"]) - - -@mock_swf_deprecated -def test_list_workflow_types_reverse_order(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_workflow_type("test-domain", "b-test-workflow", "v1.0") - conn.register_workflow_type("test-domain", "a-test-workflow", "v1.0") - conn.register_workflow_type("test-domain", "c-test-workflow", "v1.0") - - all_workflow_types = 
conn.list_workflow_types("test-domain", "REGISTERED", - reverse_order=True) - names = [activity_type["workflowType"]["name"] - for activity_type in all_workflow_types["typeInfos"]] - names.should.equal( - ["c-test-workflow", "b-test-workflow", "a-test-workflow"]) - - -# DeprecateWorkflowType endpoint -@mock_swf_deprecated -def test_deprecate_workflow_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_workflow_type("test-domain", "test-workflow", "v1.0") - conn.deprecate_workflow_type("test-domain", "test-workflow", "v1.0") - - actypes = conn.list_workflow_types("test-domain", "DEPRECATED") - actype = actypes["typeInfos"][0] - actype["workflowType"]["name"].should.equal("test-workflow") - actype["workflowType"]["version"].should.equal("v1.0") - - -@mock_swf_deprecated -def test_deprecate_already_deprecated_workflow_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_workflow_type("test-domain", "test-workflow", "v1.0") - conn.deprecate_workflow_type("test-domain", "test-workflow", "v1.0") - - conn.deprecate_workflow_type.when.called_with( - "test-domain", "test-workflow", "v1.0" - ).should.throw(SWFResponseError) - - -@mock_swf_deprecated -def test_deprecate_non_existent_workflow_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - - conn.deprecate_workflow_type.when.called_with( - "test-domain", "non-existent", "v1.0" - ).should.throw(SWFResponseError) - - -# DescribeWorkflowType endpoint -@mock_swf_deprecated -def test_describe_workflow_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - conn.register_workflow_type("test-domain", "test-workflow", "v1.0", - task_list="foo", default_child_policy="TERMINATE") - - actype = conn.describe_workflow_type( - "test-domain", "test-workflow", "v1.0") - actype["configuration"]["defaultTaskList"]["name"].should.equal("foo") - actype["configuration"]["defaultChildPolicy"].should.equal("TERMINATE") - actype["configuration"].keys().should_not.contain( - "defaultTaskStartToCloseTimeout") - infos = actype["typeInfo"] - infos["workflowType"]["name"].should.equal("test-workflow") - infos["workflowType"]["version"].should.equal("v1.0") - infos["status"].should.equal("REGISTERED") - - -@mock_swf_deprecated -def test_describe_non_existent_workflow_type(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60") - - conn.describe_workflow_type.when.called_with( - "test-domain", "non-existent", "v1.0" - ).should.throw(SWFResponseError) +import sure +import boto + +from moto import mock_swf_deprecated +from boto.swf.exceptions import SWFResponseError + + +# RegisterWorkflowType endpoint +@mock_swf_deprecated +def test_register_workflow_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_workflow_type("test-domain", "test-workflow", "v1.0") + + types = conn.list_workflow_types("test-domain", "REGISTERED") + actype = types["typeInfos"][0] + actype["workflowType"]["name"].should.equal("test-workflow") + actype["workflowType"]["version"].should.equal("v1.0") + + +@mock_swf_deprecated +def test_register_already_existing_workflow_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_workflow_type("test-domain", "test-workflow", "v1.0") + + 
conn.register_workflow_type.when.called_with( + "test-domain", "test-workflow", "v1.0" + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_register_with_wrong_parameter_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + + conn.register_workflow_type.when.called_with( + "test-domain", "test-workflow", 12 + ).should.throw(SWFResponseError) + + +# ListWorkflowTypes endpoint +@mock_swf_deprecated +def test_list_workflow_types(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_workflow_type("test-domain", "b-test-workflow", "v1.0") + conn.register_workflow_type("test-domain", "a-test-workflow", "v1.0") + conn.register_workflow_type("test-domain", "c-test-workflow", "v1.0") + + all_workflow_types = conn.list_workflow_types("test-domain", "REGISTERED") + names = [activity_type["workflowType"]["name"] + for activity_type in all_workflow_types["typeInfos"]] + names.should.equal( + ["a-test-workflow", "b-test-workflow", "c-test-workflow"]) + + +@mock_swf_deprecated +def test_list_workflow_types_reverse_order(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_workflow_type("test-domain", "b-test-workflow", "v1.0") + conn.register_workflow_type("test-domain", "a-test-workflow", "v1.0") + conn.register_workflow_type("test-domain", "c-test-workflow", "v1.0") + + all_workflow_types = conn.list_workflow_types("test-domain", "REGISTERED", + reverse_order=True) + names = [activity_type["workflowType"]["name"] + for activity_type in all_workflow_types["typeInfos"]] + names.should.equal( + ["c-test-workflow", "b-test-workflow", "a-test-workflow"]) + + +# DeprecateWorkflowType endpoint +@mock_swf_deprecated +def test_deprecate_workflow_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_workflow_type("test-domain", "test-workflow", "v1.0") + conn.deprecate_workflow_type("test-domain", "test-workflow", "v1.0") + + actypes = conn.list_workflow_types("test-domain", "DEPRECATED") + actype = actypes["typeInfos"][0] + actype["workflowType"]["name"].should.equal("test-workflow") + actype["workflowType"]["version"].should.equal("v1.0") + + +@mock_swf_deprecated +def test_deprecate_already_deprecated_workflow_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_workflow_type("test-domain", "test-workflow", "v1.0") + conn.deprecate_workflow_type("test-domain", "test-workflow", "v1.0") + + conn.deprecate_workflow_type.when.called_with( + "test-domain", "test-workflow", "v1.0" + ).should.throw(SWFResponseError) + + +@mock_swf_deprecated +def test_deprecate_non_existent_workflow_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + + conn.deprecate_workflow_type.when.called_with( + "test-domain", "non-existent", "v1.0" + ).should.throw(SWFResponseError) + + +# DescribeWorkflowType endpoint +@mock_swf_deprecated +def test_describe_workflow_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + conn.register_workflow_type("test-domain", "test-workflow", "v1.0", + task_list="foo", default_child_policy="TERMINATE") + + actype = conn.describe_workflow_type( + "test-domain", "test-workflow", "v1.0") + actype["configuration"]["defaultTaskList"]["name"].should.equal("foo") + 
actype["configuration"]["defaultChildPolicy"].should.equal("TERMINATE") + actype["configuration"].keys().should_not.contain( + "defaultTaskStartToCloseTimeout") + infos = actype["typeInfo"] + infos["workflowType"]["name"].should.equal("test-workflow") + infos["workflowType"]["version"].should.equal("v1.0") + infos["status"].should.equal("REGISTERED") + + +@mock_swf_deprecated +def test_describe_non_existent_workflow_type(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60") + + conn.describe_workflow_type.when.called_with( + "test-domain", "non-existent", "v1.0" + ).should.throw(SWFResponseError) diff --git a/tests/test_swf/test_exceptions.py b/tests/test_swf/test_exceptions.py index 8617242b9..b91a697b9 100644 --- a/tests/test_swf/test_exceptions.py +++ b/tests/test_swf/test_exceptions.py @@ -1,158 +1,158 @@ -from __future__ import unicode_literals -import sure # noqa - -import json - -from moto.swf.exceptions import ( - SWFClientError, - SWFUnknownResourceFault, - SWFDomainAlreadyExistsFault, - SWFDomainDeprecatedFault, - SWFSerializationException, - SWFTypeAlreadyExistsFault, - SWFTypeDeprecatedFault, - SWFWorkflowExecutionAlreadyStartedFault, - SWFDefaultUndefinedFault, - SWFValidationException, - SWFDecisionValidationException, -) -from moto.swf.models import ( - WorkflowType, -) - - -def test_swf_client_error(): - ex = SWFClientError("ASpecificType", "error message") - - ex.code.should.equal(400) - json.loads(ex.get_body()).should.equal({ - "__type": "ASpecificType", - "message": "error message" - }) - - -def test_swf_unknown_resource_fault(): - ex = SWFUnknownResourceFault("type", "detail") - - ex.code.should.equal(400) - json.loads(ex.get_body()).should.equal({ - "__type": "com.amazonaws.swf.base.model#UnknownResourceFault", - "message": "Unknown type: detail" - }) - - -def test_swf_unknown_resource_fault_with_only_one_parameter(): - ex = SWFUnknownResourceFault("foo bar baz") - - ex.code.should.equal(400) - json.loads(ex.get_body()).should.equal({ - "__type": "com.amazonaws.swf.base.model#UnknownResourceFault", - "message": "Unknown foo bar baz" - }) - - -def test_swf_domain_already_exists_fault(): - ex = SWFDomainAlreadyExistsFault("domain-name") - - ex.code.should.equal(400) - json.loads(ex.get_body()).should.equal({ - "__type": "com.amazonaws.swf.base.model#DomainAlreadyExistsFault", - "message": "domain-name" - }) - - -def test_swf_domain_deprecated_fault(): - ex = SWFDomainDeprecatedFault("domain-name") - - ex.code.should.equal(400) - json.loads(ex.get_body()).should.equal({ - "__type": "com.amazonaws.swf.base.model#DomainDeprecatedFault", - "message": "domain-name" - }) - - -def test_swf_serialization_exception(): - ex = SWFSerializationException("value") - - ex.code.should.equal(400) - json.loads(ex.get_body()).should.equal({ - "__type": "com.amazonaws.swf.base.model#SerializationException", - "message": "class java.lang.Foo can not be converted to an String (not a real SWF exception ; happened on: value)" - }) - - -def test_swf_type_already_exists_fault(): - wft = WorkflowType("wf-name", "wf-version") - ex = SWFTypeAlreadyExistsFault(wft) - - ex.code.should.equal(400) - json.loads(ex.get_body()).should.equal({ - "__type": "com.amazonaws.swf.base.model#TypeAlreadyExistsFault", - "message": "WorkflowType=[name=wf-name, version=wf-version]" - }) - - -def test_swf_type_deprecated_fault(): - wft = WorkflowType("wf-name", "wf-version") - ex = SWFTypeDeprecatedFault(wft) - - ex.code.should.equal(400) - 
json.loads(ex.get_body()).should.equal({ - "__type": "com.amazonaws.swf.base.model#TypeDeprecatedFault", - "message": "WorkflowType=[name=wf-name, version=wf-version]" - }) - - -def test_swf_workflow_execution_already_started_fault(): - ex = SWFWorkflowExecutionAlreadyStartedFault() - - ex.code.should.equal(400) - json.loads(ex.get_body()).should.equal({ - "__type": "com.amazonaws.swf.base.model#WorkflowExecutionAlreadyStartedFault", - 'message': 'Already Started', - }) - - -def test_swf_default_undefined_fault(): - ex = SWFDefaultUndefinedFault("execution_start_to_close_timeout") - - ex.code.should.equal(400) - json.loads(ex.get_body()).should.equal({ - "__type": "com.amazonaws.swf.base.model#DefaultUndefinedFault", - "message": "executionStartToCloseTimeout", - }) - - -def test_swf_validation_exception(): - ex = SWFValidationException("Invalid token") - - ex.code.should.equal(400) - json.loads(ex.get_body()).should.equal({ - "__type": "com.amazon.coral.validate#ValidationException", - "message": "Invalid token", - }) - - -def test_swf_decision_validation_error(): - ex = SWFDecisionValidationException([ - {"type": "null_value", - "where": "decisions.1.member.startTimerDecisionAttributes.startToFireTimeout"}, - {"type": "bad_decision_type", - "value": "FooBar", - "where": "decisions.1.member.decisionType", - "possible_values": "Foo, Bar, Baz"}, - ]) - - ex.code.should.equal(400) - ex.error_type.should.equal("com.amazon.coral.validate#ValidationException") - - msg = ex.get_body() - msg.should.match(r"2 validation errors detected:") - msg.should.match( - r"Value null at 'decisions.1.member.startTimerDecisionAttributes.startToFireTimeout' " - r"failed to satisfy constraint: Member must not be null;" - ) - msg.should.match( - r"Value 'FooBar' at 'decisions.1.member.decisionType' failed to satisfy constraint: " - r"Member must satisfy enum value set: \[Foo, Bar, Baz\]" - ) +from __future__ import unicode_literals +import sure # noqa + +import json + +from moto.swf.exceptions import ( + SWFClientError, + SWFUnknownResourceFault, + SWFDomainAlreadyExistsFault, + SWFDomainDeprecatedFault, + SWFSerializationException, + SWFTypeAlreadyExistsFault, + SWFTypeDeprecatedFault, + SWFWorkflowExecutionAlreadyStartedFault, + SWFDefaultUndefinedFault, + SWFValidationException, + SWFDecisionValidationException, +) +from moto.swf.models import ( + WorkflowType, +) + + +def test_swf_client_error(): + ex = SWFClientError("ASpecificType", "error message") + + ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "ASpecificType", + "message": "error message" + }) + + +def test_swf_unknown_resource_fault(): + ex = SWFUnknownResourceFault("type", "detail") + + ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "com.amazonaws.swf.base.model#UnknownResourceFault", + "message": "Unknown type: detail" + }) + + +def test_swf_unknown_resource_fault_with_only_one_parameter(): + ex = SWFUnknownResourceFault("foo bar baz") + + ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "com.amazonaws.swf.base.model#UnknownResourceFault", + "message": "Unknown foo bar baz" + }) + + +def test_swf_domain_already_exists_fault(): + ex = SWFDomainAlreadyExistsFault("domain-name") + + ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "com.amazonaws.swf.base.model#DomainAlreadyExistsFault", + "message": "domain-name" + }) + + +def test_swf_domain_deprecated_fault(): + ex = SWFDomainDeprecatedFault("domain-name") + + 
ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "com.amazonaws.swf.base.model#DomainDeprecatedFault", + "message": "domain-name" + }) + + +def test_swf_serialization_exception(): + ex = SWFSerializationException("value") + + ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "com.amazonaws.swf.base.model#SerializationException", + "message": "class java.lang.Foo can not be converted to an String (not a real SWF exception ; happened on: value)" + }) + + +def test_swf_type_already_exists_fault(): + wft = WorkflowType("wf-name", "wf-version") + ex = SWFTypeAlreadyExistsFault(wft) + + ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "com.amazonaws.swf.base.model#TypeAlreadyExistsFault", + "message": "WorkflowType=[name=wf-name, version=wf-version]" + }) + + +def test_swf_type_deprecated_fault(): + wft = WorkflowType("wf-name", "wf-version") + ex = SWFTypeDeprecatedFault(wft) + + ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "com.amazonaws.swf.base.model#TypeDeprecatedFault", + "message": "WorkflowType=[name=wf-name, version=wf-version]" + }) + + +def test_swf_workflow_execution_already_started_fault(): + ex = SWFWorkflowExecutionAlreadyStartedFault() + + ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "com.amazonaws.swf.base.model#WorkflowExecutionAlreadyStartedFault", + 'message': 'Already Started', + }) + + +def test_swf_default_undefined_fault(): + ex = SWFDefaultUndefinedFault("execution_start_to_close_timeout") + + ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "com.amazonaws.swf.base.model#DefaultUndefinedFault", + "message": "executionStartToCloseTimeout", + }) + + +def test_swf_validation_exception(): + ex = SWFValidationException("Invalid token") + + ex.code.should.equal(400) + json.loads(ex.get_body()).should.equal({ + "__type": "com.amazon.coral.validate#ValidationException", + "message": "Invalid token", + }) + + +def test_swf_decision_validation_error(): + ex = SWFDecisionValidationException([ + {"type": "null_value", + "where": "decisions.1.member.startTimerDecisionAttributes.startToFireTimeout"}, + {"type": "bad_decision_type", + "value": "FooBar", + "where": "decisions.1.member.decisionType", + "possible_values": "Foo, Bar, Baz"}, + ]) + + ex.code.should.equal(400) + ex.error_type.should.equal("com.amazon.coral.validate#ValidationException") + + msg = ex.get_body() + msg.should.match(r"2 validation errors detected:") + msg.should.match( + r"Value null at 'decisions.1.member.startTimerDecisionAttributes.startToFireTimeout' " + r"failed to satisfy constraint: Member must not be null;" + ) + msg.should.match( + r"Value 'FooBar' at 'decisions.1.member.decisionType' failed to satisfy constraint: " + r"Member must satisfy enum value set: \[Foo, Bar, Baz\]" + ) diff --git a/tests/test_swf/test_utils.py b/tests/test_swf/test_utils.py index ffa147037..2e04b990c 100644 --- a/tests/test_swf/test_utils.py +++ b/tests/test_swf/test_utils.py @@ -1,13 +1,13 @@ -import sure # noqa - -from moto.swf.utils import decapitalize - - -def test_decapitalize(): - cases = { - "fooBar": "fooBar", - "FooBar": "fooBar", - "FOO BAR": "fOO BAR", - } - for before, after in cases.items(): - decapitalize(before).should.equal(after) +import sure # noqa + +from moto.swf.utils import decapitalize + + +def test_decapitalize(): + cases = { + "fooBar": "fooBar", + "FooBar": "fooBar", + "FOO BAR": "fOO BAR", + } + for 
before, after in cases.items(): + decapitalize(before).should.equal(after) diff --git a/tests/test_swf/utils.py b/tests/test_swf/utils.py index 2197b71df..4879a0011 100644 --- a/tests/test_swf/utils.py +++ b/tests/test_swf/utils.py @@ -1,100 +1,100 @@ -import boto - -from moto.swf.models import ( - ActivityType, - Domain, - WorkflowType, - WorkflowExecution, -) - - -# Some useful constants -# Here are some activity timeouts we use in moto/swf tests ; they're extracted -# from semi-real world example, the goal is mostly to have predictible and -# intuitive behaviour in moto/swf own tests... -ACTIVITY_TASK_TIMEOUTS = { - "heartbeatTimeout": "300", # 5 mins - "scheduleToStartTimeout": "1800", # 30 mins - "startToCloseTimeout": "1800", # 30 mins - "scheduleToCloseTimeout": "2700", # 45 mins -} - -# Some useful decisions -SCHEDULE_ACTIVITY_TASK_DECISION = { - "decisionType": "ScheduleActivityTask", - "scheduleActivityTaskDecisionAttributes": { - "activityId": "my-activity-001", - "activityType": {"name": "test-activity", "version": "v1.1"}, - "taskList": {"name": "activity-task-list"}, - } -} -for key, value in ACTIVITY_TASK_TIMEOUTS.items(): - SCHEDULE_ACTIVITY_TASK_DECISION[ - "scheduleActivityTaskDecisionAttributes"][key] = value - - -# A test Domain -def get_basic_domain(): - return Domain("test-domain", "90") - - -# A test WorkflowType -def _generic_workflow_type_attributes(): - return [ - "test-workflow", "v1.0" - ], { - "task_list": "queue", - "default_child_policy": "ABANDON", - "default_execution_start_to_close_timeout": "7200", - "default_task_start_to_close_timeout": "300", - } - - -def get_basic_workflow_type(): - args, kwargs = _generic_workflow_type_attributes() - return WorkflowType(*args, **kwargs) - - -def mock_basic_workflow_type(domain_name, conn): - args, kwargs = _generic_workflow_type_attributes() - conn.register_workflow_type(domain_name, *args, **kwargs) - return conn - - -# A test WorkflowExecution -def make_workflow_execution(**kwargs): - domain = get_basic_domain() - domain.add_type(ActivityType("test-activity", "v1.1")) - wft = get_basic_workflow_type() - return WorkflowExecution(domain, wft, "ab1234", **kwargs) - - -# Makes decision tasks start automatically on a given workflow -def auto_start_decision_tasks(wfe): - wfe.schedule_decision_task = wfe.schedule_and_start_decision_task - return wfe - - -# Setup a complete example workflow and return the connection object -def setup_workflow(): - conn = boto.connect_swf("the_key", "the_secret") - conn.register_domain("test-domain", "60", description="A test domain") - conn = mock_basic_workflow_type("test-domain", conn) - conn.register_activity_type( - "test-domain", "test-activity", "v1.1", - default_task_heartbeat_timeout="600", - default_task_schedule_to_close_timeout="600", - default_task_schedule_to_start_timeout="600", - default_task_start_to_close_timeout="600", - ) - wfe = conn.start_workflow_execution( - "test-domain", "uid-abcd1234", "test-workflow", "v1.0") - conn.run_id = wfe["runId"] - return conn - - -# A helper for processing the first timeout on a given object -def process_first_timeout(obj): - _timeout = obj.first_timeout() - if _timeout: - obj.timeout(_timeout) +import boto + +from moto.swf.models import ( + ActivityType, + Domain, + WorkflowType, + WorkflowExecution, +) + + +# Some useful constants +# Here are some activity timeouts we use in moto/swf tests ; they're extracted +# from semi-real world example, the goal is mostly to have predictible and +# intuitive behaviour in moto/swf own tests... 
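# Before the constants themselves, a minimal usage sketch, assuming the boto
# SWF Layer1 calls poll_for_decision_task / respond_decision_task_completed
# plus the setup_workflow() helper defined further down in this module; the
# schedule_first_activity_task() name is only an illustration, not something
# the tests use. It shows where these timeouts take effect: a decider polls
# the first decision task and answers it with the canned decision below.
def schedule_first_activity_task():
    conn = setup_workflow()
    # the workflow started by setup_workflow() queues one decision task
    resp = conn.poll_for_decision_task("test-domain", "queue")
    # reply with SCHEDULE_ACTIVITY_TASK_DECISION (built just below), whose
    # scheduleActivityTaskDecisionAttributes carry ACTIVITY_TASK_TIMEOUTS
    conn.respond_decision_task_completed(
        resp["taskToken"], decisions=[SCHEDULE_ACTIVITY_TASK_DECISION])
    return conn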
+ACTIVITY_TASK_TIMEOUTS = { + "heartbeatTimeout": "300", # 5 mins + "scheduleToStartTimeout": "1800", # 30 mins + "startToCloseTimeout": "1800", # 30 mins + "scheduleToCloseTimeout": "2700", # 45 mins +} + +# Some useful decisions +SCHEDULE_ACTIVITY_TASK_DECISION = { + "decisionType": "ScheduleActivityTask", + "scheduleActivityTaskDecisionAttributes": { + "activityId": "my-activity-001", + "activityType": {"name": "test-activity", "version": "v1.1"}, + "taskList": {"name": "activity-task-list"}, + } +} +for key, value in ACTIVITY_TASK_TIMEOUTS.items(): + SCHEDULE_ACTIVITY_TASK_DECISION[ + "scheduleActivityTaskDecisionAttributes"][key] = value + + +# A test Domain +def get_basic_domain(): + return Domain("test-domain", "90") + + +# A test WorkflowType +def _generic_workflow_type_attributes(): + return [ + "test-workflow", "v1.0" + ], { + "task_list": "queue", + "default_child_policy": "ABANDON", + "default_execution_start_to_close_timeout": "7200", + "default_task_start_to_close_timeout": "300", + } + + +def get_basic_workflow_type(): + args, kwargs = _generic_workflow_type_attributes() + return WorkflowType(*args, **kwargs) + + +def mock_basic_workflow_type(domain_name, conn): + args, kwargs = _generic_workflow_type_attributes() + conn.register_workflow_type(domain_name, *args, **kwargs) + return conn + + +# A test WorkflowExecution +def make_workflow_execution(**kwargs): + domain = get_basic_domain() + domain.add_type(ActivityType("test-activity", "v1.1")) + wft = get_basic_workflow_type() + return WorkflowExecution(domain, wft, "ab1234", **kwargs) + + +# Makes decision tasks start automatically on a given workflow +def auto_start_decision_tasks(wfe): + wfe.schedule_decision_task = wfe.schedule_and_start_decision_task + return wfe + + +# Setup a complete example workflow and return the connection object +def setup_workflow(): + conn = boto.connect_swf("the_key", "the_secret") + conn.register_domain("test-domain", "60", description="A test domain") + conn = mock_basic_workflow_type("test-domain", conn) + conn.register_activity_type( + "test-domain", "test-activity", "v1.1", + default_task_heartbeat_timeout="600", + default_task_schedule_to_close_timeout="600", + default_task_schedule_to_start_timeout="600", + default_task_start_to_close_timeout="600", + ) + wfe = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") + conn.run_id = wfe["runId"] + return conn + + +# A helper for processing the first timeout on a given object +def process_first_timeout(obj): + _timeout = obj.first_timeout() + if _timeout: + obj.timeout(_timeout) diff --git a/tests/test_xray/test_xray_boto3.py b/tests/test_xray/test_xray_boto3.py index 5ad8f8bc7..c754e3a69 100644 --- a/tests/test_xray/test_xray_boto3.py +++ b/tests/test_xray/test_xray_boto3.py @@ -1,139 +1,139 @@ -from __future__ import unicode_literals - -import boto3 -import json -import botocore.exceptions -import sure # noqa - -from moto import mock_xray - -import datetime - - -@mock_xray -def test_put_telemetry(): - client = boto3.client('xray', region_name='us-east-1') - - client.put_telemetry_records( - TelemetryRecords=[ - { - 'Timestamp': datetime.datetime(2015, 1, 1), - 'SegmentsReceivedCount': 123, - 'SegmentsSentCount': 123, - 'SegmentsSpilloverCount': 123, - 'SegmentsRejectedCount': 123, - 'BackendConnectionErrors': { - 'TimeoutCount': 123, - 'ConnectionRefusedCount': 123, - 'HTTPCode4XXCount': 123, - 'HTTPCode5XXCount': 123, - 'UnknownHostCount': 123, - 'OtherCount': 123 - } - }, - ], - EC2InstanceId='string', 
- Hostname='string', - ResourceARN='string' - ) - - -@mock_xray -def test_put_trace_segments(): - client = boto3.client('xray', region_name='us-east-1') - - client.put_trace_segments( - TraceSegmentDocuments=[ - json.dumps({ - 'name': 'example.com', - 'id': '70de5b6f19ff9a0a', - 'start_time': 1.478293361271E9, - 'trace_id': '1-581cf771-a006649127e371903a2de979', - 'end_time': 1.478293361449E9 - }) - ] - ) - - -@mock_xray -def test_trace_summary(): - client = boto3.client('xray', region_name='us-east-1') - - client.put_trace_segments( - TraceSegmentDocuments=[ - json.dumps({ - 'name': 'example.com', - 'id': '70de5b6f19ff9a0a', - 'start_time': 1.478293361271E9, - 'trace_id': '1-581cf771-a006649127e371903a2de979', - 'in_progress': True - }), - json.dumps({ - 'name': 'example.com', - 'id': '70de5b6f19ff9a0b', - 'start_time': 1478293365, - 'trace_id': '1-581cf771-a006649127e371903a2de979', - 'end_time': 1478293385 - }) - ] - ) - - client.get_trace_summaries( - StartTime=datetime.datetime(2014, 1, 1), - EndTime=datetime.datetime(2017, 1, 1) - ) - - -@mock_xray -def test_batch_get_trace(): - client = boto3.client('xray', region_name='us-east-1') - - client.put_trace_segments( - TraceSegmentDocuments=[ - json.dumps({ - 'name': 'example.com', - 'id': '70de5b6f19ff9a0a', - 'start_time': 1.478293361271E9, - 'trace_id': '1-581cf771-a006649127e371903a2de979', - 'in_progress': True - }), - json.dumps({ - 'name': 'example.com', - 'id': '70de5b6f19ff9a0b', - 'start_time': 1478293365, - 'trace_id': '1-581cf771-a006649127e371903a2de979', - 'end_time': 1478293385 - }) - ] - ) - - resp = client.batch_get_traces( - TraceIds=['1-581cf771-a006649127e371903a2de979', '1-581cf772-b006649127e371903a2de979'] - ) - len(resp['UnprocessedTraceIds']).should.equal(1) - len(resp['Traces']).should.equal(1) - - -# Following are not implemented, just testing it returns what boto expects -@mock_xray -def test_batch_get_service_graph(): - client = boto3.client('xray', region_name='us-east-1') - - client.get_service_graph( - StartTime=datetime.datetime(2014, 1, 1), - EndTime=datetime.datetime(2017, 1, 1) - ) - - -@mock_xray -def test_batch_get_trace_graph(): - client = boto3.client('xray', region_name='us-east-1') - - client.batch_get_traces( - TraceIds=['1-581cf771-a006649127e371903a2de979', '1-581cf772-b006649127e371903a2de979'] - ) - - - - - +from __future__ import unicode_literals + +import boto3 +import json +import botocore.exceptions +import sure # noqa + +from moto import mock_xray + +import datetime + + +@mock_xray +def test_put_telemetry(): + client = boto3.client('xray', region_name='us-east-1') + + client.put_telemetry_records( + TelemetryRecords=[ + { + 'Timestamp': datetime.datetime(2015, 1, 1), + 'SegmentsReceivedCount': 123, + 'SegmentsSentCount': 123, + 'SegmentsSpilloverCount': 123, + 'SegmentsRejectedCount': 123, + 'BackendConnectionErrors': { + 'TimeoutCount': 123, + 'ConnectionRefusedCount': 123, + 'HTTPCode4XXCount': 123, + 'HTTPCode5XXCount': 123, + 'UnknownHostCount': 123, + 'OtherCount': 123 + } + }, + ], + EC2InstanceId='string', + Hostname='string', + ResourceARN='string' + ) + + +@mock_xray +def test_put_trace_segments(): + client = boto3.client('xray', region_name='us-east-1') + + client.put_trace_segments( + TraceSegmentDocuments=[ + json.dumps({ + 'name': 'example.com', + 'id': '70de5b6f19ff9a0a', + 'start_time': 1.478293361271E9, + 'trace_id': '1-581cf771-a006649127e371903a2de979', + 'end_time': 1.478293361449E9 + }) + ] + ) + + +@mock_xray +def test_trace_summary(): + client = 
boto3.client('xray', region_name='us-east-1') + + client.put_trace_segments( + TraceSegmentDocuments=[ + json.dumps({ + 'name': 'example.com', + 'id': '70de5b6f19ff9a0a', + 'start_time': 1.478293361271E9, + 'trace_id': '1-581cf771-a006649127e371903a2de979', + 'in_progress': True + }), + json.dumps({ + 'name': 'example.com', + 'id': '70de5b6f19ff9a0b', + 'start_time': 1478293365, + 'trace_id': '1-581cf771-a006649127e371903a2de979', + 'end_time': 1478293385 + }) + ] + ) + + client.get_trace_summaries( + StartTime=datetime.datetime(2014, 1, 1), + EndTime=datetime.datetime(2017, 1, 1) + ) + + +@mock_xray +def test_batch_get_trace(): + client = boto3.client('xray', region_name='us-east-1') + + client.put_trace_segments( + TraceSegmentDocuments=[ + json.dumps({ + 'name': 'example.com', + 'id': '70de5b6f19ff9a0a', + 'start_time': 1.478293361271E9, + 'trace_id': '1-581cf771-a006649127e371903a2de979', + 'in_progress': True + }), + json.dumps({ + 'name': 'example.com', + 'id': '70de5b6f19ff9a0b', + 'start_time': 1478293365, + 'trace_id': '1-581cf771-a006649127e371903a2de979', + 'end_time': 1478293385 + }) + ] + ) + + resp = client.batch_get_traces( + TraceIds=['1-581cf771-a006649127e371903a2de979', '1-581cf772-b006649127e371903a2de979'] + ) + len(resp['UnprocessedTraceIds']).should.equal(1) + len(resp['Traces']).should.equal(1) + + +# Following are not implemented, just testing it returns what boto expects +@mock_xray +def test_batch_get_service_graph(): + client = boto3.client('xray', region_name='us-east-1') + + client.get_service_graph( + StartTime=datetime.datetime(2014, 1, 1), + EndTime=datetime.datetime(2017, 1, 1) + ) + + +@mock_xray +def test_batch_get_trace_graph(): + client = boto3.client('xray', region_name='us-east-1') + + client.batch_get_traces( + TraceIds=['1-581cf771-a006649127e371903a2de979', '1-581cf772-b006649127e371903a2de979'] + ) + + + + + diff --git a/tests/test_xray/test_xray_client.py b/tests/test_xray/test_xray_client.py index 0cd948950..8e7b84be0 100644 --- a/tests/test_xray/test_xray_client.py +++ b/tests/test_xray/test_xray_client.py @@ -1,72 +1,72 @@ -from __future__ import unicode_literals -from moto import mock_xray_client, XRaySegment, mock_dynamodb2 -import sure # noqa -import boto3 - -from moto.xray.mock_client import MockEmitter -import aws_xray_sdk.core as xray_core -import aws_xray_sdk.core.patcher as xray_core_patcher - -import botocore.client -import botocore.endpoint -original_make_api_call = botocore.client.BaseClient._make_api_call -original_encode_headers = botocore.endpoint.Endpoint._encode_headers - -import requests -original_session_request = requests.Session.request -original_session_prep_request = requests.Session.prepare_request - - -@mock_xray_client -@mock_dynamodb2 -def test_xray_dynamo_request_id(): - # Could be ran in any order, so we need to tell sdk that its been unpatched - xray_core_patcher._PATCHED_MODULES = set() - xray_core.patch_all() - - client = boto3.client('dynamodb', region_name='us-east-1') - - with XRaySegment(): - resp = client.list_tables() - resp['ResponseMetadata'].should.contain('RequestId') - id1 = resp['ResponseMetadata']['RequestId'] - - with XRaySegment(): - client.list_tables() - resp = client.list_tables() - id2 = resp['ResponseMetadata']['RequestId'] - - id1.should_not.equal(id2) - - setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call) - setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers) - setattr(requests.Session, 'request', original_session_request) - 
setattr(requests.Session, 'prepare_request', original_session_prep_request) - - -@mock_xray_client -def test_xray_udp_emitter_patched(): - # Could be ran in any order, so we need to tell sdk that its been unpatched - xray_core_patcher._PATCHED_MODULES = set() - xray_core.patch_all() - - assert isinstance(xray_core.xray_recorder._emitter, MockEmitter) - - setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call) - setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers) - setattr(requests.Session, 'request', original_session_request) - setattr(requests.Session, 'prepare_request', original_session_prep_request) - - -@mock_xray_client -def test_xray_context_patched(): - # Could be ran in any order, so we need to tell sdk that its been unpatched - xray_core_patcher._PATCHED_MODULES = set() - xray_core.patch_all() - - xray_core.xray_recorder._context.context_missing.should.equal('LOG_ERROR') - - setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call) - setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers) - setattr(requests.Session, 'request', original_session_request) - setattr(requests.Session, 'prepare_request', original_session_prep_request) +from __future__ import unicode_literals +from moto import mock_xray_client, XRaySegment, mock_dynamodb2 +import sure # noqa +import boto3 + +from moto.xray.mock_client import MockEmitter +import aws_xray_sdk.core as xray_core +import aws_xray_sdk.core.patcher as xray_core_patcher + +import botocore.client +import botocore.endpoint +original_make_api_call = botocore.client.BaseClient._make_api_call +original_encode_headers = botocore.endpoint.Endpoint._encode_headers + +import requests +original_session_request = requests.Session.request +original_session_prep_request = requests.Session.prepare_request + + +@mock_xray_client +@mock_dynamodb2 +def test_xray_dynamo_request_id(): + # Could be ran in any order, so we need to tell sdk that its been unpatched + xray_core_patcher._PATCHED_MODULES = set() + xray_core.patch_all() + + client = boto3.client('dynamodb', region_name='us-east-1') + + with XRaySegment(): + resp = client.list_tables() + resp['ResponseMetadata'].should.contain('RequestId') + id1 = resp['ResponseMetadata']['RequestId'] + + with XRaySegment(): + client.list_tables() + resp = client.list_tables() + id2 = resp['ResponseMetadata']['RequestId'] + + id1.should_not.equal(id2) + + setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call) + setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers) + setattr(requests.Session, 'request', original_session_request) + setattr(requests.Session, 'prepare_request', original_session_prep_request) + + +@mock_xray_client +def test_xray_udp_emitter_patched(): + # Could be ran in any order, so we need to tell sdk that its been unpatched + xray_core_patcher._PATCHED_MODULES = set() + xray_core.patch_all() + + assert isinstance(xray_core.xray_recorder._emitter, MockEmitter) + + setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call) + setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers) + setattr(requests.Session, 'request', original_session_request) + setattr(requests.Session, 'prepare_request', original_session_prep_request) + + +@mock_xray_client +def test_xray_context_patched(): + # Could be ran in any order, so we need to tell sdk that its been unpatched + xray_core_patcher._PATCHED_MODULES = set() + xray_core.patch_all() + + 
xray_core.xray_recorder._context.context_missing.should.equal('LOG_ERROR') + + setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call) + setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers) + setattr(requests.Session, 'request', original_session_request) + setattr(requests.Session, 'prepare_request', original_session_prep_request) diff --git a/tox.ini b/tox.ini index 0f3f1466a..7c5ed1ef7 100644 --- a/tox.ini +++ b/tox.ini @@ -1,14 +1,14 @@ -[tox] -envlist = py27, py36 - -[testenv] -deps = - -r{toxinidir}/requirements.txt - -r{toxinidir}/requirements-dev.txt -commands = - {envpython} setup.py test - nosetests {posargs} - -[flake8] -ignore = E128,E501 -exclude = moto/packages,dist +[tox] +envlist = py27, py36 + +[testenv] +deps = + -r{toxinidir}/requirements.txt + -r{toxinidir}/requirements-dev.txt +commands = + {envpython} setup.py test + nosetests {posargs} + +[flake8] +ignore = E128,E501 +exclude = moto/packages,dist diff --git a/travis_moto_server.sh b/travis_moto_server.sh index 902644b20..3c6947fd9 100755 --- a/travis_moto_server.sh +++ b/travis_moto_server.sh @@ -1,5 +1,5 @@ -#!/usr/bin/env bash -set -e -pip install flask -pip install /moto/dist/moto*.gz +#!/usr/bin/env bash +set -e +pip install flask +pip install /moto/dist/moto*.gz moto_server -H 0.0.0.0 -p 5000 \ No newline at end of file diff --git a/wait_for.py b/wait_for.py index d313ea5a9..cba4bc665 100755 --- a/wait_for.py +++ b/wait_for.py @@ -1,31 +1,31 @@ -import time - -try: - # py2 - import urllib2 as urllib - from urllib2 import URLError - import socket - import httplib - - EXCEPTIONS = (URLError, socket.error, httplib.BadStatusLine) -except ImportError: - # py3 - import urllib.request as urllib - from urllib.error import URLError - - EXCEPTIONS = (URLError, ConnectionResetError) - - -start_ts = time.time() -print("Waiting for service to come up") -while True: - try: - urllib.urlopen('http://localhost:5000/', timeout=1) - break - except EXCEPTIONS: - elapsed_s = time.time() - start_ts - if elapsed_s > 60: - raise - - print('.') - time.sleep(1) +import time + +try: + # py2 + import urllib2 as urllib + from urllib2 import URLError + import socket + import httplib + + EXCEPTIONS = (URLError, socket.error, httplib.BadStatusLine) +except ImportError: + # py3 + import urllib.request as urllib + from urllib.error import URLError + + EXCEPTIONS = (URLError, ConnectionResetError) + + +start_ts = time.time() +print("Waiting for service to come up") +while True: + try: + urllib.urlopen('http://localhost:5000/', timeout=1) + break + except EXCEPTIONS: + elapsed_s = time.time() - start_ts + if elapsed_s > 60: + raise + + print('.') + time.sleep(1) From 3ea673b3d04c866f9301e3714cd0578a24883000 Mon Sep 17 00:00:00 2001 From: Stephan Date: Fri, 21 Dec 2018 16:30:17 +0100 Subject: [PATCH 009/125] started with mocking job execution --- moto/iot/models.py | 1521 +++++++++++++++++++----------------- moto/iot/responses.py | 1004 ++++++++++++------------ tests/test_iot/test_iot.py | 39 + 3 files changed, 1331 insertions(+), 1233 deletions(-) diff --git a/moto/iot/models.py b/moto/iot/models.py index 4bcab26eb..1279a5baa 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -1,736 +1,785 @@ -from __future__ import unicode_literals - -import hashlib -import random -import re -import string -import time -import uuid -from collections import OrderedDict -from datetime import datetime - -import boto3 - -from moto.core import BaseBackend, BaseModel -from .exceptions import ( - 
ResourceNotFoundException, - InvalidRequestException, - VersionConflictException -) - - -class FakeThing(BaseModel): - def __init__(self, thing_name, thing_type, attributes, region_name): - self.region_name = region_name - self.thing_name = thing_name - self.thing_type = thing_type - self.attributes = attributes - self.arn = 'arn:aws:iot:%s:1:thing/%s' % (self.region_name, thing_name) - self.version = 1 - # TODO: we need to handle 'version'? - - # for iot-data - self.thing_shadow = None - - def to_dict(self, include_default_client_id=False): - obj = { - 'thingName': self.thing_name, - 'thingArn': self.arn, - 'attributes': self.attributes, - 'version': self.version - } - if self.thing_type: - obj['thingTypeName'] = self.thing_type.thing_type_name - if include_default_client_id: - obj['defaultClientId'] = self.thing_name - return obj - - -class FakeThingType(BaseModel): - def __init__(self, thing_type_name, thing_type_properties, region_name): - self.region_name = region_name - self.thing_type_name = thing_type_name - self.thing_type_properties = thing_type_properties - self.thing_type_id = str(uuid.uuid4()) # I don't know the rule of id - t = time.time() - self.metadata = { - 'deprecated': False, - 'creationData': int(t * 1000) / 1000.0 - } - self.arn = 'arn:aws:iot:%s:1:thingtype/%s' % (self.region_name, thing_type_name) - - def to_dict(self): - return { - 'thingTypeName': self.thing_type_name, - 'thingTypeId': self.thing_type_id, - 'thingTypeProperties': self.thing_type_properties, - 'thingTypeMetadata': self.metadata - } - - -class FakeThingGroup(BaseModel): - def __init__(self, thing_group_name, parent_group_name, thing_group_properties, region_name): - self.region_name = region_name - self.thing_group_name = thing_group_name - self.thing_group_id = str(uuid.uuid4()) # I don't know the rule of id - self.version = 1 # TODO: tmp - self.parent_group_name = parent_group_name - self.thing_group_properties = thing_group_properties or {} - t = time.time() - self.metadata = { - 'creationData': int(t * 1000) / 1000.0 - } - self.arn = 'arn:aws:iot:%s:1:thinggroup/%s' % (self.region_name, thing_group_name) - self.things = OrderedDict() - - def to_dict(self): - return { - 'thingGroupName': self.thing_group_name, - 'thingGroupId': self.thing_group_id, - 'version': self.version, - 'thingGroupProperties': self.thing_group_properties, - 'thingGroupMetadata': self.metadata - } - - -class FakeCertificate(BaseModel): - def __init__(self, certificate_pem, status, region_name): - m = hashlib.sha256() - m.update(str(uuid.uuid4()).encode('utf-8')) - self.certificate_id = m.hexdigest() - self.arn = 'arn:aws:iot:%s:1:cert/%s' % (region_name, self.certificate_id) - self.certificate_pem = certificate_pem - self.status = status - - # TODO: must adjust - self.owner = '1' - self.transfer_data = {} - self.creation_date = time.time() - self.last_modified_date = self.creation_date - self.ca_certificate_id = None - - def to_dict(self): - return { - 'certificateArn': self.arn, - 'certificateId': self.certificate_id, - 'status': self.status, - 'creationDate': self.creation_date - } - - def to_description_dict(self): - """ - You might need keys below in some situation - - caCertificateId - - previousOwnedBy - """ - return { - 'certificateArn': self.arn, - 'certificateId': self.certificate_id, - 'status': self.status, - 'certificatePem': self.certificate_pem, - 'ownedBy': self.owner, - 'creationDate': self.creation_date, - 'lastModifiedDate': self.last_modified_date, - 'transferData': self.transfer_data - } - - -class 
FakePolicy(BaseModel): - def __init__(self, name, document, region_name, default_version_id='1'): - self.name = name - self.document = document - self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, name) - self.default_version_id = default_version_id - self.versions = [FakePolicyVersion(self.name, document, True, region_name)] - - def to_get_dict(self): - return { - 'policyName': self.name, - 'policyArn': self.arn, - 'policyDocument': self.document, - 'defaultVersionId': self.default_version_id - } - - def to_dict_at_creation(self): - return { - 'policyName': self.name, - 'policyArn': self.arn, - 'policyDocument': self.document, - 'policyVersionId': self.default_version_id - } - - def to_dict(self): - return { - 'policyName': self.name, - 'policyArn': self.arn, - } - - -class FakePolicyVersion(object): - - def __init__(self, - policy_name, - document, - is_default, - region_name): - self.name = policy_name - self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, policy_name) - self.document = document or {} - self.is_default = is_default - self.version_id = '1' - - self.create_datetime = time.mktime(datetime(2015, 1, 1).timetuple()) - self.last_modified_datetime = time.mktime(datetime(2015, 1, 2).timetuple()) - - def to_get_dict(self): - return { - 'policyName': self.name, - 'policyArn': self.arn, - 'policyDocument': self.document, - 'policyVersionId': self.version_id, - 'isDefaultVersion': self.is_default, - 'creationDate': self.create_datetime, - 'lastModifiedDate': self.last_modified_datetime, - 'generationId': self.version_id - } - - def to_dict_at_creation(self): - return { - 'policyArn': self.arn, - 'policyDocument': self.document, - 'policyVersionId': self.version_id, - 'isDefaultVersion': self.is_default - } - - def to_dict(self): - return { - 'versionId': self.version_id, - 'isDefaultVersion': self.is_default, - 'createDate': self.create_datetime, - } - - -class FakeJob(BaseModel): - JOB_ID_REGEX_PATTERN = "[a-zA-Z0-9_-]" - JOB_ID_REGEX = re.compile(JOB_ID_REGEX_PATTERN) - - def __init__(self, job_id, targets, document_source, document, description, presigned_url_config, target_selection, - job_executions_rollout_config, document_parameters, region_name): - if not self._job_id_matcher(self.JOB_ID_REGEX, job_id): - raise InvalidRequestException() - - self.region_name = region_name - self.job_id = job_id - self.job_arn = 'arn:aws:iot:%s:1:job/%s' % (self.region_name, job_id) - self.targets = targets - self.document_source = document_source - self.document = document - self.description = description - self.presigned_url_config = presigned_url_config - self.target_selection = target_selection - self.job_executions_rollout_config = job_executions_rollout_config - self.status = None # IN_PROGRESS | CANCELED | COMPLETED - self.comment = None - self.created_at = time.mktime(datetime(2015, 1, 1).timetuple()) - self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple()) - self.completed_at = None - self.job_process_details = { - 'processingTargets': targets, - 'numberOfQueuedThings': 1, - 'numberOfCanceledThings': 0, - 'numberOfSucceededThings': 0, - 'numberOfFailedThings': 0, - 'numberOfRejectedThings': 0, - 'numberOfInProgressThings': 0, - 'numberOfRemovedThings': 0 - } - self.document_parameters = document_parameters - - def to_dict(self): - obj = { - 'jobArn': self.job_arn, - 'jobId': self.job_id, - 'targets': self.targets, - 'description': self.description, - 'presignedUrlConfig': self.presigned_url_config, - 'targetSelection': self.target_selection, - 
'jobExecutionsRolloutConfig': self.job_executions_rollout_config, - 'status': self.status, - 'comment': self.comment, - 'createdAt': self.created_at, - 'lastUpdatedAt': self.last_updated_at, - 'completedAt': self.completedAt, - 'jobProcessDetails': self.job_process_details, - 'documentParameters': self.document_parameters, - 'document': self.document, - 'documentSource': self.document_source - } - - return obj - - def _job_id_matcher(self, regex, argument): - regex_match = regex.match(argument) - length_match = len(argument) <= 64 - return regex_match and length_match - - -class IoTBackend(BaseBackend): - def __init__(self, region_name=None): - super(IoTBackend, self).__init__() - self.region_name = region_name - self.things = OrderedDict() - self.jobs = OrderedDict() - self.thing_types = OrderedDict() - self.thing_groups = OrderedDict() - self.certificates = OrderedDict() - self.policies = OrderedDict() - self.principal_policies = OrderedDict() - self.principal_things = OrderedDict() - - def reset(self): - region_name = self.region_name - self.__dict__ = {} - self.__init__(region_name) - - def create_thing(self, thing_name, thing_type_name, attribute_payload): - thing_types = self.list_thing_types() - thing_type = None - if thing_type_name: - filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == thing_type_name] - if len(filtered_thing_types) == 0: - raise ResourceNotFoundException() - thing_type = filtered_thing_types[0] - if attribute_payload is None: - attributes = {} - elif 'attributes' not in attribute_payload: - attributes = {} - else: - attributes = attribute_payload['attributes'] - thing = FakeThing(thing_name, thing_type, attributes, self.region_name) - self.things[thing.arn] = thing - return thing.thing_name, thing.arn - - def create_thing_type(self, thing_type_name, thing_type_properties): - if thing_type_properties is None: - thing_type_properties = {} - thing_type = FakeThingType(thing_type_name, thing_type_properties, self.region_name) - self.thing_types[thing_type.arn] = thing_type - return thing_type.thing_type_name, thing_type.arn - - def list_thing_types(self, thing_type_name=None): - if thing_type_name: - # It's weird but thing_type_name is filtered by forward match, not complete match - return [_ for _ in self.thing_types.values() if _.thing_type_name.startswith(thing_type_name)] - return self.thing_types.values() - - def list_things(self, attribute_name, attribute_value, thing_type_name, max_results, token): - all_things = [_.to_dict() for _ in self.things.values()] - if attribute_name is not None and thing_type_name is not None: - filtered_things = list(filter(lambda elem: - attribute_name in elem["attributes"] and - elem["attributes"][attribute_name] == attribute_value and - "thingTypeName" in elem and - elem["thingTypeName"] == thing_type_name, all_things)) - elif attribute_name is not None and thing_type_name is None: - filtered_things = list(filter(lambda elem: - attribute_name in elem["attributes"] and - elem["attributes"][attribute_name] == attribute_value, all_things)) - elif attribute_name is None and thing_type_name is not None: - filtered_things = list( - filter(lambda elem: "thingTypeName" in elem and elem["thingTypeName"] == thing_type_name, all_things)) - else: - filtered_things = all_things - - if token is None: - things = filtered_things[0:max_results] - next_token = str(max_results) if len(filtered_things) > max_results else None - else: - token = int(token) - things = filtered_things[token:token + max_results] - next_token = 
str(token + max_results) if len(filtered_things) > token + max_results else None - - return things, next_token - - def describe_thing(self, thing_name): - things = [_ for _ in self.things.values() if _.thing_name == thing_name] - if len(things) == 0: - raise ResourceNotFoundException() - return things[0] - - def describe_thing_type(self, thing_type_name): - thing_types = [_ for _ in self.thing_types.values() if _.thing_type_name == thing_type_name] - if len(thing_types) == 0: - raise ResourceNotFoundException() - return thing_types[0] - - def delete_thing(self, thing_name, expected_version): - # TODO: handle expected_version - - # can raise ResourceNotFoundError - thing = self.describe_thing(thing_name) - del self.things[thing.arn] - - def delete_thing_type(self, thing_type_name): - # can raise ResourceNotFoundError - thing_type = self.describe_thing_type(thing_type_name) - del self.thing_types[thing_type.arn] - - def update_thing(self, thing_name, thing_type_name, attribute_payload, expected_version, remove_thing_type): - # if attributes payload = {}, nothing - thing = self.describe_thing(thing_name) - thing_type = None - - if remove_thing_type and thing_type_name: - raise InvalidRequestException() - - # thing_type - if thing_type_name: - thing_types = self.list_thing_types() - filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == thing_type_name] - if len(filtered_thing_types) == 0: - raise ResourceNotFoundException() - thing_type = filtered_thing_types[0] - thing.thing_type = thing_type - - if remove_thing_type: - thing.thing_type = None - - # attribute - if attribute_payload is not None and 'attributes' in attribute_payload: - do_merge = attribute_payload.get('merge', False) - attributes = attribute_payload['attributes'] - if not do_merge: - thing.attributes = attributes - else: - thing.attributes.update(attributes) - - def _random_string(self): - n = 20 - random_str = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(n)]) - return random_str - - def create_keys_and_certificate(self, set_as_active): - # implement here - # caCertificate can be blank - key_pair = { - 'PublicKey': self._random_string(), - 'PrivateKey': self._random_string() - } - certificate_pem = self._random_string() - status = 'ACTIVE' if set_as_active else 'INACTIVE' - certificate = FakeCertificate(certificate_pem, status, self.region_name) - self.certificates[certificate.certificate_id] = certificate - return certificate, key_pair - - def delete_certificate(self, certificate_id): - self.describe_certificate(certificate_id) - del self.certificates[certificate_id] - - def describe_certificate(self, certificate_id): - certs = [_ for _ in self.certificates.values() if _.certificate_id == certificate_id] - if len(certs) == 0: - raise ResourceNotFoundException() - return certs[0] - - def list_certificates(self): - return self.certificates.values() - - def update_certificate(self, certificate_id, new_status): - cert = self.describe_certificate(certificate_id) - # TODO: validate new_status - cert.status = new_status - - def create_policy(self, policy_name, policy_document): - policy = FakePolicy(policy_name, policy_document, self.region_name) - self.policies[policy.name] = policy - return policy - - def attach_policy(self, policy_name, target): - principal = self._get_principal(target) - policy = self.get_policy(policy_name) - k = (target, policy_name) - if k in self.principal_policies: - return - self.principal_policies[k] = (principal, policy) - - def detach_policy(self, 
policy_name, target): - # this may raises ResourceNotFoundException - self._get_principal(target) - self.get_policy(policy_name) - - k = (target, policy_name) - if k not in self.principal_policies: - raise ResourceNotFoundException() - del self.principal_policies[k] - - def list_attached_policies(self, target): - policies = [v[1] for k, v in self.principal_policies.items() if k[0] == target] - return policies - - def list_policies(self): - policies = self.policies.values() - return policies - - def get_policy(self, policy_name): - policies = [_ for _ in self.policies.values() if _.name == policy_name] - if len(policies) == 0: - raise ResourceNotFoundException() - return policies[0] - - def delete_policy(self, policy_name): - policy = self.get_policy(policy_name) - del self.policies[policy.name] - - def create_policy_version(self, policy_name, policy_document, set_as_default): - policy = self.get_policy(policy_name) - if not policy: - raise ResourceNotFoundException() - version = FakePolicyVersion(policy_name, policy_document, set_as_default, self.region_name) - policy.versions.append(version) - version.version_id = '{0}'.format(len(policy.versions)) - if set_as_default: - self.set_default_policy_version(policy_name, version.version_id) - return version - - def set_default_policy_version(self, policy_name, version_id): - policy = self.get_policy(policy_name) - if not policy: - raise ResourceNotFoundException() - for version in policy.versions: - if version.version_id == version_id: - version.is_default = True - policy.default_version_id = version.version_id - policy.document = version.document - else: - version.is_default = False - - def get_policy_version(self, policy_name, version_id): - policy = self.get_policy(policy_name) - if not policy: - raise ResourceNotFoundException() - for version in policy.versions: - if version.version_id == version_id: - return version - raise ResourceNotFoundException() - - def list_policy_versions(self, policy_name): - policy = self.get_policy(policy_name) - if not policy: - raise ResourceNotFoundException() - return policy.versions - - def delete_policy_version(self, policy_name, version_id): - policy = self.get_policy(policy_name) - if not policy: - raise ResourceNotFoundException() - if version_id == policy.default_version_id: - raise InvalidRequestException( - "Cannot delete the default version of a policy") - for i, v in enumerate(policy.versions): - if v.version_id == version_id: - del policy.versions[i] - return - raise ResourceNotFoundException() - - def _get_principal(self, principal_arn): - """ - raise ResourceNotFoundException - """ - if ':cert/' in principal_arn: - certs = [_ for _ in self.certificates.values() if _.arn == principal_arn] - if len(certs) == 0: - raise ResourceNotFoundException() - principal = certs[0] - return principal - else: - # TODO: search for cognito_ids - pass - raise ResourceNotFoundException() - - def attach_principal_policy(self, policy_name, principal_arn): - principal = self._get_principal(principal_arn) - policy = self.get_policy(policy_name) - k = (principal_arn, policy_name) - if k in self.principal_policies: - return - self.principal_policies[k] = (principal, policy) - - def detach_principal_policy(self, policy_name, principal_arn): - # this may raises ResourceNotFoundException - self._get_principal(principal_arn) - self.get_policy(policy_name) - - k = (principal_arn, policy_name) - if k not in self.principal_policies: - raise ResourceNotFoundException() - del self.principal_policies[k] - - def 
list_principal_policies(self, principal_arn): - policies = [v[1] for k, v in self.principal_policies.items() if k[0] == principal_arn] - return policies - - def list_policy_principals(self, policy_name): - principals = [k[0] for k, v in self.principal_policies.items() if k[1] == policy_name] - return principals - - def attach_thing_principal(self, thing_name, principal_arn): - principal = self._get_principal(principal_arn) - thing = self.describe_thing(thing_name) - k = (principal_arn, thing_name) - if k in self.principal_things: - return - self.principal_things[k] = (principal, thing) - - def detach_thing_principal(self, thing_name, principal_arn): - # this may raises ResourceNotFoundException - self._get_principal(principal_arn) - self.describe_thing(thing_name) - - k = (principal_arn, thing_name) - if k not in self.principal_things: - raise ResourceNotFoundException() - del self.principal_things[k] - - def list_principal_things(self, principal_arn): - thing_names = [k[0] for k, v in self.principal_things.items() if k[0] == principal_arn] - return thing_names - - def list_thing_principals(self, thing_name): - principals = [k[0] for k, v in self.principal_things.items() if k[1] == thing_name] - return principals - - def describe_thing_group(self, thing_group_name): - thing_groups = [_ for _ in self.thing_groups.values() if _.thing_group_name == thing_group_name] - if len(thing_groups) == 0: - raise ResourceNotFoundException() - return thing_groups[0] - - def create_thing_group(self, thing_group_name, parent_group_name, thing_group_properties): - thing_group = FakeThingGroup(thing_group_name, parent_group_name, thing_group_properties, self.region_name) - self.thing_groups[thing_group.arn] = thing_group - return thing_group.thing_group_name, thing_group.arn, thing_group.thing_group_id - - def delete_thing_group(self, thing_group_name, expected_version): - thing_group = self.describe_thing_group(thing_group_name) - del self.thing_groups[thing_group.arn] - - def list_thing_groups(self, parent_group, name_prefix_filter, recursive): - thing_groups = self.thing_groups.values() - return thing_groups - - def update_thing_group(self, thing_group_name, thing_group_properties, expected_version): - thing_group = self.describe_thing_group(thing_group_name) - if expected_version and expected_version != thing_group.version: - raise VersionConflictException(thing_group_name) - attribute_payload = thing_group_properties.get('attributePayload', None) - if attribute_payload is not None and 'attributes' in attribute_payload: - do_merge = attribute_payload.get('merge', False) - attributes = attribute_payload['attributes'] - if not do_merge: - thing_group.thing_group_properties['attributePayload']['attributes'] = attributes - else: - thing_group.thing_group_properties['attributePayload']['attributes'].update(attributes) - elif attribute_payload is not None and 'attributes' not in attribute_payload: - thing_group.attributes = {} - thing_group.version = thing_group.version + 1 - return thing_group.version - - def _identify_thing_group(self, thing_group_name, thing_group_arn): - # identify thing group - if thing_group_name is None and thing_group_arn is None: - raise InvalidRequestException( - ' Both thingGroupArn and thingGroupName are empty. 
Need to specify at least one of them' - ) - if thing_group_name is not None: - thing_group = self.describe_thing_group(thing_group_name) - if thing_group_arn and thing_group.arn != thing_group_arn: - raise InvalidRequestException( - 'ThingGroupName thingGroupArn does not match specified thingGroupName in request' - ) - elif thing_group_arn is not None: - if thing_group_arn not in self.thing_groups: - raise InvalidRequestException() - thing_group = self.thing_groups[thing_group_arn] - return thing_group - - def _identify_thing(self, thing_name, thing_arn): - # identify thing - if thing_name is None and thing_arn is None: - raise InvalidRequestException( - 'Both thingArn and thingName are empty. Need to specify at least one of them' - ) - if thing_name is not None: - thing = self.describe_thing(thing_name) - if thing_arn and thing.arn != thing_arn: - raise InvalidRequestException( - 'ThingName thingArn does not match specified thingName in request' - ) - elif thing_arn is not None: - if thing_arn not in self.things: - raise InvalidRequestException() - thing = self.things[thing_arn] - return thing - - def add_thing_to_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn): - thing_group = self._identify_thing_group(thing_group_name, thing_group_arn) - thing = self._identify_thing(thing_name, thing_arn) - if thing.arn in thing_group.things: - # aws ignores duplicate registration - return - thing_group.things[thing.arn] = thing - - def remove_thing_from_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn): - thing_group = self._identify_thing_group(thing_group_name, thing_group_arn) - thing = self._identify_thing(thing_name, thing_arn) - if thing.arn not in thing_group.things: - # aws ignores non-registered thing - return - del thing_group.things[thing.arn] - - def list_things_in_thing_group(self, thing_group_name, recursive): - thing_group = self.describe_thing_group(thing_group_name) - return thing_group.things.values() - - def list_thing_groups_for_thing(self, thing_name): - thing = self.describe_thing(thing_name) - all_thing_groups = self.list_thing_groups(None, None, None) - ret = [] - for thing_group in all_thing_groups: - if thing.arn in thing_group.things: - ret.append({ - 'groupName': thing_group.thing_group_name, - 'groupArn': thing_group.arn - }) - return ret - - def update_thing_groups_for_thing(self, thing_name, thing_groups_to_add, thing_groups_to_remove): - thing = self.describe_thing(thing_name) - for thing_group_name in thing_groups_to_add: - thing_group = self.describe_thing_group(thing_group_name) - self.add_thing_to_thing_group( - thing_group.thing_group_name, None, - thing.thing_name, None - ) - for thing_group_name in thing_groups_to_remove: - thing_group = self.describe_thing_group(thing_group_name) - self.remove_thing_from_thing_group( - thing_group.thing_group_name, None, - thing.thing_name, None - ) - - def create_job(self, job_id, targets, document_source, document, description, presigned_url_config, - target_selection, job_executions_rollout_config, document_parameters): - job = FakeJob(job_id, targets, document_source, document, description, presigned_url_config, target_selection, - job_executions_rollout_config, document_parameters, self.region_name) - self.jobs[job_id] = job - return job.job_arn, job_id, description - - def describe_job(self, job_id): - return self.jobs[job_id] - - def get_job_document(self, job_id): - return self.jobs[job_id] - - -available_regions = boto3.session.Session().get_available_regions("iot") 
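# A minimal sketch of how the job APIs modelled above are meant to be driven,
# assuming moto's mock_iot decorator and a boto3 "iot" client; the
# create_and_describe_job() helper is only an illustration. One caveat:
# FakeJob.to_dict above reads self.completedAt while the attribute is defined
# as self.completed_at, so describe_job will fail until that is corrected.
import boto3
from moto import mock_iot

@mock_iot
def create_and_describe_job():
    client = boto3.client("iot", region_name="eu-west-1")
    client.create_thing(thingName="my-thing")
    thing_arn = client.describe_thing(thingName="my-thing")["thingArn"]
    client.create_job(
        jobId="my-job-1",
        targets=[thing_arn],
        document='{"field": "value"}',
        description="a test job",
    )
    # resolved through the per-region backend registry defined just below
    return client.describe_job(jobId="my-job-1")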
-iot_backends = {region: IoTBackend(region) for region in available_regions}
+from __future__ import unicode_literals
+
+import hashlib
+import random
+import re
+import string
+import time
+import uuid
+from collections import OrderedDict
+from datetime import datetime
+
+import boto3
+
+from moto.core import BaseBackend, BaseModel
+from .exceptions import (
+    ResourceNotFoundException,
+    InvalidRequestException,
+    VersionConflictException
+)
+
+
+class FakeThing(BaseModel):
+    def __init__(self, thing_name, thing_type, attributes, region_name):
+        self.region_name = region_name
+        self.thing_name = thing_name
+        self.thing_type = thing_type
+        self.attributes = attributes
+        self.arn = 'arn:aws:iot:%s:1:thing/%s' % (self.region_name, thing_name)
+        self.version = 1
+        # TODO: handle 'version' updates
+
+        # for iot-data
+        self.thing_shadow = None
+
+    def to_dict(self, include_default_client_id=False):
+        obj = {
+            'thingName': self.thing_name,
+            'thingArn': self.arn,
+            'attributes': self.attributes,
+            'version': self.version
+        }
+        if self.thing_type:
+            obj['thingTypeName'] = self.thing_type.thing_type_name
+        if include_default_client_id:
+            obj['defaultClientId'] = self.thing_name
+        return obj
+
+
+class FakeThingType(BaseModel):
+    def __init__(self, thing_type_name, thing_type_properties, region_name):
+        self.region_name = region_name
+        self.thing_type_name = thing_type_name
+        self.thing_type_properties = thing_type_properties
+        self.thing_type_id = str(uuid.uuid4())  # the id format is undocumented, so use a random uuid
+        t = time.time()
+        self.metadata = {
+            'deprecated': False,
+            'creationDate': int(t * 1000) / 1000.0
+        }
+        self.arn = 'arn:aws:iot:%s:1:thingtype/%s' % (self.region_name, thing_type_name)
+
+    def to_dict(self):
+        return {
+            'thingTypeName': self.thing_type_name,
+            'thingTypeId': self.thing_type_id,
+            'thingTypeProperties': self.thing_type_properties,
+            'thingTypeMetadata': self.metadata
+        }
+
+
+class FakeThingGroup(BaseModel):
+    def __init__(self, thing_group_name, parent_group_name, thing_group_properties, region_name):
+        self.region_name = region_name
+        self.thing_group_name = thing_group_name
+        self.thing_group_id = str(uuid.uuid4())  # the id format is undocumented, so use a random uuid
+        self.version = 1  # TODO: implement real version tracking
+        self.parent_group_name = parent_group_name
+        self.thing_group_properties = thing_group_properties or {}
+        t = time.time()
+        self.metadata = {
+            'creationDate': int(t * 1000) / 1000.0
+        }
+        self.arn = 'arn:aws:iot:%s:1:thinggroup/%s' % (self.region_name, thing_group_name)
+        self.things = OrderedDict()
+
+    def to_dict(self):
+        return {
+            'thingGroupName': self.thing_group_name,
+            'thingGroupId': self.thing_group_id,
+            'version': self.version,
+            'thingGroupProperties': self.thing_group_properties,
+            'thingGroupMetadata': self.metadata
+        }
+
+
+class FakeCertificate(BaseModel):
+    def __init__(self, certificate_pem, status, region_name):
+        m = hashlib.sha256()
+        m.update(str(uuid.uuid4()).encode('utf-8'))
+        self.certificate_id = m.hexdigest()
+        self.arn = 'arn:aws:iot:%s:1:cert/%s' % (region_name, self.certificate_id)
+        self.certificate_pem = certificate_pem
+        self.status = status
+
+        # TODO: the values below are placeholders and need adjusting
+        self.owner = '1'
+        self.transfer_data = {}
+        self.creation_date = time.time()
+        self.last_modified_date = self.creation_date
+        self.ca_certificate_id = None
+
+    def to_dict(self):
+        return {
+            'certificateArn': self.arn,
+            'certificateId': self.certificate_id,
+            'status': self.status,
+            'creationDate': self.creation_date
+        }
+
+    def to_description_dict(self):
+        """
+        You might need keys below in some 
situations
+          - caCertificateId
+          - previousOwnedBy
+        """
+        return {
+            'certificateArn': self.arn,
+            'certificateId': self.certificate_id,
+            'status': self.status,
+            'certificatePem': self.certificate_pem,
+            'ownedBy': self.owner,
+            'creationDate': self.creation_date,
+            'lastModifiedDate': self.last_modified_date,
+            'transferData': self.transfer_data
+        }
+
+
+class FakePolicy(BaseModel):
+    def __init__(self, name, document, region_name, default_version_id='1'):
+        self.name = name
+        self.document = document
+        self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, name)
+        self.default_version_id = default_version_id
+        self.versions = [FakePolicyVersion(self.name, document, True, region_name)]
+
+    def to_get_dict(self):
+        return {
+            'policyName': self.name,
+            'policyArn': self.arn,
+            'policyDocument': self.document,
+            'defaultVersionId': self.default_version_id
+        }
+
+    def to_dict_at_creation(self):
+        return {
+            'policyName': self.name,
+            'policyArn': self.arn,
+            'policyDocument': self.document,
+            'policyVersionId': self.default_version_id
+        }
+
+    def to_dict(self):
+        return {
+            'policyName': self.name,
+            'policyArn': self.arn,
+        }
+
+
+class FakePolicyVersion(object):
+
+    def __init__(self,
+                 policy_name,
+                 document,
+                 is_default,
+                 region_name):
+        self.name = policy_name
+        self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, policy_name)
+        self.document = document or {}
+        self.is_default = is_default
+        self.version_id = '1'
+
+        self.create_datetime = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.last_modified_datetime = time.mktime(datetime(2015, 1, 2).timetuple())
+
+    def to_get_dict(self):
+        return {
+            'policyName': self.name,
+            'policyArn': self.arn,
+            'policyDocument': self.document,
+            'policyVersionId': self.version_id,
+            'isDefaultVersion': self.is_default,
+            'creationDate': self.create_datetime,
+            'lastModifiedDate': self.last_modified_datetime,
+            'generationId': self.version_id
+        }
+
+    def to_dict_at_creation(self):
+        return {
+            'policyArn': self.arn,
+            'policyDocument': self.document,
+            'policyVersionId': self.version_id,
+            'isDefaultVersion': self.is_default
+        }
+
+    def to_dict(self):
+        return {
+            'versionId': self.version_id,
+            'isDefaultVersion': self.is_default,
+            'createDate': self.create_datetime,
+        }
+
+
+class FakeJob(BaseModel):
+    JOB_ID_REGEX_PATTERN = "[a-zA-Z0-9_-]+$"
+    JOB_ID_REGEX = re.compile(JOB_ID_REGEX_PATTERN)
+
+    def __init__(self, job_id, targets, document_source, document, description, presigned_url_config, target_selection,
+                 job_executions_rollout_config, document_parameters, region_name):
+        if not self._job_id_matcher(self.JOB_ID_REGEX, job_id):
+            raise InvalidRequestException()
+
+        self.region_name = region_name
+        self.job_id = job_id
+        self.job_arn = 'arn:aws:iot:%s:1:job/%s' % (self.region_name, job_id)
+        self.targets = targets
+        self.document_source = document_source
+        self.document = document
+        self.description = description
+        self.presigned_url_config = presigned_url_config
+        self.target_selection = target_selection
+        self.job_executions_rollout_config = job_executions_rollout_config
+        self.status = None  # IN_PROGRESS | CANCELED | COMPLETED
+        self.comment = None
+        self.created_at = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.completed_at = None
+        self.job_process_details = {
+            'processingTargets': targets,
+            'numberOfQueuedThings': 1,
+            'numberOfCanceledThings': 0,
+            'numberOfSucceededThings': 0,
+            'numberOfFailedThings': 0,
+            'numberOfRejectedThings': 0,
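+            # note: these counters are set once at creation; the mock does not
+            # update them as executions progress
+            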
'numberOfInProgressThings': 0,
+            'numberOfRemovedThings': 0
+        }
+        self.document_parameters = document_parameters
+
+    def to_dict(self):
+        obj = {
+            'jobArn': self.job_arn,
+            'jobId': self.job_id,
+            'targets': self.targets,
+            'description': self.description,
+            'presignedUrlConfig': self.presigned_url_config,
+            'targetSelection': self.target_selection,
+            'jobExecutionsRolloutConfig': self.job_executions_rollout_config,
+            'status': self.status,
+            'comment': self.comment,
+            'createdAt': self.created_at,
+            'lastUpdatedAt': self.last_updated_at,
+            'completedAt': self.completed_at,
+            'jobProcessDetails': self.job_process_details,
+            'documentParameters': self.document_parameters,
+            'document': self.document,
+            'documentSource': self.document_source
+        }
+
+        return obj
+
+    def _job_id_matcher(self, regex, argument):
+        regex_match = regex.match(argument)
+        length_match = len(argument) <= 64
+        return regex_match and length_match
+
+
+class FakeJobExecution(BaseModel):
+
+    def __init__(self, job_id, thing_arn, status='QUEUED', force_canceled=False, status_details_map=None):
+        self.job_id = job_id
+        self.status = status  # QUEUED | IN_PROGRESS | SUCCEEDED | FAILED | TIMED_OUT | REJECTED | REMOVED | CANCELED
+        self.force_canceled = force_canceled
+        self.status_details_map = status_details_map or {}
+        self.thing_arn = thing_arn
+        self.queued_at = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.started_at = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.execution_number = 123
+        self.version_number = 123
+        self.approximate_seconds_before_time_out = 123
+
+    def to_dict(self):
+        obj = {
+            'jobId': self.job_id,
+            'status': self.status,
+            'forceCancel': self.force_canceled,
+            'statusDetails': {'detailsMap': self.status_details_map},
+            'thingArn': self.thing_arn,
+            'queuedAt': self.queued_at,
+            'startedAt': self.started_at,
+            'lastUpdatedAt': self.last_updated_at,
+            'executionNumber': self.execution_number,
+            'versionNumber': self.version_number,
+            'approximateSecondsBeforeTimedOut': self.approximate_seconds_before_time_out
+        }
+
+        return obj
+
+
+class IoTBackend(BaseBackend):
+    def __init__(self, region_name=None):
+        super(IoTBackend, self).__init__()
+        self.region_name = region_name
+        self.things = OrderedDict()
+        self.jobs = OrderedDict()
+        self.job_executions = OrderedDict()
+        self.thing_types = OrderedDict()
+        self.thing_groups = OrderedDict()
+        self.certificates = OrderedDict()
+        self.policies = OrderedDict()
+        self.principal_policies = OrderedDict()
+        self.principal_things = OrderedDict()
+
+    def reset(self):
+        region_name = self.region_name
+        self.__dict__ = {}
+        self.__init__(region_name)
+
+    def create_thing(self, thing_name, thing_type_name, attribute_payload):
+        thing_types = self.list_thing_types()
+        thing_type = None
+        if thing_type_name:
+            filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == thing_type_name]
+            if len(filtered_thing_types) == 0:
+                raise ResourceNotFoundException()
+            thing_type = filtered_thing_types[0]
+        if attribute_payload is None:
+            attributes = {}
+        elif 'attributes' not in attribute_payload:
+            attributes = {}
+        else:
+            attributes = attribute_payload['attributes']
+        thing = FakeThing(thing_name, thing_type, attributes, self.region_name)
+        self.things[thing.arn] = thing
+        return thing.thing_name, thing.arn
+
+    def create_thing_type(self, thing_type_name, thing_type_properties):
+        if thing_type_properties is None:
+            thing_type_properties = {}
+        thing_type = FakeThingType(thing_type_name, thing_type_properties, self.region_name)
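+        # things, thing types and thing groups are all stored keyed by ARN
+        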
self.thing_types[thing_type.arn] = thing_type
+        return thing_type.thing_type_name, thing_type.arn
+
+    def list_thing_types(self, thing_type_name=None):
+        if thing_type_name:
+            # note: the real API filters thing_type_name by prefix (forward match), not exact match
+            return [_ for _ in self.thing_types.values() if _.thing_type_name.startswith(thing_type_name)]
+        return self.thing_types.values()
+
+    def list_things(self, attribute_name, attribute_value, thing_type_name, max_results, token):
+        all_things = [_.to_dict() for _ in self.things.values()]
+        if attribute_name is not None and thing_type_name is not None:
+            filtered_things = list(filter(lambda elem:
+                                          attribute_name in elem["attributes"] and
+                                          elem["attributes"][attribute_name] == attribute_value and
+                                          "thingTypeName" in elem and
+                                          elem["thingTypeName"] == thing_type_name, all_things))
+        elif attribute_name is not None and thing_type_name is None:
+            filtered_things = list(filter(lambda elem:
+                                          attribute_name in elem["attributes"] and
+                                          elem["attributes"][attribute_name] == attribute_value, all_things))
+        elif attribute_name is None and thing_type_name is not None:
+            filtered_things = list(
+                filter(lambda elem: "thingTypeName" in elem and elem["thingTypeName"] == thing_type_name, all_things))
+        else:
+            filtered_things = all_things
+
+        if token is None:
+            things = filtered_things[0:max_results]
+            next_token = str(max_results) if len(filtered_things) > max_results else None
+        else:
+            token = int(token)
+            things = filtered_things[token:token + max_results]
+            next_token = str(token + max_results) if len(filtered_things) > token + max_results else None
+
+        return things, next_token
+
+    def describe_thing(self, thing_name):
+        things = [_ for _ in self.things.values() if _.thing_name == thing_name]
+        if len(things) == 0:
+            raise ResourceNotFoundException()
+        return things[0]
+
+    def describe_thing_type(self, thing_type_name):
+        thing_types = [_ for _ in self.thing_types.values() if _.thing_type_name == thing_type_name]
+        if len(thing_types) == 0:
+            raise ResourceNotFoundException()
+        return thing_types[0]
+
+    def delete_thing(self, thing_name, expected_version):
+        # TODO: handle expected_version
+
+        # can raise ResourceNotFoundException
+        thing = self.describe_thing(thing_name)
+        del self.things[thing.arn]
+
+    def delete_thing_type(self, thing_type_name):
+        # can raise ResourceNotFoundException
+        thing_type = self.describe_thing_type(thing_type_name)
+        del self.thing_types[thing_type.arn]
+
+    def update_thing(self, thing_name, thing_type_name, attribute_payload, expected_version, remove_thing_type):
+        # an empty attributes payload ({}) leaves the attributes untouched
+        thing = self.describe_thing(thing_name)
+        thing_type = None
+
+        if remove_thing_type and thing_type_name:
+            raise InvalidRequestException()
+
+        # thing_type
+        if thing_type_name:
+            thing_types = self.list_thing_types()
+            filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == thing_type_name]
+            if len(filtered_thing_types) == 0:
+                raise ResourceNotFoundException()
+            thing_type = filtered_thing_types[0]
+            thing.thing_type = thing_type
+
+        if remove_thing_type:
+            thing.thing_type = None
+
+        # attribute
+        if attribute_payload is not None and 'attributes' in attribute_payload:
+            do_merge = attribute_payload.get('merge', False)
+            attributes = attribute_payload['attributes']
+            if not do_merge:
+                thing.attributes = attributes
+            else:
+                thing.attributes.update(attributes)
+
+    def _random_string(self):
+        n = 20
+        random_str = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(n)])
+        return 
random_str
+
+    def create_keys_and_certificate(self, set_as_active):
+        # caCertificate can be blank
+        key_pair = {
+            'PublicKey': self._random_string(),
+            'PrivateKey': self._random_string()
+        }
+        certificate_pem = self._random_string()
+        status = 'ACTIVE' if set_as_active else 'INACTIVE'
+        certificate = FakeCertificate(certificate_pem, status, self.region_name)
+        self.certificates[certificate.certificate_id] = certificate
+        return certificate, key_pair
+
+    def delete_certificate(self, certificate_id):
+        self.describe_certificate(certificate_id)
+        del self.certificates[certificate_id]
+
+    def describe_certificate(self, certificate_id):
+        certs = [_ for _ in self.certificates.values() if _.certificate_id == certificate_id]
+        if len(certs) == 0:
+            raise ResourceNotFoundException()
+        return certs[0]
+
+    def list_certificates(self):
+        return self.certificates.values()
+
+    def update_certificate(self, certificate_id, new_status):
+        cert = self.describe_certificate(certificate_id)
+        # TODO: validate new_status
+        cert.status = new_status
+
+    def create_policy(self, policy_name, policy_document):
+        policy = FakePolicy(policy_name, policy_document, self.region_name)
+        self.policies[policy.name] = policy
+        return policy
+
+    def attach_policy(self, policy_name, target):
+        principal = self._get_principal(target)
+        policy = self.get_policy(policy_name)
+        k = (target, policy_name)
+        if k in self.principal_policies:
+            return
+        self.principal_policies[k] = (principal, policy)
+
+    def detach_policy(self, policy_name, target):
+        # this may raise ResourceNotFoundException
+        self._get_principal(target)
+        self.get_policy(policy_name)
+
+        k = (target, policy_name)
+        if k not in self.principal_policies:
+            raise ResourceNotFoundException()
+        del self.principal_policies[k]
+
+    def list_attached_policies(self, target):
+        policies = [v[1] for k, v in self.principal_policies.items() if k[0] == target]
+        return policies
+
+    def list_policies(self):
+        policies = self.policies.values()
+        return policies
+
+    def get_policy(self, policy_name):
+        policies = [_ for _ in self.policies.values() if _.name == policy_name]
+        if len(policies) == 0:
+            raise ResourceNotFoundException()
+        return policies[0]
+
+    def delete_policy(self, policy_name):
+        policy = self.get_policy(policy_name)
+        del self.policies[policy.name]
+
+    def create_policy_version(self, policy_name, policy_document, set_as_default):
+        policy = self.get_policy(policy_name)
+        if not policy:
+            raise ResourceNotFoundException()
+        version = FakePolicyVersion(policy_name, policy_document, set_as_default, self.region_name)
+        policy.versions.append(version)
+        version.version_id = '{0}'.format(len(policy.versions))
+        if set_as_default:
+            self.set_default_policy_version(policy_name, version.version_id)
+        return version
+
+    def set_default_policy_version(self, policy_name, version_id):
+        policy = self.get_policy(policy_name)
+        if not policy:
+            raise ResourceNotFoundException()
+        for version in policy.versions:
+            if version.version_id == version_id:
+                version.is_default = True
+                policy.default_version_id = version.version_id
+                policy.document = version.document
+            else:
+                version.is_default = False
+
+    def get_policy_version(self, policy_name, version_id):
+        policy = self.get_policy(policy_name)
+        if not policy:
+            raise ResourceNotFoundException()
+        for version in policy.versions:
+            if version.version_id == version_id:
+                return version
+        raise ResourceNotFoundException()
+
+    def list_policy_versions(self, policy_name):
+        policy = 
self.get_policy(policy_name)
+        if not policy:
+            raise ResourceNotFoundException()
+        return policy.versions
+
+    def delete_policy_version(self, policy_name, version_id):
+        policy = self.get_policy(policy_name)
+        if not policy:
+            raise ResourceNotFoundException()
+        if version_id == policy.default_version_id:
+            raise InvalidRequestException(
+                "Cannot delete the default version of a policy")
+        for i, v in enumerate(policy.versions):
+            if v.version_id == version_id:
+                del policy.versions[i]
+                return
+        raise ResourceNotFoundException()
+
+    def _get_principal(self, principal_arn):
+        """
+        raises ResourceNotFoundException if the principal does not exist
+        """
+        if ':cert/' in principal_arn:
+            certs = [_ for _ in self.certificates.values() if _.arn == principal_arn]
+            if len(certs) == 0:
+                raise ResourceNotFoundException()
+            principal = certs[0]
+            return principal
+        else:
+            # TODO: search for cognito_ids
+            pass
+        raise ResourceNotFoundException()
+
+    def attach_principal_policy(self, policy_name, principal_arn):
+        principal = self._get_principal(principal_arn)
+        policy = self.get_policy(policy_name)
+        k = (principal_arn, policy_name)
+        if k in self.principal_policies:
+            return
+        self.principal_policies[k] = (principal, policy)
+
+    def detach_principal_policy(self, policy_name, principal_arn):
+        # this may raise ResourceNotFoundException
+        self._get_principal(principal_arn)
+        self.get_policy(policy_name)
+
+        k = (principal_arn, policy_name)
+        if k not in self.principal_policies:
+            raise ResourceNotFoundException()
+        del self.principal_policies[k]
+
+    def list_principal_policies(self, principal_arn):
+        policies = [v[1] for k, v in self.principal_policies.items() if k[0] == principal_arn]
+        return policies
+
+    def list_policy_principals(self, policy_name):
+        principals = [k[0] for k, v in self.principal_policies.items() if k[1] == policy_name]
+        return principals
+
+    def attach_thing_principal(self, thing_name, principal_arn):
+        principal = self._get_principal(principal_arn)
+        thing = self.describe_thing(thing_name)
+        k = (principal_arn, thing_name)
+        if k in self.principal_things:
+            return
+        self.principal_things[k] = (principal, thing)
+
+    def detach_thing_principal(self, thing_name, principal_arn):
+        # this may raise ResourceNotFoundException
+        self._get_principal(principal_arn)
+        self.describe_thing(thing_name)
+
+        k = (principal_arn, thing_name)
+        if k not in self.principal_things:
+            raise ResourceNotFoundException()
+        del self.principal_things[k]
+
+    def list_principal_things(self, principal_arn):
+        thing_names = [k[1] for k, v in self.principal_things.items() if k[0] == principal_arn]
+        return thing_names
+
+    def list_thing_principals(self, thing_name):
+        principals = [k[0] for k, v in self.principal_things.items() if k[1] == thing_name]
+        return principals
+
+    def describe_thing_group(self, thing_group_name):
+        thing_groups = [_ for _ in self.thing_groups.values() if _.thing_group_name == thing_group_name]
+        if len(thing_groups) == 0:
+            raise ResourceNotFoundException()
+        return thing_groups[0]
+
+    def create_thing_group(self, thing_group_name, parent_group_name, thing_group_properties):
+        thing_group = FakeThingGroup(thing_group_name, parent_group_name, thing_group_properties, self.region_name)
+        self.thing_groups[thing_group.arn] = thing_group
+        return thing_group.thing_group_name, thing_group.arn, thing_group.thing_group_id
+
+    def delete_thing_group(self, thing_group_name, expected_version):
+        thing_group = self.describe_thing_group(thing_group_name)
+        del self.thing_groups[thing_group.arn]
+
+    def 
list_thing_groups(self, parent_group, name_prefix_filter, recursive):
+        thing_groups = self.thing_groups.values()
+        return thing_groups
+
+    def update_thing_group(self, thing_group_name, thing_group_properties, expected_version):
+        thing_group = self.describe_thing_group(thing_group_name)
+        if expected_version and expected_version != thing_group.version:
+            raise VersionConflictException(thing_group_name)
+        attribute_payload = thing_group_properties.get('attributePayload', None)
+        if attribute_payload is not None and 'attributes' in attribute_payload:
+            do_merge = attribute_payload.get('merge', False)
+            attributes = attribute_payload['attributes']
+            if not do_merge:
+                thing_group.thing_group_properties['attributePayload']['attributes'] = attributes
+            else:
+                thing_group.thing_group_properties['attributePayload']['attributes'].update(attributes)
+        elif attribute_payload is not None and 'attributes' not in attribute_payload:
+            thing_group.attributes = {}
+        thing_group.version = thing_group.version + 1
+        return thing_group.version
+
+    def _identify_thing_group(self, thing_group_name, thing_group_arn):
+        # identify thing group
+        if thing_group_name is None and thing_group_arn is None:
+            raise InvalidRequestException(
+                'Both thingGroupArn and thingGroupName are empty. Need to specify at least one of them'
+            )
+        if thing_group_name is not None:
+            thing_group = self.describe_thing_group(thing_group_name)
+            if thing_group_arn and thing_group.arn != thing_group_arn:
+                raise InvalidRequestException(
+                    'ThingGroupName thingGroupArn does not match specified thingGroupName in request'
+                )
+        elif thing_group_arn is not None:
+            if thing_group_arn not in self.thing_groups:
+                raise InvalidRequestException()
+            thing_group = self.thing_groups[thing_group_arn]
+        return thing_group
+
+    def _identify_thing(self, thing_name, thing_arn):
+        # identify thing
+        if thing_name is None and thing_arn is None:
+            raise InvalidRequestException(
+                'Both thingArn and thingName are empty. 
Need to specify at least one of them'
+            )
+        if thing_name is not None:
+            thing = self.describe_thing(thing_name)
+            if thing_arn and thing.arn != thing_arn:
+                raise InvalidRequestException(
+                    'ThingName thingArn does not match specified thingName in request'
+                )
+        elif thing_arn is not None:
+            if thing_arn not in self.things:
+                raise InvalidRequestException()
+            thing = self.things[thing_arn]
+        return thing
+
+    def add_thing_to_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn):
+        thing_group = self._identify_thing_group(thing_group_name, thing_group_arn)
+        thing = self._identify_thing(thing_name, thing_arn)
+        if thing.arn in thing_group.things:
+            # aws ignores duplicate registration
+            return
+        thing_group.things[thing.arn] = thing
+
+    def remove_thing_from_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn):
+        thing_group = self._identify_thing_group(thing_group_name, thing_group_arn)
+        thing = self._identify_thing(thing_name, thing_arn)
+        if thing.arn not in thing_group.things:
+            # aws ignores non-registered thing
+            return
+        del thing_group.things[thing.arn]
+
+    def list_things_in_thing_group(self, thing_group_name, recursive):
+        thing_group = self.describe_thing_group(thing_group_name)
+        return thing_group.things.values()
+
+    def list_thing_groups_for_thing(self, thing_name):
+        thing = self.describe_thing(thing_name)
+        all_thing_groups = self.list_thing_groups(None, None, None)
+        ret = []
+        for thing_group in all_thing_groups:
+            if thing.arn in thing_group.things:
+                ret.append({
+                    'groupName': thing_group.thing_group_name,
+                    'groupArn': thing_group.arn
+                })
+        return ret
+
+    def update_thing_groups_for_thing(self, thing_name, thing_groups_to_add, thing_groups_to_remove):
+        thing = self.describe_thing(thing_name)
+        for thing_group_name in thing_groups_to_add:
+            thing_group = self.describe_thing_group(thing_group_name)
+            self.add_thing_to_thing_group(
+                thing_group.thing_group_name, None,
+                thing.thing_name, None
+            )
+        for thing_group_name in thing_groups_to_remove:
+            thing_group = self.describe_thing_group(thing_group_name)
+            self.remove_thing_from_thing_group(
+                thing_group.thing_group_name, None,
+                thing.thing_name, None
+            )
+
+    def create_job(self, job_id, targets, document_source, document, description, presigned_url_config,
+                   target_selection, job_executions_rollout_config, document_parameters):
+        job = FakeJob(job_id, targets, document_source, document, description, presigned_url_config, target_selection,
+                      job_executions_rollout_config, document_parameters, self.region_name)
+        self.jobs[job_id] = job
+
+        for thing_arn in targets:
+            thing_name = thing_arn.split(':')[-1].split('/')[-1]
+            job_execution = FakeJobExecution(job_id, thing_arn)
+            self.job_executions[(job_id, thing_name)] = job_execution
+        return job.job_arn, job_id, description
+
+    def describe_job(self, job_id):
+        return self.jobs[job_id]
+
+    def get_job_document(self, job_id):
+        return self.jobs[job_id]
+
+    def describe_job_execution(self, job_id, thing_name, execution_number):
+        # TODO: filter by execution_number
+        return self.job_executions[(job_id, thing_name)]
+
+    def list_job_executions_for_job(self, job_id, status, max_results, next_token):
+        job_executions = [self.job_executions[je] for je in self.job_executions if je[0] == job_id]
+        # TODO: implement filters
+        return job_executions, next_token
+
+
+available_regions = boto3.session.Session().get_available_regions("iot")
+iot_backends = {region: IoTBackend(region) for region in available_regions}
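
For reference, a minimal usage sketch of the job mocks added above (the function name, jobId and document values are illustrative; the client setup mirrors the tests further down):

```python
import json

import boto3
from moto import mock_iot


@mock_iot
def check_job_document_roundtrip():
    client = boto3.client('iot', region_name='eu-west-1')
    thing = client.create_thing(thingName='my-thing')
    client.create_job(
        jobId='TestJob',
        targets=[thing['thingArn']],
        document=json.dumps({'field': 'value'}),
    )
    # the mock stores the job document verbatim and returns it as-is
    doc = client.get_job_document(jobId='TestJob')
    assert json.loads(doc['document']) == {'field': 'value'}
```

diff --git a/moto/iot/responses.py 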
b/moto/iot/responses.py index 3ef5bc93e..14302cc2f 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -1,497 +1,507 @@ -from __future__ import unicode_literals - -import json -from six.moves.urllib.parse import unquote - -from moto.core.responses import BaseResponse -from .models import iot_backends - - -class IoTResponse(BaseResponse): - SERVICE_NAME = 'iot' - - @property - def iot_backend(self): - return iot_backends[self.region] - - def create_thing(self): - thing_name = self._get_param("thingName") - thing_type_name = self._get_param("thingTypeName") - attribute_payload = self._get_param("attributePayload") - thing_name, thing_arn = self.iot_backend.create_thing( - thing_name=thing_name, - thing_type_name=thing_type_name, - attribute_payload=attribute_payload, - ) - return json.dumps(dict(thingName=thing_name, thingArn=thing_arn)) - - def create_thing_type(self): - thing_type_name = self._get_param("thingTypeName") - thing_type_properties = self._get_param("thingTypeProperties") - thing_type_name, thing_type_arn = self.iot_backend.create_thing_type( - thing_type_name=thing_type_name, - thing_type_properties=thing_type_properties, - ) - return json.dumps(dict(thingTypeName=thing_type_name, thingTypeArn=thing_type_arn)) - - def list_thing_types(self): - previous_next_token = self._get_param("nextToken") - max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier - thing_type_name = self._get_param("thingTypeName") - thing_types = self.iot_backend.list_thing_types( - thing_type_name=thing_type_name - ) - - thing_types = [_.to_dict() for _ in thing_types] - if previous_next_token is None: - result = thing_types[0:max_results] - next_token = str(max_results) if len(thing_types) > max_results else None - else: - token = int(previous_next_token) - result = thing_types[token:token + max_results] - next_token = str(token + max_results) if len(thing_types) > token + max_results else None - - return json.dumps(dict(thingTypes=result, nextToken=next_token)) - - def list_things(self): - previous_next_token = self._get_param("nextToken") - max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier - attribute_name = self._get_param("attributeName") - attribute_value = self._get_param("attributeValue") - thing_type_name = self._get_param("thingTypeName") - things, next_token = self.iot_backend.list_things( - attribute_name=attribute_name, - attribute_value=attribute_value, - thing_type_name=thing_type_name, - max_results=max_results, - token=previous_next_token - ) - - return json.dumps(dict(things=things, nextToken=next_token)) - - def describe_thing(self): - thing_name = self._get_param("thingName") - thing = self.iot_backend.describe_thing( - thing_name=thing_name, - ) - return json.dumps(thing.to_dict(include_default_client_id=True)) - - def describe_thing_type(self): - thing_type_name = self._get_param("thingTypeName") - thing_type = self.iot_backend.describe_thing_type( - thing_type_name=thing_type_name, - ) - return json.dumps(thing_type.to_dict()) - - def delete_thing(self): - thing_name = self._get_param("thingName") - expected_version = self._get_param("expectedVersion") - self.iot_backend.delete_thing( - thing_name=thing_name, - expected_version=expected_version, - ) - return json.dumps(dict()) - - def delete_thing_type(self): - thing_type_name = self._get_param("thingTypeName") - self.iot_backend.delete_thing_type( - thing_type_name=thing_type_name, - ) - return json.dumps(dict()) - - def 
update_thing(self): - thing_name = self._get_param("thingName") - thing_type_name = self._get_param("thingTypeName") - attribute_payload = self._get_param("attributePayload") - expected_version = self._get_param("expectedVersion") - remove_thing_type = self._get_param("removeThingType") - self.iot_backend.update_thing( - thing_name=thing_name, - thing_type_name=thing_type_name, - attribute_payload=attribute_payload, - expected_version=expected_version, - remove_thing_type=remove_thing_type, - ) - return json.dumps(dict()) - - def create_job(self): - job_arn, job_id, description = self.iot_backend.create_job( - job_id=self._get_param("jobId"), - targets=self._get_param("targets"), - description=self._get_param("description"), - document_source=self._get_param("documentSource"), - document=self._get_param("document"), - presigned_url_config=self._get_param("presignedUrlConfig"), - target_selection=self._get_param("targetSelection"), - job_executions_rollout_config=self._get_param("jobExecutionsRolloutConfig"), - document_parameters=self._get_param("documentParameters") - ) - - return json.dumps(dict(jobArn=job_arn, jobId=job_id, description=description)) - - def describe_job(self): - job = self.iot_backend.describe_job(job_id=self._get_param("jobId")) - return json.dumps(dict( - documentSource=job.document_source, - job=dict( - comment=job.comment, - completedAt=job.completed_at, - createdAt=job.created_at, - description=job.description, - documentParameters=job.document_parameters, - jobArn=job.job_arn, - jobExecutionsRolloutConfig=job.job_executions_rollout_config, - jobId=job.job_id, - jobProcessDetails=job.job_process_details, - lastUpdatedAt=job.last_updated_at, - presignedUrlConfig=job.presigned_url_config, - status=job.status, - targets=job.targets, - targetSelection=job.target_selection - ))) - - def get_job_document(self): - job = self.iot_backend.get_job_document(job_id=self._get_param("jobId")) - - if job.document is not None: - return json.dumps({'document': job.document}) - else: - # job.document_source is not None: - # TODO: needs to be implemented to get document_source's content from S3 - return json.dumps({'document': ''}) - - def create_keys_and_certificate(self): - set_as_active = self._get_bool_param("setAsActive") - cert, key_pair = self.iot_backend.create_keys_and_certificate( - set_as_active=set_as_active, - ) - return json.dumps(dict( - certificateArn=cert.arn, - certificateId=cert.certificate_id, - certificatePem=cert.certificate_pem, - keyPair=key_pair - )) - - def delete_certificate(self): - certificate_id = self._get_param("certificateId") - self.iot_backend.delete_certificate( - certificate_id=certificate_id, - ) - return json.dumps(dict()) - - def describe_certificate(self): - certificate_id = self._get_param("certificateId") - certificate = self.iot_backend.describe_certificate( - certificate_id=certificate_id, - ) - return json.dumps(dict(certificateDescription=certificate.to_description_dict())) - - def list_certificates(self): - # page_size = self._get_int_param("pageSize") - # marker = self._get_param("marker") - # ascending_order = self._get_param("ascendingOrder") - certificates = self.iot_backend.list_certificates() - # TODO: implement pagination in the future - return json.dumps(dict(certificates=[_.to_dict() for _ in certificates])) - - def update_certificate(self): - certificate_id = self._get_param("certificateId") - new_status = self._get_param("newStatus") - self.iot_backend.update_certificate( - certificate_id=certificate_id, - 
new_status=new_status, - ) - return json.dumps(dict()) - - def create_policy(self): - policy_name = self._get_param("policyName") - policy_document = self._get_param("policyDocument") - policy = self.iot_backend.create_policy( - policy_name=policy_name, - policy_document=policy_document, - ) - return json.dumps(policy.to_dict_at_creation()) - - def list_policies(self): - # marker = self._get_param("marker") - # page_size = self._get_int_param("pageSize") - # ascending_order = self._get_param("ascendingOrder") - policies = self.iot_backend.list_policies() - - # TODO: implement pagination in the future - return json.dumps(dict(policies=[_.to_dict() for _ in policies])) - - def get_policy(self): - policy_name = self._get_param("policyName") - policy = self.iot_backend.get_policy( - policy_name=policy_name, - ) - return json.dumps(policy.to_get_dict()) - - def delete_policy(self): - policy_name = self._get_param("policyName") - self.iot_backend.delete_policy( - policy_name=policy_name, - ) - return json.dumps(dict()) - - def create_policy_version(self): - policy_name = self._get_param('policyName') - policy_document = self._get_param('policyDocument') - set_as_default = self._get_bool_param('setAsDefault') - policy_version = self.iot_backend.create_policy_version(policy_name, policy_document, set_as_default) - - return json.dumps(dict(policy_version.to_dict_at_creation())) - - def set_default_policy_version(self): - policy_name = self._get_param('policyName') - version_id = self._get_param('policyVersionId') - self.iot_backend.set_default_policy_version(policy_name, version_id) - - return json.dumps(dict()) - - def get_policy_version(self): - policy_name = self._get_param('policyName') - version_id = self._get_param('policyVersionId') - policy_version = self.iot_backend.get_policy_version(policy_name, version_id) - return json.dumps(dict(policy_version.to_get_dict())) - - def list_policy_versions(self): - policy_name = self._get_param('policyName') - policiy_versions = self.iot_backend.list_policy_versions(policy_name=policy_name) - - return json.dumps(dict(policyVersions=[_.to_dict() for _ in policiy_versions])) - - def delete_policy_version(self): - policy_name = self._get_param('policyName') - version_id = self._get_param('policyVersionId') - self.iot_backend.delete_policy_version(policy_name, version_id) - - return json.dumps(dict()) - - def attach_policy(self): - policy_name = self._get_param("policyName") - principal = self._get_param('target') - self.iot_backend.attach_policy( - policy_name=policy_name, - target=principal, - ) - return json.dumps(dict()) - - def detach_policy(self): - policy_name = self._get_param("policyName") - principal = self._get_param('target') - self.iot_backend.detach_policy( - policy_name=policy_name, - target=principal, - ) - return json.dumps(dict()) - - def list_attached_policies(self): - principal = unquote(self._get_param('target')) - # marker = self._get_param("marker") - # page_size = self._get_int_param("pageSize") - policies = self.iot_backend.list_attached_policies( - target=principal - ) - # TODO: implement pagination in the future - next_marker = None - return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) - - def attach_principal_policy(self): - policy_name = self._get_param("policyName") - principal = self.headers.get('x-amzn-iot-principal') - self.iot_backend.attach_principal_policy( - policy_name=policy_name, - principal_arn=principal, - ) - return json.dumps(dict()) - - def detach_principal_policy(self): - 
policy_name = self._get_param("policyName") - principal = self.headers.get('x-amzn-iot-principal') - self.iot_backend.detach_principal_policy( - policy_name=policy_name, - principal_arn=principal, - ) - return json.dumps(dict()) - - def list_principal_policies(self): - principal = self.headers.get('x-amzn-iot-principal') - # marker = self._get_param("marker") - # page_size = self._get_int_param("pageSize") - # ascending_order = self._get_param("ascendingOrder") - policies = self.iot_backend.list_principal_policies( - principal_arn=principal - ) - # TODO: implement pagination in the future - next_marker = None - return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) - - def list_policy_principals(self): - policy_name = self.headers.get('x-amzn-iot-policy') - # marker = self._get_param("marker") - # page_size = self._get_int_param("pageSize") - # ascending_order = self._get_param("ascendingOrder") - principals = self.iot_backend.list_policy_principals( - policy_name=policy_name, - ) - # TODO: implement pagination in the future - next_marker = None - return json.dumps(dict(principals=principals, nextMarker=next_marker)) - - def attach_thing_principal(self): - thing_name = self._get_param("thingName") - principal = self.headers.get('x-amzn-principal') - self.iot_backend.attach_thing_principal( - thing_name=thing_name, - principal_arn=principal, - ) - return json.dumps(dict()) - - def detach_thing_principal(self): - thing_name = self._get_param("thingName") - principal = self.headers.get('x-amzn-principal') - self.iot_backend.detach_thing_principal( - thing_name=thing_name, - principal_arn=principal, - ) - return json.dumps(dict()) - - def list_principal_things(self): - next_token = self._get_param("nextToken") - # max_results = self._get_int_param("maxResults") - principal = self.headers.get('x-amzn-principal') - things = self.iot_backend.list_principal_things( - principal_arn=principal, - ) - # TODO: implement pagination in the future - next_token = None - return json.dumps(dict(things=things, nextToken=next_token)) - - def list_thing_principals(self): - thing_name = self._get_param("thingName") - principals = self.iot_backend.list_thing_principals( - thing_name=thing_name, - ) - return json.dumps(dict(principals=principals)) - - def describe_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - thing_group = self.iot_backend.describe_thing_group( - thing_group_name=thing_group_name, - ) - return json.dumps(thing_group.to_dict()) - - def create_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - parent_group_name = self._get_param("parentGroupName") - thing_group_properties = self._get_param("thingGroupProperties") - thing_group_name, thing_group_arn, thing_group_id = self.iot_backend.create_thing_group( - thing_group_name=thing_group_name, - parent_group_name=parent_group_name, - thing_group_properties=thing_group_properties, - ) - return json.dumps(dict( - thingGroupName=thing_group_name, - thingGroupArn=thing_group_arn, - thingGroupId=thing_group_id) - ) - - def delete_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - expected_version = self._get_param("expectedVersion") - self.iot_backend.delete_thing_group( - thing_group_name=thing_group_name, - expected_version=expected_version, - ) - return json.dumps(dict()) - - def list_thing_groups(self): - # next_token = self._get_param("nextToken") - # max_results = self._get_int_param("maxResults") - parent_group = 
self._get_param("parentGroup") - name_prefix_filter = self._get_param("namePrefixFilter") - recursive = self._get_param("recursive") - thing_groups = self.iot_backend.list_thing_groups( - parent_group=parent_group, - name_prefix_filter=name_prefix_filter, - recursive=recursive, - ) - next_token = None - rets = [{'groupName': _.thing_group_name, 'groupArn': _.arn} for _ in thing_groups] - # TODO: implement pagination in the future - return json.dumps(dict(thingGroups=rets, nextToken=next_token)) - - def update_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - thing_group_properties = self._get_param("thingGroupProperties") - expected_version = self._get_param("expectedVersion") - version = self.iot_backend.update_thing_group( - thing_group_name=thing_group_name, - thing_group_properties=thing_group_properties, - expected_version=expected_version, - ) - return json.dumps(dict(version=version)) - - def add_thing_to_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - thing_group_arn = self._get_param("thingGroupArn") - thing_name = self._get_param("thingName") - thing_arn = self._get_param("thingArn") - self.iot_backend.add_thing_to_thing_group( - thing_group_name=thing_group_name, - thing_group_arn=thing_group_arn, - thing_name=thing_name, - thing_arn=thing_arn, - ) - return json.dumps(dict()) - - def remove_thing_from_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - thing_group_arn = self._get_param("thingGroupArn") - thing_name = self._get_param("thingName") - thing_arn = self._get_param("thingArn") - self.iot_backend.remove_thing_from_thing_group( - thing_group_name=thing_group_name, - thing_group_arn=thing_group_arn, - thing_name=thing_name, - thing_arn=thing_arn, - ) - return json.dumps(dict()) - - def list_things_in_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - recursive = self._get_param("recursive") - # next_token = self._get_param("nextToken") - # max_results = self._get_int_param("maxResults") - things = self.iot_backend.list_things_in_thing_group( - thing_group_name=thing_group_name, - recursive=recursive, - ) - next_token = None - thing_names = [_.thing_name for _ in things] - # TODO: implement pagination in the future - return json.dumps(dict(things=thing_names, nextToken=next_token)) - - def list_thing_groups_for_thing(self): - thing_name = self._get_param("thingName") - # next_token = self._get_param("nextToken") - # max_results = self._get_int_param("maxResults") - thing_groups = self.iot_backend.list_thing_groups_for_thing( - thing_name=thing_name - ) - next_token = None - # TODO: implement pagination in the future - return json.dumps(dict(thingGroups=thing_groups, nextToken=next_token)) - - def update_thing_groups_for_thing(self): - thing_name = self._get_param("thingName") - thing_groups_to_add = self._get_param("thingGroupsToAdd") or [] - thing_groups_to_remove = self._get_param("thingGroupsToRemove") or [] - self.iot_backend.update_thing_groups_for_thing( - thing_name=thing_name, - thing_groups_to_add=thing_groups_to_add, - thing_groups_to_remove=thing_groups_to_remove, - ) - return json.dumps(dict()) +from __future__ import unicode_literals + +import json +from six.moves.urllib.parse import unquote + +from moto.core.responses import BaseResponse +from .models import iot_backends + + +class IoTResponse(BaseResponse): + SERVICE_NAME = 'iot' + + @property + def iot_backend(self): + return iot_backends[self.region] + + def create_thing(self): + thing_name = 
self._get_param("thingName") + thing_type_name = self._get_param("thingTypeName") + attribute_payload = self._get_param("attributePayload") + thing_name, thing_arn = self.iot_backend.create_thing( + thing_name=thing_name, + thing_type_name=thing_type_name, + attribute_payload=attribute_payload, + ) + return json.dumps(dict(thingName=thing_name, thingArn=thing_arn)) + + def create_thing_type(self): + thing_type_name = self._get_param("thingTypeName") + thing_type_properties = self._get_param("thingTypeProperties") + thing_type_name, thing_type_arn = self.iot_backend.create_thing_type( + thing_type_name=thing_type_name, + thing_type_properties=thing_type_properties, + ) + return json.dumps(dict(thingTypeName=thing_type_name, thingTypeArn=thing_type_arn)) + + def list_thing_types(self): + previous_next_token = self._get_param("nextToken") + max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier + thing_type_name = self._get_param("thingTypeName") + thing_types = self.iot_backend.list_thing_types( + thing_type_name=thing_type_name + ) + + thing_types = [_.to_dict() for _ in thing_types] + if previous_next_token is None: + result = thing_types[0:max_results] + next_token = str(max_results) if len(thing_types) > max_results else None + else: + token = int(previous_next_token) + result = thing_types[token:token + max_results] + next_token = str(token + max_results) if len(thing_types) > token + max_results else None + + return json.dumps(dict(thingTypes=result, nextToken=next_token)) + + def list_things(self): + previous_next_token = self._get_param("nextToken") + max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier + attribute_name = self._get_param("attributeName") + attribute_value = self._get_param("attributeValue") + thing_type_name = self._get_param("thingTypeName") + things, next_token = self.iot_backend.list_things( + attribute_name=attribute_name, + attribute_value=attribute_value, + thing_type_name=thing_type_name, + max_results=max_results, + token=previous_next_token + ) + + return json.dumps(dict(things=things, nextToken=next_token)) + + def describe_thing(self): + thing_name = self._get_param("thingName") + thing = self.iot_backend.describe_thing( + thing_name=thing_name, + ) + return json.dumps(thing.to_dict(include_default_client_id=True)) + + def describe_thing_type(self): + thing_type_name = self._get_param("thingTypeName") + thing_type = self.iot_backend.describe_thing_type( + thing_type_name=thing_type_name, + ) + return json.dumps(thing_type.to_dict()) + + def delete_thing(self): + thing_name = self._get_param("thingName") + expected_version = self._get_param("expectedVersion") + self.iot_backend.delete_thing( + thing_name=thing_name, + expected_version=expected_version, + ) + return json.dumps(dict()) + + def delete_thing_type(self): + thing_type_name = self._get_param("thingTypeName") + self.iot_backend.delete_thing_type( + thing_type_name=thing_type_name, + ) + return json.dumps(dict()) + + def update_thing(self): + thing_name = self._get_param("thingName") + thing_type_name = self._get_param("thingTypeName") + attribute_payload = self._get_param("attributePayload") + expected_version = self._get_param("expectedVersion") + remove_thing_type = self._get_param("removeThingType") + self.iot_backend.update_thing( + thing_name=thing_name, + thing_type_name=thing_type_name, + attribute_payload=attribute_payload, + expected_version=expected_version, + remove_thing_type=remove_thing_type, + ) + 
return json.dumps(dict())
+
+    def create_job(self):
+        job_arn, job_id, description = self.iot_backend.create_job(
+            job_id=self._get_param("jobId"),
+            targets=self._get_param("targets"),
+            description=self._get_param("description"),
+            document_source=self._get_param("documentSource"),
+            document=self._get_param("document"),
+            presigned_url_config=self._get_param("presignedUrlConfig"),
+            target_selection=self._get_param("targetSelection"),
+            job_executions_rollout_config=self._get_param("jobExecutionsRolloutConfig"),
+            document_parameters=self._get_param("documentParameters")
+        )
+
+        return json.dumps(dict(jobArn=job_arn, jobId=job_id, description=description))
+
+    def describe_job(self):
+        job = self.iot_backend.describe_job(job_id=self._get_param("jobId"))
+        return json.dumps(dict(
+            documentSource=job.document_source,
+            job=dict(
+                comment=job.comment,
+                completedAt=job.completed_at,
+                createdAt=job.created_at,
+                description=job.description,
+                documentParameters=job.document_parameters,
+                jobArn=job.job_arn,
+                jobExecutionsRolloutConfig=job.job_executions_rollout_config,
+                jobId=job.job_id,
+                jobProcessDetails=job.job_process_details,
+                lastUpdatedAt=job.last_updated_at,
+                presignedUrlConfig=job.presigned_url_config,
+                status=job.status,
+                targets=job.targets,
+                targetSelection=job.target_selection
+            )))
+
+    def get_job_document(self):
+        job = self.iot_backend.get_job_document(job_id=self._get_param("jobId"))
+
+        if job.document is not None:
+            return json.dumps({'document': job.document})
+        else:
+            # job.document_source is set instead
+            # TODO: fetch the document content from S3 via document_source
+            return json.dumps({'document': ''})
+
+    def list_job_executions_for_job(self):
+        job_executions, next_token = self.iot_backend.list_job_executions_for_job(
+            job_id=self._get_param("jobId"),
+            status=self._get_param("status"),
+            max_results=self._get_param("maxResults"),
+            next_token=self._get_param("nextToken"),
+        )
+
+        return json.dumps(dict(executionSummaries=[_.to_dict() for _ in job_executions], nextToken=next_token))
+
+    def create_keys_and_certificate(self):
+        set_as_active = self._get_bool_param("setAsActive")
+        cert, key_pair = self.iot_backend.create_keys_and_certificate(
+            set_as_active=set_as_active,
+        )
+        return json.dumps(dict(
+            certificateArn=cert.arn,
+            certificateId=cert.certificate_id,
+            certificatePem=cert.certificate_pem,
+            keyPair=key_pair
+        ))
+
+    def delete_certificate(self):
+        certificate_id = self._get_param("certificateId")
+        self.iot_backend.delete_certificate(
+            certificate_id=certificate_id,
+        )
+        return json.dumps(dict())
+
+    def describe_certificate(self):
+        certificate_id = self._get_param("certificateId")
+        certificate = self.iot_backend.describe_certificate(
+            certificate_id=certificate_id,
+        )
+        return json.dumps(dict(certificateDescription=certificate.to_description_dict()))
+
+    def list_certificates(self):
+        # page_size = self._get_int_param("pageSize")
+        # marker = self._get_param("marker")
+        # ascending_order = self._get_param("ascendingOrder")
+        certificates = self.iot_backend.list_certificates()
+        # TODO: implement pagination in the future
+        return json.dumps(dict(certificates=[_.to_dict() for _ in certificates]))
+
+    def update_certificate(self):
+        certificate_id = self._get_param("certificateId")
+        new_status = self._get_param("newStatus")
+        self.iot_backend.update_certificate(
+            certificate_id=certificate_id,
+            new_status=new_status,
+        )
+        return json.dumps(dict())
+
+    def create_policy(self):
+        policy_name = self._get_param("policyName")
+        
policy_document = self._get_param("policyDocument") + policy = self.iot_backend.create_policy( + policy_name=policy_name, + policy_document=policy_document, + ) + return json.dumps(policy.to_dict_at_creation()) + + def list_policies(self): + # marker = self._get_param("marker") + # page_size = self._get_int_param("pageSize") + # ascending_order = self._get_param("ascendingOrder") + policies = self.iot_backend.list_policies() + + # TODO: implement pagination in the future + return json.dumps(dict(policies=[_.to_dict() for _ in policies])) + + def get_policy(self): + policy_name = self._get_param("policyName") + policy = self.iot_backend.get_policy( + policy_name=policy_name, + ) + return json.dumps(policy.to_get_dict()) + + def delete_policy(self): + policy_name = self._get_param("policyName") + self.iot_backend.delete_policy( + policy_name=policy_name, + ) + return json.dumps(dict()) + + def create_policy_version(self): + policy_name = self._get_param('policyName') + policy_document = self._get_param('policyDocument') + set_as_default = self._get_bool_param('setAsDefault') + policy_version = self.iot_backend.create_policy_version(policy_name, policy_document, set_as_default) + + return json.dumps(dict(policy_version.to_dict_at_creation())) + + def set_default_policy_version(self): + policy_name = self._get_param('policyName') + version_id = self._get_param('policyVersionId') + self.iot_backend.set_default_policy_version(policy_name, version_id) + + return json.dumps(dict()) + + def get_policy_version(self): + policy_name = self._get_param('policyName') + version_id = self._get_param('policyVersionId') + policy_version = self.iot_backend.get_policy_version(policy_name, version_id) + return json.dumps(dict(policy_version.to_get_dict())) + + def list_policy_versions(self): + policy_name = self._get_param('policyName') + policiy_versions = self.iot_backend.list_policy_versions(policy_name=policy_name) + + return json.dumps(dict(policyVersions=[_.to_dict() for _ in policiy_versions])) + + def delete_policy_version(self): + policy_name = self._get_param('policyName') + version_id = self._get_param('policyVersionId') + self.iot_backend.delete_policy_version(policy_name, version_id) + + return json.dumps(dict()) + + def attach_policy(self): + policy_name = self._get_param("policyName") + principal = self._get_param('target') + self.iot_backend.attach_policy( + policy_name=policy_name, + target=principal, + ) + return json.dumps(dict()) + + def detach_policy(self): + policy_name = self._get_param("policyName") + principal = self._get_param('target') + self.iot_backend.detach_policy( + policy_name=policy_name, + target=principal, + ) + return json.dumps(dict()) + + def list_attached_policies(self): + principal = unquote(self._get_param('target')) + # marker = self._get_param("marker") + # page_size = self._get_int_param("pageSize") + policies = self.iot_backend.list_attached_policies( + target=principal + ) + # TODO: implement pagination in the future + next_marker = None + return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) + + def attach_principal_policy(self): + policy_name = self._get_param("policyName") + principal = self.headers.get('x-amzn-iot-principal') + self.iot_backend.attach_principal_policy( + policy_name=policy_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def detach_principal_policy(self): + policy_name = self._get_param("policyName") + principal = self.headers.get('x-amzn-iot-principal') + 
self.iot_backend.detach_principal_policy( + policy_name=policy_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def list_principal_policies(self): + principal = self.headers.get('x-amzn-iot-principal') + # marker = self._get_param("marker") + # page_size = self._get_int_param("pageSize") + # ascending_order = self._get_param("ascendingOrder") + policies = self.iot_backend.list_principal_policies( + principal_arn=principal + ) + # TODO: implement pagination in the future + next_marker = None + return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) + + def list_policy_principals(self): + policy_name = self.headers.get('x-amzn-iot-policy') + # marker = self._get_param("marker") + # page_size = self._get_int_param("pageSize") + # ascending_order = self._get_param("ascendingOrder") + principals = self.iot_backend.list_policy_principals( + policy_name=policy_name, + ) + # TODO: implement pagination in the future + next_marker = None + return json.dumps(dict(principals=principals, nextMarker=next_marker)) + + def attach_thing_principal(self): + thing_name = self._get_param("thingName") + principal = self.headers.get('x-amzn-principal') + self.iot_backend.attach_thing_principal( + thing_name=thing_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def detach_thing_principal(self): + thing_name = self._get_param("thingName") + principal = self.headers.get('x-amzn-principal') + self.iot_backend.detach_thing_principal( + thing_name=thing_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def list_principal_things(self): + next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + principal = self.headers.get('x-amzn-principal') + things = self.iot_backend.list_principal_things( + principal_arn=principal, + ) + # TODO: implement pagination in the future + next_token = None + return json.dumps(dict(things=things, nextToken=next_token)) + + def list_thing_principals(self): + thing_name = self._get_param("thingName") + principals = self.iot_backend.list_thing_principals( + thing_name=thing_name, + ) + return json.dumps(dict(principals=principals)) + + def describe_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group = self.iot_backend.describe_thing_group( + thing_group_name=thing_group_name, + ) + return json.dumps(thing_group.to_dict()) + + def create_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + parent_group_name = self._get_param("parentGroupName") + thing_group_properties = self._get_param("thingGroupProperties") + thing_group_name, thing_group_arn, thing_group_id = self.iot_backend.create_thing_group( + thing_group_name=thing_group_name, + parent_group_name=parent_group_name, + thing_group_properties=thing_group_properties, + ) + return json.dumps(dict( + thingGroupName=thing_group_name, + thingGroupArn=thing_group_arn, + thingGroupId=thing_group_id) + ) + + def delete_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + expected_version = self._get_param("expectedVersion") + self.iot_backend.delete_thing_group( + thing_group_name=thing_group_name, + expected_version=expected_version, + ) + return json.dumps(dict()) + + def list_thing_groups(self): + # next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + parent_group = self._get_param("parentGroup") + name_prefix_filter = self._get_param("namePrefixFilter") + recursive = 
self._get_param("recursive") + thing_groups = self.iot_backend.list_thing_groups( + parent_group=parent_group, + name_prefix_filter=name_prefix_filter, + recursive=recursive, + ) + next_token = None + rets = [{'groupName': _.thing_group_name, 'groupArn': _.arn} for _ in thing_groups] + # TODO: implement pagination in the future + return json.dumps(dict(thingGroups=rets, nextToken=next_token)) + + def update_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_properties = self._get_param("thingGroupProperties") + expected_version = self._get_param("expectedVersion") + version = self.iot_backend.update_thing_group( + thing_group_name=thing_group_name, + thing_group_properties=thing_group_properties, + expected_version=expected_version, + ) + return json.dumps(dict(version=version)) + + def add_thing_to_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_arn = self._get_param("thingGroupArn") + thing_name = self._get_param("thingName") + thing_arn = self._get_param("thingArn") + self.iot_backend.add_thing_to_thing_group( + thing_group_name=thing_group_name, + thing_group_arn=thing_group_arn, + thing_name=thing_name, + thing_arn=thing_arn, + ) + return json.dumps(dict()) + + def remove_thing_from_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_arn = self._get_param("thingGroupArn") + thing_name = self._get_param("thingName") + thing_arn = self._get_param("thingArn") + self.iot_backend.remove_thing_from_thing_group( + thing_group_name=thing_group_name, + thing_group_arn=thing_group_arn, + thing_name=thing_name, + thing_arn=thing_arn, + ) + return json.dumps(dict()) + + def list_things_in_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + recursive = self._get_param("recursive") + # next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + things = self.iot_backend.list_things_in_thing_group( + thing_group_name=thing_group_name, + recursive=recursive, + ) + next_token = None + thing_names = [_.thing_name for _ in things] + # TODO: implement pagination in the future + return json.dumps(dict(things=thing_names, nextToken=next_token)) + + def list_thing_groups_for_thing(self): + thing_name = self._get_param("thingName") + # next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + thing_groups = self.iot_backend.list_thing_groups_for_thing( + thing_name=thing_name + ) + next_token = None + # TODO: implement pagination in the future + return json.dumps(dict(thingGroups=thing_groups, nextToken=next_token)) + + def update_thing_groups_for_thing(self): + thing_name = self._get_param("thingName") + thing_groups_to_add = self._get_param("thingGroupsToAdd") or [] + thing_groups_to_remove = self._get_param("thingGroupsToRemove") or [] + self.iot_backend.update_thing_groups_for_thing( + thing_name=thing_name, + thing_groups_to_add=thing_groups_to_add, + thing_groups_to_remove=thing_groups_to_remove, + ) + return json.dumps(dict()) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 92fb3dfd0..d5f277d1d 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -874,3 +874,42 @@ def test_get_job_document_with_document(): job_document = client.get_job_document(jobId=job_id) job_document.should.have.key('document').which.should.equal("{\"field\": \"value\"}") + +@mock_iot +def test_list_job_executions_for_job(): + client = boto3.client('iot', 
region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + job.should.have.key('description') + + job_execution = client.list_job_executions_for_job(jobId=job_id) + job_execution.should.have.key('executionSummaries') + job_execution['executionSummaries'][0].should.have.key('jobId').which.should.equal(job_id) + From cfd12b6d19bb26de3935b7538224948196372fe9 Mon Sep 17 00:00:00 2001 From: Stephan Date: Mon, 7 Jan 2019 14:22:12 +0100 Subject: [PATCH 010/125] added IoT job_execution and job mocks --- IMPLEMENTATION_COVERAGE.md | 925 ++++++++++++++++++++++++++++++++++++- moto/iot/exceptions.py | 75 +-- moto/iot/models.py | 127 ++++- moto/iot/responses.py | 84 +++- tests/test_iot/test_iot.py | 260 ++++++++++- 5 files changed, 1399 insertions(+), 72 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index a153b92fc..fcfe31835 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -33,36 +33,59 @@ - [ ] update_certificate_authority ## alexaforbusiness - 0% implemented +- [ ] approve_skill - [ ] associate_contact_with_address_book - [ ] associate_device_with_room - [ ] associate_skill_group_with_room +- [ ] associate_skill_with_skill_group +- [ ] associate_skill_with_users - [ ] create_address_book +- [ ] create_business_report_schedule +- [ ] create_conference_provider - [ ] create_contact - [ ] create_profile - [ ] create_room - [ ] create_skill_group - [ ] create_user - [ ] delete_address_book +- [ ] delete_business_report_schedule +- [ ] delete_conference_provider - [ ] delete_contact +- [ ] delete_device - [ ] delete_profile - [ ] delete_room - [ ] delete_room_skill_parameter +- [ ] delete_skill_authorization - [ ] delete_skill_group - [ ] delete_user - [ ] disassociate_contact_from_address_book - [ ] disassociate_device_from_room +- [ ] disassociate_skill_from_skill_group +- [ ] disassociate_skill_from_users - [ ] disassociate_skill_group_from_room +- [ ] forget_smart_home_appliances - [ ] get_address_book +- [ ] get_conference_preference +- [ ] get_conference_provider - [ ] get_contact - [ ] get_device - [ ] get_profile - [ ] get_room - [ ] get_room_skill_parameter - [ ] get_skill_group +- [ ] list_business_report_schedules +- [ ] list_conference_providers - [ ] list_device_events - [ ] list_skills +- [ ] list_skills_store_categories +- [ ] list_skills_store_skills_by_category +- [ ] list_smart_home_appliances - [ ] list_tags +- [ ] put_conference_preference - [ ] put_room_skill_parameter +- [ ] put_skill_authorization +- [ ] register_avs_device +- [ ] reject_skill - [ ] resolve_room - [ ] revoke_invitation - [ ] search_address_books @@ -74,15 +97,40 @@ - [ ] search_users - [ ] send_invitation - [ ] start_device_sync +- [ ] start_smart_home_appliance_discovery - [ ] tag_resource - [ ] untag_resource - [ ] update_address_book +- [ ] update_business_report_schedule +- [ ] 
update_conference_provider - [ ] update_contact - [ ] update_device - [ ] update_profile - [ ] update_room - [ ] update_skill_group +## amplify - 0% implemented +- [ ] create_app +- [ ] create_branch +- [ ] create_domain_association +- [ ] delete_app +- [ ] delete_branch +- [ ] delete_domain_association +- [ ] delete_job +- [ ] get_app +- [ ] get_branch +- [ ] get_domain_association +- [ ] get_job +- [ ] list_apps +- [ ] list_branches +- [ ] list_domain_associations +- [ ] list_jobs +- [ ] start_job +- [ ] stop_job +- [ ] update_app +- [ ] update_branch +- [ ] update_domain_association + ## apigateway - 24% implemented - [ ] create_api_key - [ ] create_authorizer @@ -205,6 +253,67 @@ - [ ] update_usage_plan - [ ] update_vpc_link +## apigatewaymanagementapi - 0% implemented +- [ ] post_to_connection + +## apigatewayv2 - 0% implemented +- [ ] create_api +- [ ] create_api_mapping +- [ ] create_authorizer +- [ ] create_deployment +- [ ] create_domain_name +- [ ] create_integration +- [ ] create_integration_response +- [ ] create_model +- [ ] create_route +- [ ] create_route_response +- [ ] create_stage +- [ ] delete_api +- [ ] delete_api_mapping +- [ ] delete_authorizer +- [ ] delete_deployment +- [ ] delete_domain_name +- [ ] delete_integration +- [ ] delete_integration_response +- [ ] delete_model +- [ ] delete_route +- [ ] delete_route_response +- [ ] delete_stage +- [ ] get_api +- [ ] get_api_mapping +- [ ] get_api_mappings +- [ ] get_apis +- [ ] get_authorizer +- [ ] get_authorizers +- [ ] get_deployment +- [ ] get_deployments +- [ ] get_domain_name +- [ ] get_domain_names +- [ ] get_integration +- [ ] get_integration_response +- [ ] get_integration_responses +- [ ] get_integrations +- [ ] get_model +- [ ] get_model_template +- [ ] get_models +- [ ] get_route +- [ ] get_route_response +- [ ] get_route_responses +- [ ] get_routes +- [ ] get_stage +- [ ] get_stages +- [ ] update_api +- [ ] update_api_mapping +- [ ] update_authorizer +- [ ] update_deployment +- [ ] update_domain_name +- [ ] update_integration +- [ ] update_integration_response +- [ ] update_model +- [ ] update_route +- [ ] update_route_response +- [ ] update_stage + ## application-autoscaling - 0% implemented - [ ] delete_scaling_policy - [ ] delete_scheduled_action @@ -217,8 +326,31 @@ - [ ] put_scheduled_action - [ ] register_scalable_target +## appmesh - 0% implemented +- [ ] create_mesh +- [ ] create_route +- [ ] create_virtual_node +- [ ] create_virtual_router +- [ ] delete_mesh +- [ ] delete_route +- [ ] delete_virtual_node +- [ ] delete_virtual_router +- [ ] describe_mesh +- [ ] describe_route +- [ ] describe_virtual_node +- [ ] describe_virtual_router +- [ ] list_meshes +- [ ] list_routes +- [ ] list_virtual_nodes +- [ ] list_virtual_routers +- [ ] update_route +- [ ] update_virtual_node +- [ ] update_virtual_router + ## appstream - 0% implemented - [ ] associate_fleet +- [ ] batch_associate_user_stack +- [ ] batch_disassociate_user_stack - [ ] copy_image - [ ] create_directory_config - [ ] create_fleet @@ -226,12 +358,14 @@ - [ ] create_image_builder_streaming_url - [ ] create_stack - [ ] create_streaming_url +- [ ] create_user - [ ] delete_directory_config - [ ] delete_fleet - [ ] delete_image - [ ] delete_image_builder - [ ] delete_image_permissions - [ ] delete_stack +- [ ] delete_user - [ ] describe_directory_configs - [ ] describe_fleets - [ ] describe_image_builders @@ -239,7 +373,11 @@ - [ ] describe_images - [ ] describe_sessions - [ ] describe_stacks +- [ ] describe_user_stack_associations +- [ ] 
describe_users +- [ ] disable_user - [ ] disassociate_fleet +- [ ] enable_user - [ ] expire_session - [ ] list_associated_fleets - [ ] list_associated_stacks @@ -258,15 +396,18 @@ ## appsync - 0% implemented - [ ] create_api_key - [ ] create_data_source +- [ ] create_function - [ ] create_graphql_api - [ ] create_resolver - [ ] create_type - [ ] delete_api_key - [ ] delete_data_source +- [ ] delete_function - [ ] delete_graphql_api - [ ] delete_resolver - [ ] delete_type - [ ] get_data_source +- [ ] get_function - [ ] get_graphql_api - [ ] get_introspection_schema - [ ] get_resolver @@ -274,12 +415,15 @@ - [ ] get_type - [ ] list_api_keys - [ ] list_data_sources +- [ ] list_functions - [ ] list_graphql_apis - [ ] list_resolvers +- [ ] list_resolvers_by_function - [ ] list_types - [ ] start_schema_creation - [ ] update_api_key - [ ] update_data_source +- [ ] update_function - [ ] update_graphql_api - [ ] update_resolver - [ ] update_type @@ -358,6 +502,7 @@ - [ ] delete_scaling_plan - [ ] describe_scaling_plan_resources - [ ] describe_scaling_plans +- [ ] get_scaling_plan_resource_forecast_data - [ ] update_scaling_plan ## batch - 93% implemented @@ -386,6 +531,7 @@ - [ ] delete_notification - [ ] delete_subscriber - [ ] describe_budget +- [ ] describe_budget_performance_history - [ ] describe_budgets - [ ] describe_notifications_for_budget - [ ] describe_subscribers_for_notification @@ -395,12 +541,31 @@ ## ce - 0% implemented - [ ] get_cost_and_usage +- [ ] get_cost_forecast - [ ] get_dimension_values - [ ] get_reservation_coverage - [ ] get_reservation_purchase_recommendation - [ ] get_reservation_utilization - [ ] get_tags +## chime - 0% implemented +- [ ] batch_suspend_user +- [ ] batch_unsuspend_user +- [ ] batch_update_user +- [ ] create_account +- [ ] delete_account +- [ ] get_account +- [ ] get_account_settings +- [ ] get_user +- [ ] invite_users +- [ ] list_accounts +- [ ] list_users +- [ ] logout_user +- [ ] reset_personal_pin +- [ ] update_account +- [ ] update_account_settings +- [ ] update_user + ## cloud9 - 0% implemented - [ ] create_environment_ec2 - [ ] create_environment_membership @@ -481,7 +646,7 @@ - [ ] upgrade_applied_schema - [ ] upgrade_published_schema -## cloudformation - 21% implemented +## cloudformation - 20% implemented - [ ] cancel_update_stack - [ ] continue_update_rollback - [X] create_change_set @@ -494,13 +659,17 @@ - [ ] delete_stack_set - [ ] describe_account_limits - [ ] describe_change_set +- [ ] describe_stack_drift_detection_status - [ ] describe_stack_events - [ ] describe_stack_instance - [ ] describe_stack_resource +- [ ] describe_stack_resource_drifts - [ ] describe_stack_resources - [ ] describe_stack_set - [ ] describe_stack_set_operation - [X] describe_stacks +- [ ] detect_stack_drift +- [ ] detect_stack_resource_drift - [ ] estimate_template_cost - [X] execute_change_set - [ ] get_stack_policy @@ -597,12 +766,14 @@ - [ ] copy_backup_to_region - [ ] create_cluster - [ ] create_hsm +- [ ] delete_backup - [ ] delete_cluster - [ ] delete_hsm - [ ] describe_backups - [ ] describe_clusters - [ ] initialize_cluster - [ ] list_tags +- [ ] restore_backup - [ ] tag_resource - [ ] untag_resource @@ -653,7 +824,7 @@ - [ ] stop_logging - [ ] update_trail -## cloudwatch - 56% implemented +## cloudwatch - 52% implemented - [X] delete_alarms - [X] delete_dashboards - [ ] describe_alarm_history @@ -664,6 +835,7 @@ - [X] get_dashboard - [ ] get_metric_data - [X] get_metric_statistics +- [ ] get_metric_widget_image - [X] list_dashboards - [ ] list_metrics - 
[X] put_dashboard @@ -678,12 +850,15 @@ - [ ] create_project - [ ] create_webhook - [ ] delete_project +- [ ] delete_source_credentials - [ ] delete_webhook +- [ ] import_source_credentials - [ ] invalidate_project_cache - [ ] list_builds - [ ] list_builds_for_project - [ ] list_curated_environment_images - [ ] list_projects +- [ ] list_source_credentials - [ ] start_build - [ ] stop_build - [ ] update_project @@ -696,6 +871,7 @@ - [ ] create_repository - [ ] delete_branch - [ ] delete_comment_content +- [ ] delete_file - [ ] delete_repository - [ ] describe_pull_request_events - [ ] get_blob @@ -705,6 +881,8 @@ - [ ] get_comments_for_pull_request - [ ] get_commit - [ ] get_differences +- [ ] get_file +- [ ] get_folder - [ ] get_merge_conflicts - [ ] get_pull_request - [ ] get_repository @@ -733,6 +911,7 @@ - [ ] batch_get_applications - [ ] batch_get_deployment_groups - [ ] batch_get_deployment_instances +- [ ] batch_get_deployment_targets - [ ] batch_get_deployments - [ ] batch_get_on_premises_instances - [ ] continue_deployment @@ -751,12 +930,14 @@ - [ ] get_deployment_config - [ ] get_deployment_group - [ ] get_deployment_instance +- [ ] get_deployment_target - [ ] get_on_premises_instance - [ ] list_application_revisions - [ ] list_applications - [ ] list_deployment_configs - [ ] list_deployment_groups - [ ] list_deployment_instances +- [ ] list_deployment_targets - [ ] list_deployments - [ ] list_git_hub_account_token_names - [ ] list_on_premises_instances @@ -937,6 +1118,7 @@ - [ ] update_user_attributes - [ ] update_user_pool - [X] update_user_pool_client +- [ ] update_user_pool_domain - [ ] verify_software_token - [ ] verify_user_attribute @@ -965,8 +1147,15 @@ - [ ] batch_detect_key_phrases - [ ] batch_detect_sentiment - [ ] batch_detect_syntax +- [ ] create_document_classifier +- [ ] create_entity_recognizer +- [ ] delete_document_classifier +- [ ] delete_entity_recognizer +- [ ] describe_document_classification_job +- [ ] describe_document_classifier - [ ] describe_dominant_language_detection_job - [ ] describe_entities_detection_job +- [ ] describe_entity_recognizer - [ ] describe_key_phrases_detection_job - [ ] describe_sentiment_detection_job - [ ] describe_topics_detection_job @@ -975,11 +1164,15 @@ - [ ] detect_key_phrases - [ ] detect_sentiment - [ ] detect_syntax +- [ ] list_document_classification_jobs +- [ ] list_document_classifiers - [ ] list_dominant_language_detection_jobs - [ ] list_entities_detection_jobs +- [ ] list_entity_recognizers - [ ] list_key_phrases_detection_jobs - [ ] list_sentiment_detection_jobs - [ ] list_topics_detection_jobs +- [ ] start_document_classification_job - [ ] start_dominant_language_detection_job - [ ] start_entities_detection_job - [ ] start_key_phrases_detection_job @@ -989,8 +1182,15 @@ - [ ] stop_entities_detection_job - [ ] stop_key_phrases_detection_job - [ ] stop_sentiment_detection_job +- [ ] stop_training_document_classifier +- [ ] stop_training_entity_recognizer + +## comprehendmedical - 0% implemented +- [ ] detect_entities +- [ ] detect_phi ## config - 0% implemented +- [ ] batch_get_aggregate_resource_config - [ ] batch_get_resource_config - [ ] delete_aggregation_authorization - [ ] delete_config_rule @@ -1017,12 +1217,15 @@ - [ ] describe_retention_configurations - [ ] get_aggregate_compliance_details_by_config_rule - [ ] get_aggregate_config_rule_compliance_summary +- [ ] get_aggregate_discovered_resource_counts +- [ ] get_aggregate_resource_config - [ ] get_compliance_details_by_config_rule - [ ] 
get_compliance_details_by_resource - [ ] get_compliance_summary_by_config_rule - [ ] get_compliance_summary_by_resource_type - [ ] get_discovered_resource_counts - [ ] get_resource_config_history +- [ ] list_aggregate_discovered_resources - [ ] list_discovered_resources - [ ] put_aggregation_authorization - [ ] put_config_rule @@ -1041,13 +1244,17 @@ - [ ] describe_user - [ ] describe_user_hierarchy_group - [ ] describe_user_hierarchy_structure +- [ ] get_contact_attributes +- [ ] get_current_metric_data - [ ] get_federation_token +- [ ] get_metric_data - [ ] list_routing_profiles - [ ] list_security_profiles - [ ] list_user_hierarchy_groups - [ ] list_users - [ ] start_outbound_voice_contact - [ ] stop_contact +- [ ] update_contact_attributes - [ ] update_user_hierarchy - [ ] update_user_identity_info - [ ] update_user_phone_config @@ -1080,6 +1287,33 @@ - [ ] set_task_status - [ ] validate_pipeline_definition +## datasync - 0% implemented +- [ ] cancel_task_execution +- [ ] create_agent +- [ ] create_location_efs +- [ ] create_location_nfs +- [ ] create_location_s3 +- [ ] create_task +- [ ] delete_agent +- [ ] delete_location +- [ ] delete_task +- [ ] describe_agent +- [ ] describe_location_efs +- [ ] describe_location_nfs +- [ ] describe_location_s3 +- [ ] describe_task +- [ ] describe_task_execution +- [ ] list_agents +- [ ] list_locations +- [ ] list_tags_for_resource +- [ ] list_task_executions +- [ ] list_tasks +- [ ] start_task_execution +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_agent +- [ ] update_task + ## dax - 0% implemented - [ ] create_cluster - [ ] create_parameter_group @@ -1214,6 +1448,7 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_lag +- [ ] update_virtual_interface_attributes ## discovery - 0% implemented - [ ] associate_configuration_items_to_application @@ -1293,6 +1528,7 @@ - [ ] test_connection ## ds - 0% implemented +- [ ] accept_shared_directory - [ ] add_ip_routes - [ ] add_tags_to_resource - [ ] cancel_schema_extension @@ -1301,11 +1537,13 @@ - [ ] create_computer - [ ] create_conditional_forwarder - [ ] create_directory +- [ ] create_log_subscription - [ ] create_microsoft_ad - [ ] create_snapshot - [ ] create_trust - [ ] delete_conditional_forwarder - [ ] delete_directory +- [ ] delete_log_subscription - [ ] delete_snapshot - [ ] delete_trust - [ ] deregister_event_topic @@ -1313,6 +1551,7 @@ - [ ] describe_directories - [ ] describe_domain_controllers - [ ] describe_event_topics +- [ ] describe_shared_directories - [ ] describe_snapshots - [ ] describe_trusts - [ ] disable_radius @@ -1322,20 +1561,25 @@ - [ ] get_directory_limits - [ ] get_snapshot_limits - [ ] list_ip_routes +- [ ] list_log_subscriptions - [ ] list_schema_extensions - [ ] list_tags_for_resource - [ ] register_event_topic +- [ ] reject_shared_directory - [ ] remove_ip_routes - [ ] remove_tags_from_resource - [ ] reset_user_password - [ ] restore_from_snapshot +- [ ] share_directory - [ ] start_schema_extension +- [ ] unshare_directory - [ ] update_conditional_forwarder - [ ] update_number_of_domain_controllers - [ ] update_radius +- [ ] update_trust - [ ] verify_trust -## dynamodb - 21% implemented +## dynamodb - 19% implemented - [ ] batch_get_item - [ ] batch_write_item - [ ] create_backup @@ -1346,6 +1590,7 @@ - [X] delete_table - [ ] describe_backup - [ ] describe_continuous_backups +- [ ] describe_endpoints - [ ] describe_global_table - [ ] describe_global_table_settings - [ ] describe_limits @@ -1362,6 +1607,8 @@ - [ ] restore_table_to_point_in_time - [X] scan - 
[ ] tag_resource +- [ ] transact_get_items +- [ ] transact_write_items - [ ] untag_resource - [ ] update_continuous_backups - [ ] update_global_table @@ -1376,29 +1623,36 @@ - [ ] get_shard_iterator - [ ] list_streams -## ec2 - 36% implemented +## ec2 - 30% implemented - [ ] accept_reserved_instances_exchange_quote +- [ ] accept_transit_gateway_vpc_attachment - [ ] accept_vpc_endpoint_connections - [X] accept_vpc_peering_connection +- [ ] advertise_byoip_cidr - [X] allocate_address - [ ] allocate_hosts +- [ ] apply_security_groups_to_client_vpn_target_network - [ ] assign_ipv6_addresses - [ ] assign_private_ip_addresses - [X] associate_address +- [ ] associate_client_vpn_target_network - [X] associate_dhcp_options - [ ] associate_iam_instance_profile - [X] associate_route_table - [ ] associate_subnet_cidr_block +- [ ] associate_transit_gateway_route_table - [X] associate_vpc_cidr_block - [ ] attach_classic_link_vpc - [X] attach_internet_gateway - [X] attach_network_interface - [X] attach_volume - [X] attach_vpn_gateway +- [ ] authorize_client_vpn_ingress - [X] authorize_security_group_egress - [X] authorize_security_group_ingress - [ ] bundle_instance - [ ] cancel_bundle_task +- [ ] cancel_capacity_reservation - [ ] cancel_conversion_task - [ ] cancel_export_task - [ ] cancel_import_task @@ -1409,6 +1663,9 @@ - [ ] copy_fpga_image - [X] copy_image - [X] copy_snapshot +- [ ] create_capacity_reservation +- [ ] create_client_vpn_endpoint +- [ ] create_client_vpn_route - [X] create_customer_gateway - [ ] create_default_subnet - [ ] create_default_vpc @@ -1437,6 +1694,10 @@ - [ ] create_spot_datafeed_subscription - [X] create_subnet - [X] create_tags +- [ ] create_transit_gateway +- [ ] create_transit_gateway_route +- [ ] create_transit_gateway_route_table +- [ ] create_transit_gateway_vpc_attachment - [X] create_volume - [X] create_vpc - [ ] create_vpc_endpoint @@ -1446,6 +1707,8 @@ - [X] create_vpn_connection - [ ] create_vpn_connection_route - [X] create_vpn_gateway +- [ ] delete_client_vpn_endpoint +- [ ] delete_client_vpn_route - [X] delete_customer_gateway - [ ] delete_dhcp_options - [ ] delete_egress_only_internet_gateway @@ -1469,6 +1732,10 @@ - [ ] delete_spot_datafeed_subscription - [X] delete_subnet - [X] delete_tags +- [ ] delete_transit_gateway +- [ ] delete_transit_gateway_route +- [ ] delete_transit_gateway_route_table +- [ ] delete_transit_gateway_vpc_attachment - [X] delete_volume - [X] delete_vpc - [ ] delete_vpc_endpoint_connection_notifications @@ -1478,13 +1745,21 @@ - [X] delete_vpn_connection - [ ] delete_vpn_connection_route - [X] delete_vpn_gateway +- [ ] deprovision_byoip_cidr - [X] deregister_image - [ ] describe_account_attributes - [X] describe_addresses - [ ] describe_aggregate_id_format - [X] describe_availability_zones - [ ] describe_bundle_tasks +- [ ] describe_byoip_cidrs +- [ ] describe_capacity_reservations - [ ] describe_classic_link_instances +- [ ] describe_client_vpn_authorization_rules +- [ ] describe_client_vpn_connections +- [ ] describe_client_vpn_endpoints +- [ ] describe_client_vpn_routes +- [ ] describe_client_vpn_target_networks - [ ] describe_conversion_tasks - [ ] describe_customer_gateways - [X] describe_dhcp_options @@ -1524,6 +1799,7 @@ - [ ] describe_placement_groups - [ ] describe_prefix_lists - [ ] describe_principal_id_format +- [ ] describe_public_ipv4_pools - [X] describe_regions - [ ] describe_reserved_instances - [ ] describe_reserved_instances_listings @@ -1545,6 +1821,10 @@ - [ ] describe_stale_security_groups - [ ] describe_subnets 
- [X] describe_tags +- [ ] describe_transit_gateway_attachments +- [ ] describe_transit_gateway_route_tables +- [ ] describe_transit_gateway_vpc_attachments +- [ ] describe_transit_gateways - [ ] describe_volume_attribute - [ ] describe_volume_status - [X] describe_volumes @@ -1567,29 +1847,42 @@ - [X] detach_network_interface - [X] detach_volume - [X] detach_vpn_gateway +- [ ] disable_transit_gateway_route_table_propagation - [ ] disable_vgw_route_propagation - [ ] disable_vpc_classic_link - [ ] disable_vpc_classic_link_dns_support - [X] disassociate_address +- [ ] disassociate_client_vpn_target_network - [ ] disassociate_iam_instance_profile - [X] disassociate_route_table - [ ] disassociate_subnet_cidr_block +- [ ] disassociate_transit_gateway_route_table - [X] disassociate_vpc_cidr_block +- [ ] enable_transit_gateway_route_table_propagation - [ ] enable_vgw_route_propagation - [ ] enable_volume_io - [ ] enable_vpc_classic_link - [ ] enable_vpc_classic_link_dns_support +- [ ] export_client_vpn_client_certificate_revocation_list +- [ ] export_client_vpn_client_configuration +- [ ] export_transit_gateway_routes - [ ] get_console_output - [ ] get_console_screenshot - [ ] get_host_reservation_purchase_preview - [ ] get_launch_template_data - [ ] get_password_data - [ ] get_reserved_instances_exchange_quote +- [ ] get_transit_gateway_attachment_propagations +- [ ] get_transit_gateway_route_table_associations +- [ ] get_transit_gateway_route_table_propagations +- [ ] import_client_vpn_client_certificate_revocation_list - [ ] import_image - [ ] import_instance - [X] import_key_pair - [ ] import_snapshot - [ ] import_volume +- [ ] modify_capacity_reservation +- [ ] modify_client_vpn_endpoint - [ ] modify_fleet - [ ] modify_fpga_image_attribute - [ ] modify_hosts @@ -1597,6 +1890,7 @@ - [ ] modify_identity_id_format - [ ] modify_image_attribute - [X] modify_instance_attribute +- [ ] modify_instance_capacity_reservation_attributes - [ ] modify_instance_credit_specification - [ ] modify_instance_placement - [ ] modify_launch_template @@ -1605,6 +1899,7 @@ - [ ] modify_snapshot_attribute - [X] modify_spot_fleet_request - [X] modify_subnet_attribute +- [ ] modify_transit_gateway_vpc_attachment - [ ] modify_volume - [ ] modify_volume_attribute - [X] modify_vpc_attribute @@ -1616,11 +1911,13 @@ - [ ] modify_vpc_tenancy - [ ] monitor_instances - [ ] move_address_to_vpc +- [ ] provision_byoip_cidr - [ ] purchase_host_reservation - [ ] purchase_reserved_instances_offering - [ ] purchase_scheduled_instances - [X] reboot_instances - [ ] register_image +- [ ] reject_transit_gateway_vpc_attachment - [ ] reject_vpc_endpoint_connections - [X] reject_vpc_peering_connection - [X] release_address @@ -1630,6 +1927,7 @@ - [X] replace_network_acl_entry - [X] replace_route - [X] replace_route_table_association +- [ ] replace_transit_gateway_route - [ ] report_instance_status - [X] request_spot_fleet - [X] request_spot_instances @@ -1639,20 +1937,24 @@ - [ ] reset_network_interface_attribute - [ ] reset_snapshot_attribute - [ ] restore_address_to_classic +- [ ] revoke_client_vpn_ingress - [X] revoke_security_group_egress - [X] revoke_security_group_ingress - [ ] run_instances - [ ] run_scheduled_instances +- [ ] search_transit_gateway_routes - [X] start_instances - [X] stop_instances +- [ ] terminate_client_vpn_connections - [X] terminate_instances - [ ] unassign_ipv6_addresses - [ ] unassign_private_ip_addresses - [ ] unmonitor_instances - [ ] update_security_group_rule_descriptions_egress - [ ] 
update_security_group_rule_descriptions_ingress +- [ ] withdraw_byoip_cidr -## ecr - 31% implemented +## ecr - 28% implemented - [ ] batch_check_layer_availability - [ ] batch_delete_image - [X] batch_get_image @@ -1670,15 +1972,19 @@ - [ ] get_repository_policy - [ ] initiate_layer_upload - [X] list_images +- [ ] list_tags_for_resource - [X] put_image - [ ] put_lifecycle_policy - [ ] set_repository_policy - [ ] start_lifecycle_policy_preview +- [ ] tag_resource +- [ ] untag_resource - [ ] upload_layer_part -## ecs - 87% implemented +## ecs - 72% implemented - [X] create_cluster - [X] create_service +- [ ] delete_account_setting - [X] delete_attributes - [X] delete_cluster - [X] delete_service @@ -1690,13 +1996,16 @@ - [X] describe_task_definition - [X] describe_tasks - [ ] discover_poll_endpoint +- [ ] list_account_settings - [X] list_attributes - [X] list_clusters - [X] list_container_instances - [X] list_services +- [ ] list_tags_for_resource - [X] list_task_definition_families - [X] list_task_definitions - [X] list_tasks +- [ ] put_account_setting - [X] put_attributes - [X] register_container_instance - [X] register_task_definition @@ -1705,6 +2014,8 @@ - [X] stop_task - [ ] submit_container_state_change - [ ] submit_task_state_change +- [ ] tag_resource +- [ ] untag_resource - [ ] update_container_agent - [X] update_container_instances_state - [X] update_service @@ -1727,7 +2038,10 @@ - [ ] create_cluster - [ ] delete_cluster - [ ] describe_cluster +- [ ] describe_update - [ ] list_clusters +- [ ] list_updates +- [ ] update_cluster_version ## elasticache - 0% implemented - [ ] add_tags_to_resource @@ -1739,6 +2053,7 @@ - [ ] create_cache_subnet_group - [ ] create_replication_group - [ ] create_snapshot +- [ ] decrease_replica_count - [ ] delete_cache_cluster - [ ] delete_cache_parameter_group - [ ] delete_cache_security_group @@ -1757,6 +2072,7 @@ - [ ] describe_reserved_cache_nodes - [ ] describe_reserved_cache_nodes_offerings - [ ] describe_snapshots +- [ ] increase_replica_count - [ ] list_allowed_node_type_modifications - [ ] list_tags_for_resource - [ ] modify_cache_cluster @@ -1934,6 +2250,7 @@ ## es - 0% implemented - [ ] add_tags +- [ ] cancel_elasticsearch_service_software_update - [ ] create_elasticsearch_domain - [ ] delete_elasticsearch_domain - [ ] delete_elasticsearch_service_role @@ -1952,6 +2269,7 @@ - [ ] list_tags - [ ] purchase_reserved_elasticsearch_instance_offering - [ ] remove_tags +- [ ] start_elasticsearch_service_software_update - [ ] update_elasticsearch_domain_config - [ ] upgrade_elasticsearch_domain @@ -1980,6 +2298,8 @@ - [ ] list_tags_for_delivery_stream - [ ] put_record - [ ] put_record_batch +- [ ] start_delivery_stream_encryption +- [ ] stop_delivery_stream_encryption - [ ] tag_delivery_stream - [ ] untag_delivery_stream - [ ] update_destination @@ -1994,10 +2314,24 @@ - [ ] get_notification_channel - [ ] get_policy - [ ] list_compliance_status +- [ ] list_member_accounts - [ ] list_policies - [ ] put_notification_channel - [ ] put_policy +## fsx - 0% implemented +- [ ] create_backup +- [ ] create_file_system +- [ ] create_file_system_from_backup +- [ ] delete_backup +- [ ] delete_file_system +- [ ] describe_backups +- [ ] describe_file_systems +- [ ] list_tags_for_resource +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_file_system + ## gamelift - 0% implemented - [ ] accept_match - [ ] create_alias @@ -2102,7 +2436,26 @@ - [ ] upload_archive - [ ] upload_multipart_part -## glue - 6% implemented +## globalaccelerator - 0% implemented +- [ ] 
create_accelerator +- [ ] create_endpoint_group +- [ ] create_listener +- [ ] delete_accelerator +- [ ] delete_endpoint_group +- [ ] delete_listener +- [ ] describe_accelerator +- [ ] describe_accelerator_attributes +- [ ] describe_endpoint_group +- [ ] describe_listener +- [ ] list_accelerators +- [ ] list_endpoint_groups +- [ ] list_listeners +- [ ] update_accelerator +- [ ] update_accelerator_attributes +- [ ] update_endpoint_group +- [ ] update_listener + +## glue - 5% implemented - [ ] batch_create_partition - [ ] batch_delete_connection - [ ] batch_delete_partition @@ -2118,6 +2471,7 @@ - [ ] create_job - [ ] create_partition - [ ] create_script +- [ ] create_security_configuration - [X] create_table - [ ] create_trigger - [ ] create_user_defined_function @@ -2128,6 +2482,8 @@ - [ ] delete_dev_endpoint - [ ] delete_job - [ ] delete_partition +- [ ] delete_resource_policy +- [ ] delete_security_configuration - [ ] delete_table - [ ] delete_table_version - [ ] delete_trigger @@ -2140,6 +2496,7 @@ - [ ] get_crawler - [ ] get_crawler_metrics - [ ] get_crawlers +- [ ] get_data_catalog_encryption_settings - [X] get_database - [ ] get_databases - [ ] get_dataflow_graph @@ -2153,6 +2510,9 @@ - [ ] get_partition - [ ] get_partitions - [ ] get_plan +- [ ] get_resource_policy +- [ ] get_security_configuration +- [ ] get_security_configurations - [X] get_table - [ ] get_table_version - [ ] get_table_versions @@ -2162,6 +2522,8 @@ - [ ] get_user_defined_function - [ ] get_user_defined_functions - [ ] import_catalog_to_glue +- [ ] put_data_catalog_encryption_settings +- [ ] put_resource_policy - [ ] reset_job_bookmark - [ ] start_crawler - [ ] start_crawler_schedule @@ -2185,6 +2547,8 @@ ## greengrass - 0% implemented - [ ] associate_role_to_group - [ ] associate_service_role_to_account +- [ ] create_connector_definition +- [ ] create_connector_definition_version - [ ] create_core_definition - [ ] create_core_definition_version - [ ] create_deployment @@ -2202,6 +2566,7 @@ - [ ] create_software_update_job - [ ] create_subscription_definition - [ ] create_subscription_definition_version +- [ ] delete_connector_definition - [ ] delete_core_definition - [ ] delete_device_definition - [ ] delete_function_definition @@ -2212,7 +2577,10 @@ - [ ] disassociate_role_from_group - [ ] disassociate_service_role_from_account - [ ] get_associated_role +- [ ] get_bulk_deployment_status - [ ] get_connectivity_info +- [ ] get_connector_definition +- [ ] get_connector_definition_version - [ ] get_core_definition - [ ] get_core_definition_version - [ ] get_deployment_status @@ -2231,6 +2599,10 @@ - [ ] get_service_role_for_account - [ ] get_subscription_definition - [ ] get_subscription_definition_version +- [ ] list_bulk_deployment_detailed_reports +- [ ] list_bulk_deployments +- [ ] list_connector_definition_versions +- [ ] list_connector_definitions - [ ] list_core_definition_versions - [ ] list_core_definitions - [ ] list_deployments @@ -2248,7 +2620,10 @@ - [ ] list_subscription_definition_versions - [ ] list_subscription_definitions - [ ] reset_deployments +- [ ] start_bulk_deployment +- [ ] stop_bulk_deployment - [ ] update_connectivity_info +- [ ] update_connector_definition - [ ] update_core_definition - [ ] update_device_definition - [ ] update_function_definition @@ -2310,7 +2685,7 @@ - [ ] describe_event_types - [ ] describe_events -## iam - 47% implemented +## iam - 43% implemented - [ ] add_client_id_to_open_id_connect_provider - [X] add_role_to_instance_profile - [X] add_user_to_group @@ -2361,6 
+2736,7 @@ - [X] detach_user_policy - [X] enable_mfa_device - [ ] generate_credential_report +- [ ] generate_service_last_accessed_details - [ ] get_access_key_last_used - [X] get_account_authorization_details - [ ] get_account_password_policy @@ -2379,6 +2755,8 @@ - [X] get_role_policy - [ ] get_saml_provider - [X] get_server_certificate +- [ ] get_service_last_accessed_details +- [ ] get_service_last_accessed_details_with_entities - [ ] get_service_linked_role_deletion_status - [ ] get_ssh_public_key - [X] get_user @@ -2397,8 +2775,10 @@ - [X] list_mfa_devices - [ ] list_open_id_connect_providers - [X] list_policies +- [ ] list_policies_granting_service_access - [X] list_policy_versions - [X] list_role_policies +- [ ] list_role_tags - [ ] list_roles - [ ] list_saml_providers - [ ] list_server_certificates @@ -2406,6 +2786,7 @@ - [ ] list_signing_certificates - [ ] list_ssh_public_keys - [X] list_user_policies +- [ ] list_user_tags - [X] list_users - [ ] list_virtual_mfa_devices - [X] put_group_policy @@ -2421,6 +2802,10 @@ - [ ] set_default_policy_version - [ ] simulate_custom_policy - [ ] simulate_principal_policy +- [ ] tag_role +- [ ] tag_user +- [ ] untag_role +- [ ] untag_user - [X] update_access_key - [ ] update_account_password_policy - [ ] update_assume_role_policy @@ -2488,6 +2873,7 @@ ## iot - 31% implemented - [ ] accept_certificate_transfer +- [ ] add_thing_to_billing_group - [X] add_thing_to_thing_group - [ ] associate_targets_with_job - [X] attach_policy @@ -2497,10 +2883,12 @@ - [ ] cancel_audit_task - [ ] cancel_certificate_transfer - [ ] cancel_job -- [ ] cancel_job_execution +- [X] cancel_job_execution - [ ] clear_default_authorizer - [ ] create_authorizer +- [ ] create_billing_group - [ ] create_certificate_from_csr +- [ ] create_dynamic_thing_group - [X] create_job - [X] create_keys_and_certificate - [ ] create_ota_update @@ -2516,10 +2904,12 @@ - [ ] create_topic_rule - [ ] delete_account_audit_configuration - [ ] delete_authorizer +- [ ] delete_billing_group - [ ] delete_ca_certificate - [X] delete_certificate +- [ ] delete_dynamic_thing_group - [ ] delete_job -- [ ] delete_job_execution +- [X] delete_job_execution - [ ] delete_ota_update - [X] delete_policy - [X] delete_policy_version @@ -2537,6 +2927,7 @@ - [ ] describe_account_audit_configuration - [ ] describe_audit_task - [ ] describe_authorizer +- [ ] describe_billing_group - [ ] describe_ca_certificate - [X] describe_certificate - [ ] describe_default_authorizer @@ -2544,7 +2935,7 @@ - [ ] describe_event_configurations - [ ] describe_index - [X] describe_job -- [ ] describe_job_execution +- [X] describe_job_execution - [ ] describe_role_alias - [ ] describe_scheduled_audit - [ ] describe_security_profile @@ -2574,13 +2965,14 @@ - [ ] list_audit_findings - [ ] list_audit_tasks - [ ] list_authorizers +- [ ] list_billing_groups - [ ] list_ca_certificates - [X] list_certificates - [ ] list_certificates_by_ca - [ ] list_indices -- [ ] list_job_executions_for_job -- [ ] list_job_executions_for_thing -- [ ] list_jobs +- [X] list_job_executions_for_job +- [X] list_job_executions_for_thing +- [X] list_jobs - [ ] list_ota_updates - [ ] list_outgoing_certificates - [X] list_policies @@ -2593,6 +2985,7 @@ - [ ] list_security_profiles - [ ] list_security_profiles_for_target - [ ] list_streams +- [ ] list_tags_for_resource - [ ] list_targets_for_policy - [ ] list_targets_for_security_profile - [X] list_thing_groups @@ -2602,6 +2995,7 @@ - [ ] list_thing_registration_tasks - [X] list_thing_types - [X] list_things +- [ ] 
list_things_in_billing_group - [X] list_things_in_thing_group - [ ] list_topic_rules - [ ] list_v2_logging_levels @@ -2610,6 +3004,7 @@ - [ ] register_certificate - [ ] register_thing - [ ] reject_certificate_transfer +- [ ] remove_thing_from_billing_group - [X] remove_thing_from_thing_group - [ ] replace_topic_rule - [ ] search_index @@ -2621,15 +3016,20 @@ - [ ] start_on_demand_audit_task - [ ] start_thing_registration_task - [ ] stop_thing_registration_task +- [ ] tag_resource - [ ] test_authorization - [ ] test_invoke_authorizer - [ ] transfer_certificate +- [ ] untag_resource - [ ] update_account_audit_configuration - [ ] update_authorizer +- [ ] update_billing_group - [ ] update_ca_certificate - [X] update_certificate +- [ ] update_dynamic_thing_group - [ ] update_event_configurations - [ ] update_indexing_configuration +- [ ] update_job - [ ] update_role_alias - [ ] update_scheduled_audit - [ ] update_security_profile @@ -2698,6 +3098,7 @@ - [ ] describe_pipeline - [ ] get_dataset_content - [ ] list_channels +- [ ] list_dataset_contents - [ ] list_datasets - [ ] list_datastores - [ ] list_pipelines @@ -2713,6 +3114,14 @@ - [ ] update_datastore - [ ] update_pipeline +## kafka - 0% implemented +- [ ] create_cluster +- [ ] delete_cluster +- [ ] describe_cluster +- [ ] get_bootstrap_brokers +- [ ] list_clusters +- [ ] list_nodes + ## kinesis - 46% implemented - [X] add_tags_to_stream - [X] create_stream @@ -2770,6 +3179,29 @@ - [ ] stop_application - [ ] update_application +## kinesisanalyticsv2 - 0% implemented +- [ ] add_application_cloud_watch_logging_option +- [ ] add_application_input +- [ ] add_application_input_processing_configuration +- [ ] add_application_output +- [ ] add_application_reference_data_source +- [ ] create_application +- [ ] create_application_snapshot +- [ ] delete_application +- [ ] delete_application_cloud_watch_logging_option +- [ ] delete_application_input_processing_configuration +- [ ] delete_application_output +- [ ] delete_application_reference_data_source +- [ ] delete_application_snapshot +- [ ] describe_application +- [ ] describe_application_snapshot +- [ ] discover_input_schema +- [ ] list_application_snapshots +- [ ] list_applications +- [ ] start_application +- [ ] stop_application +- [ ] update_application + ## kinesisvideo - 0% implemented - [ ] create_stream - [ ] delete_stream @@ -2782,17 +3214,22 @@ - [ ] update_data_retention - [ ] update_stream -## kms - 37% implemented +## kms - 31% implemented - [X] cancel_key_deletion +- [ ] connect_custom_key_store - [ ] create_alias +- [ ] create_custom_key_store - [ ] create_grant - [X] create_key - [ ] decrypt - [X] delete_alias +- [ ] delete_custom_key_store - [ ] delete_imported_key_material +- [ ] describe_custom_key_stores - [X] describe_key - [X] disable_key - [X] disable_key_rotation +- [ ] disconnect_custom_key_store - [X] enable_key - [X] enable_key_rotation - [ ] encrypt @@ -2817,9 +3254,11 @@ - [ ] tag_resource - [ ] untag_resource - [ ] update_alias +- [ ] update_custom_key_store - [ ] update_key_description ## lambda - 0% implemented +- [ ] add_layer_version_permission - [ ] add_permission - [ ] create_alias - [ ] create_event_source_mapping @@ -2828,21 +3267,28 @@ - [ ] delete_event_source_mapping - [ ] delete_function - [ ] delete_function_concurrency +- [ ] delete_layer_version - [ ] get_account_settings - [ ] get_alias - [ ] get_event_source_mapping - [ ] get_function - [ ] get_function_configuration +- [ ] get_layer_version +- [ ] get_layer_version_policy - [ ] get_policy - [ ] 
invoke - [ ] invoke_async - [ ] list_aliases - [ ] list_event_source_mappings - [ ] list_functions +- [ ] list_layer_versions +- [ ] list_layers - [ ] list_tags - [ ] list_versions_by_function +- [ ] publish_layer_version - [ ] publish_version - [ ] put_function_concurrency +- [ ] remove_layer_version_permission - [ ] remove_permission - [ ] tag_resource - [ ] untag_resource @@ -2893,6 +3339,23 @@ - [ ] post_content - [ ] post_text +## license-manager - 0% implemented +- [ ] create_license_configuration +- [ ] delete_license_configuration +- [ ] get_license_configuration +- [ ] get_service_settings +- [ ] list_associations_for_license_configuration +- [ ] list_license_configurations +- [ ] list_license_specifications_for_resource +- [ ] list_resource_inventory +- [ ] list_tags_for_resource +- [ ] list_usage_for_license_configuration +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_license_configuration +- [ ] update_license_specifications_for_resource +- [ ] update_service_settings + ## lightsail - 0% implemented - [ ] allocate_static_ip - [ ] attach_disk @@ -2900,6 +3363,8 @@ - [ ] attach_load_balancer_tls_certificate - [ ] attach_static_ip - [ ] close_instance_public_ports +- [ ] copy_snapshot +- [ ] create_cloud_formation_stack - [ ] create_disk - [ ] create_disk_from_snapshot - [ ] create_disk_snapshot @@ -2911,6 +3376,9 @@ - [ ] create_key_pair - [ ] create_load_balancer - [ ] create_load_balancer_tls_certificate +- [ ] create_relational_database +- [ ] create_relational_database_from_snapshot +- [ ] create_relational_database_snapshot - [ ] delete_disk - [ ] delete_disk_snapshot - [ ] delete_domain @@ -2920,19 +3388,24 @@ - [ ] delete_key_pair - [ ] delete_load_balancer - [ ] delete_load_balancer_tls_certificate +- [ ] delete_relational_database +- [ ] delete_relational_database_snapshot - [ ] detach_disk - [ ] detach_instances_from_load_balancer - [ ] detach_static_ip - [ ] download_default_key_pair +- [ ] export_snapshot - [ ] get_active_names - [ ] get_blueprints - [ ] get_bundles +- [ ] get_cloud_formation_stack_records - [ ] get_disk - [ ] get_disk_snapshot - [ ] get_disk_snapshots - [ ] get_disks - [ ] get_domain - [ ] get_domains +- [ ] get_export_snapshot_records - [ ] get_instance - [ ] get_instance_access_details - [ ] get_instance_metric_data @@ -2951,6 +3424,18 @@ - [ ] get_operations - [ ] get_operations_for_resource - [ ] get_regions +- [ ] get_relational_database +- [ ] get_relational_database_blueprints +- [ ] get_relational_database_bundles +- [ ] get_relational_database_events +- [ ] get_relational_database_log_events +- [ ] get_relational_database_log_streams +- [ ] get_relational_database_master_user_password +- [ ] get_relational_database_metric_data +- [ ] get_relational_database_parameters +- [ ] get_relational_database_snapshot +- [ ] get_relational_database_snapshots +- [ ] get_relational_databases - [ ] get_static_ip - [ ] get_static_ips - [ ] import_key_pair @@ -2959,14 +3444,21 @@ - [ ] peer_vpc - [ ] put_instance_public_ports - [ ] reboot_instance +- [ ] reboot_relational_database - [ ] release_static_ip - [ ] start_instance +- [ ] start_relational_database - [ ] stop_instance +- [ ] stop_relational_database +- [ ] tag_resource - [ ] unpeer_vpc +- [ ] untag_resource - [ ] update_domain_entry - [ ] update_load_balancer_attribute +- [ ] update_relational_database +- [ ] update_relational_database_parameters -## logs - 27% implemented +## logs - 23% implemented - [ ] associate_kms_key - [ ] cancel_export_task - [ ] create_export_task @@ -2984,11 
+3476,15 @@ - [X] describe_log_groups - [X] describe_log_streams - [ ] describe_metric_filters +- [ ] describe_queries - [ ] describe_resource_policies - [ ] describe_subscription_filters - [ ] disassociate_kms_key - [X] filter_log_events - [X] get_log_events +- [ ] get_log_group_fields +- [ ] get_log_record +- [ ] get_query_results - [ ] list_tags_log_group - [ ] put_destination - [ ] put_destination_policy @@ -2997,6 +3493,8 @@ - [ ] put_resource_policy - [ ] put_retention_policy - [ ] put_subscription_filter +- [ ] start_query +- [ ] stop_query - [ ] tag_log_group - [ ] test_metric_filter - [ ] untag_log_group @@ -3047,7 +3545,24 @@ - [ ] generate_data_set - [ ] start_support_data_export +## mediaconnect - 0% implemented +- [ ] add_flow_outputs +- [ ] create_flow +- [ ] delete_flow +- [ ] describe_flow +- [ ] grant_flow_entitlements +- [ ] list_entitlements +- [ ] list_flows +- [ ] remove_flow_output +- [ ] revoke_flow_entitlement +- [ ] start_flow +- [ ] stop_flow +- [ ] update_flow_entitlement +- [ ] update_flow_output +- [ ] update_flow_source + ## mediaconvert - 0% implemented +- [ ] associate_certificate - [ ] cancel_job - [ ] create_job - [ ] create_job_template @@ -3057,6 +3572,7 @@ - [ ] delete_preset - [ ] delete_queue - [ ] describe_endpoints +- [ ] disassociate_certificate - [ ] get_job - [ ] get_job_template - [ ] get_preset @@ -3109,6 +3625,7 @@ - [ ] list_channels - [ ] list_origin_endpoints - [ ] rotate_channel_credentials +- [ ] rotate_ingest_endpoint_credentials - [ ] update_channel - [ ] update_origin_endpoint @@ -3117,12 +3634,15 @@ - [ ] delete_container - [ ] delete_container_policy - [ ] delete_cors_policy +- [ ] delete_lifecycle_policy - [ ] describe_container - [ ] get_container_policy - [ ] get_cors_policy +- [ ] get_lifecycle_policy - [ ] list_containers - [ ] put_container_policy - [ ] put_cors_policy +- [ ] put_lifecycle_policy ## mediastore-data - 0% implemented - [ ] delete_object @@ -3140,6 +3660,7 @@ ## meteringmarketplace - 0% implemented - [ ] batch_meter_usage - [ ] meter_usage +- [ ] register_usage - [ ] resolve_customer ## mgh - 0% implemented @@ -3174,8 +3695,10 @@ ## mq - 0% implemented - [ ] create_broker - [ ] create_configuration +- [ ] create_tags - [ ] create_user - [ ] delete_broker +- [ ] delete_tags - [ ] delete_user - [ ] describe_broker - [ ] describe_configuration @@ -3184,6 +3707,7 @@ - [ ] list_brokers - [ ] list_configuration_revisions - [ ] list_configurations +- [ ] list_tags - [ ] list_users - [ ] reboot_broker - [ ] update_broker @@ -3378,6 +3902,7 @@ - [ ] describe_node_association_status - [ ] describe_servers - [ ] disassociate_node +- [ ] export_server_engine_attribute - [ ] restore_server - [ ] start_maintenance - [ ] update_server @@ -3452,6 +3977,7 @@ - [ ] delete_segment - [ ] delete_sms_channel - [ ] delete_user_endpoints +- [ ] delete_voice_channel - [ ] get_adm_channel - [ ] get_apns_channel - [ ] get_apns_sandbox_channel @@ -3483,6 +4009,7 @@ - [ ] get_segments - [ ] get_sms_channel - [ ] get_user_endpoints +- [ ] get_voice_channel - [ ] phone_number_validate - [ ] put_event_stream - [ ] put_events @@ -3503,6 +4030,46 @@ - [ ] update_gcm_channel - [ ] update_segment - [ ] update_sms_channel +- [ ] update_voice_channel + +## pinpoint-email - 0% implemented +- [ ] create_configuration_set +- [ ] create_configuration_set_event_destination +- [ ] create_dedicated_ip_pool +- [ ] create_deliverability_test_report +- [ ] create_email_identity +- [ ] delete_configuration_set +- [ ] delete_configuration_set_event_destination +- 
[ ] delete_dedicated_ip_pool +- [ ] delete_email_identity +- [ ] get_account +- [ ] get_blacklist_reports +- [ ] get_configuration_set +- [ ] get_configuration_set_event_destinations +- [ ] get_dedicated_ip +- [ ] get_dedicated_ips +- [ ] get_deliverability_dashboard_options +- [ ] get_deliverability_test_report +- [ ] get_domain_statistics_report +- [ ] get_email_identity +- [ ] list_configuration_sets +- [ ] list_dedicated_ip_pools +- [ ] list_deliverability_test_reports +- [ ] list_email_identities +- [ ] put_account_dedicated_ip_warmup_attributes +- [ ] put_account_sending_attributes +- [ ] put_configuration_set_delivery_options +- [ ] put_configuration_set_reputation_options +- [ ] put_configuration_set_sending_options +- [ ] put_configuration_set_tracking_options +- [ ] put_dedicated_ip_in_pool +- [ ] put_dedicated_ip_warmup_attributes +- [ ] put_deliverability_dashboard_option +- [ ] put_email_identity_dkim_attributes +- [ ] put_email_identity_feedback_attributes +- [ ] put_email_identity_mail_from_attributes +- [ ] send_email +- [ ] update_configuration_set_event_destination ## polly - 55% implemented - [X] delete_lexicon @@ -3520,6 +4087,41 @@ - [ ] get_attribute_values - [ ] get_products +## quicksight - 0% implemented +- [ ] create_group +- [ ] create_group_membership +- [ ] delete_group +- [ ] delete_group_membership +- [ ] delete_user +- [ ] describe_group +- [ ] describe_user +- [ ] get_dashboard_embed_url +- [ ] list_group_memberships +- [ ] list_groups +- [ ] list_user_groups +- [ ] list_users +- [ ] register_user +- [ ] update_group +- [ ] update_user + +## ram - 0% implemented +- [ ] accept_resource_share_invitation +- [ ] associate_resource_share +- [ ] create_resource_share +- [ ] delete_resource_share +- [ ] disassociate_resource_share +- [ ] enable_sharing_with_aws_organization +- [ ] get_resource_policies +- [ ] get_resource_share_associations +- [ ] get_resource_share_invitations +- [ ] get_resource_shares +- [ ] list_principals +- [ ] list_resources +- [ ] reject_resource_share_invitation +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_resource_share + ## rds - 0% implemented - [ ] add_role_to_db_cluster - [ ] add_source_identifier_to_subscription @@ -3533,6 +4135,7 @@ - [ ] copy_db_snapshot - [ ] copy_option_group - [ ] create_db_cluster +- [ ] create_db_cluster_endpoint - [ ] create_db_cluster_parameter_group - [ ] create_db_cluster_snapshot - [ ] create_db_instance @@ -3542,26 +4145,32 @@ - [ ] create_db_snapshot - [ ] create_db_subnet_group - [ ] create_event_subscription +- [ ] create_global_cluster - [ ] create_option_group - [ ] delete_db_cluster +- [ ] delete_db_cluster_endpoint - [ ] delete_db_cluster_parameter_group - [ ] delete_db_cluster_snapshot - [ ] delete_db_instance +- [ ] delete_db_instance_automated_backup - [ ] delete_db_parameter_group - [ ] delete_db_security_group - [ ] delete_db_snapshot - [ ] delete_db_subnet_group - [ ] delete_event_subscription +- [ ] delete_global_cluster - [ ] delete_option_group - [ ] describe_account_attributes - [ ] describe_certificates - [ ] describe_db_cluster_backtracks +- [ ] describe_db_cluster_endpoints - [ ] describe_db_cluster_parameter_groups - [ ] describe_db_cluster_parameters - [ ] describe_db_cluster_snapshot_attributes - [ ] describe_db_cluster_snapshots - [ ] describe_db_clusters - [ ] describe_db_engine_versions +- [ ] describe_db_instance_automated_backups - [ ] describe_db_instances - [ ] describe_db_log_files - [ ] describe_db_parameter_groups @@ -3575,6 +4184,7 @@ - [ ] 
describe_event_categories - [ ] describe_event_subscriptions - [ ] describe_events +- [ ] describe_global_clusters - [ ] describe_option_group_options - [ ] describe_option_groups - [ ] describe_orderable_db_instance_options @@ -3588,6 +4198,7 @@ - [ ] list_tags_for_resource - [ ] modify_current_db_cluster_capacity - [ ] modify_db_cluster +- [ ] modify_db_cluster_endpoint - [ ] modify_db_cluster_parameter_group - [ ] modify_db_cluster_snapshot_attribute - [ ] modify_db_instance @@ -3596,11 +4207,13 @@ - [ ] modify_db_snapshot_attribute - [ ] modify_db_subnet_group - [ ] modify_event_subscription +- [ ] modify_global_cluster - [ ] modify_option_group - [ ] promote_read_replica - [ ] promote_read_replica_db_cluster - [ ] purchase_reserved_db_instances_offering - [ ] reboot_db_instance +- [ ] remove_from_global_cluster - [ ] remove_role_from_db_cluster - [ ] remove_source_identifier_from_subscription - [ ] remove_tags_from_resource @@ -3613,13 +4226,21 @@ - [ ] restore_db_instance_from_s3 - [ ] restore_db_instance_to_point_in_time - [ ] revoke_db_security_group_ingress +- [ ] start_db_cluster - [ ] start_db_instance +- [ ] stop_db_cluster - [ ] stop_db_instance -## redshift - 37% implemented +## rds-data - 0% implemented +- [ ] execute_sql + +## redshift - 32% implemented - [ ] accept_reserved_node_exchange - [ ] authorize_cluster_security_group_ingress - [ ] authorize_snapshot_access +- [ ] batch_delete_cluster_snapshots +- [ ] batch_modify_cluster_snapshots +- [ ] cancel_resize - [ ] copy_cluster_snapshot - [X] create_cluster - [X] create_cluster_parameter_group @@ -3630,6 +4251,7 @@ - [ ] create_hsm_client_certificate - [ ] create_hsm_configuration - [X] create_snapshot_copy_grant +- [ ] create_snapshot_schedule - [X] create_tags - [X] delete_cluster - [X] delete_cluster_parameter_group @@ -3640,7 +4262,9 @@ - [ ] delete_hsm_client_certificate - [ ] delete_hsm_configuration - [X] delete_snapshot_copy_grant +- [ ] delete_snapshot_schedule - [X] delete_tags +- [ ] describe_account_attributes - [ ] describe_cluster_db_revisions - [X] describe_cluster_parameter_groups - [ ] describe_cluster_parameters @@ -3662,6 +4286,8 @@ - [ ] describe_reserved_nodes - [ ] describe_resize - [X] describe_snapshot_copy_grants +- [ ] describe_snapshot_schedules +- [ ] describe_storage - [ ] describe_table_restore_status - [X] describe_tags - [ ] disable_logging @@ -3673,10 +4299,14 @@ - [X] modify_cluster - [ ] modify_cluster_db_revision - [ ] modify_cluster_iam_roles +- [ ] modify_cluster_maintenance - [ ] modify_cluster_parameter_group +- [ ] modify_cluster_snapshot +- [ ] modify_cluster_snapshot_schedule - [ ] modify_cluster_subnet_group - [ ] modify_event_subscription - [X] modify_snapshot_copy_retention_period +- [ ] modify_snapshot_schedule - [ ] purchase_reserved_node_offering - [ ] reboot_cluster - [ ] reset_cluster_parameter_group @@ -3744,6 +4374,40 @@ - [ ] tag_resources - [ ] untag_resources +## robomaker - 0% implemented +- [ ] batch_describe_simulation_job +- [ ] cancel_simulation_job +- [ ] create_deployment_job +- [ ] create_fleet +- [ ] create_robot +- [ ] create_robot_application +- [ ] create_robot_application_version +- [ ] create_simulation_application +- [ ] create_simulation_application_version +- [ ] create_simulation_job +- [ ] delete_fleet +- [ ] delete_robot +- [ ] delete_robot_application +- [ ] delete_simulation_application +- [ ] deregister_robot +- [ ] describe_deployment_job +- [ ] describe_fleet +- [ ] describe_robot +- [ ] describe_robot_application +- [ ] 
describe_simulation_application +- [ ] describe_simulation_job +- [ ] list_deployment_jobs +- [ ] list_fleets +- [ ] list_robot_applications +- [ ] list_robots +- [ ] list_simulation_applications +- [ ] list_simulation_jobs +- [ ] register_robot +- [ ] restart_simulation_job +- [ ] sync_deployment_job +- [ ] update_robot_application +- [ ] update_simulation_application + ## route53 - 12% implemented - [ ] associate_vpc_with_hosted_zone - [ ] change_resource_record_sets @@ -3828,7 +4492,31 @@ - [ ] update_tags_for_domain - [ ] view_billing -## s3 - 15% implemented +## route53resolver - 0% implemented +- [ ] associate_resolver_endpoint_ip_address +- [ ] associate_resolver_rule +- [ ] create_resolver_endpoint +- [ ] create_resolver_rule +- [ ] delete_resolver_endpoint +- [ ] delete_resolver_rule +- [ ] disassociate_resolver_endpoint_ip_address +- [ ] disassociate_resolver_rule +- [ ] get_resolver_endpoint +- [ ] get_resolver_rule +- [ ] get_resolver_rule_association +- [ ] get_resolver_rule_policy +- [ ] list_resolver_endpoint_ip_addresses +- [ ] list_resolver_endpoints +- [ ] list_resolver_rule_associations +- [ ] list_resolver_rules +- [ ] list_tags_for_resource +- [ ] put_resolver_rule_policy +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_resolver_endpoint +- [ ] update_resolver_rule + +## s3 - 13% implemented - [ ] abort_multipart_upload - [ ] complete_multipart_upload - [ ] copy_object @@ -3848,6 +4536,7 @@ - [ ] delete_object - [ ] delete_object_tagging - [ ] delete_objects +- [ ] delete_public_access_block - [ ] get_bucket_accelerate_configuration - [X] get_bucket_acl - [ ] get_bucket_analytics_configuration @@ -3862,6 +4551,7 @@ - [ ] get_bucket_notification - [ ] get_bucket_notification_configuration - [X] get_bucket_policy +- [ ] get_bucket_policy_status - [ ] get_bucket_replication - [ ] get_bucket_request_payment - [ ] get_bucket_tagging @@ -3869,8 +4559,12 @@ - [ ] get_bucket_website - [ ] get_object - [ ] get_object_acl +- [ ] get_object_legal_hold +- [ ] get_object_lock_configuration +- [ ] get_object_retention - [ ] get_object_tagging - [ ] get_object_torrent +- [ ] get_public_access_block - [ ] head_bucket - [ ] head_object - [ ] list_bucket_analytics_configurations @@ -3902,56 +4596,97 @@ - [ ] put_bucket_website - [ ] put_object - [ ] put_object_acl +- [ ] put_object_legal_hold +- [ ] put_object_lock_configuration +- [ ] put_object_retention - [ ] put_object_tagging +- [ ] put_public_access_block - [ ] restore_object - [ ] select_object_content - [ ] upload_part - [ ] upload_part_copy +## s3control - 0% implemented +- [ ] delete_public_access_block +- [ ] get_public_access_block +- [ ] put_public_access_block + ## sagemaker - 0% implemented - [ ] add_tags +- [ ] create_algorithm +- [ ] create_code_repository +- [ ] create_compilation_job - [ ] create_endpoint - [ ] create_endpoint_config - [ ] create_hyper_parameter_tuning_job +- [ ] create_labeling_job - [ ] create_model +- [ ] create_model_package - [ ] create_notebook_instance - [ ] create_notebook_instance_lifecycle_config - [ ] create_presigned_notebook_instance_url - [ ] create_training_job - [ ] create_transform_job +- [ ] create_workteam +- [ ] delete_algorithm +- [ ] delete_code_repository - [ ] delete_endpoint - [ ] delete_endpoint_config - [ ] delete_model +- [ ] delete_model_package - [ ] delete_notebook_instance - [ ] delete_notebook_instance_lifecycle_config - [ ] delete_tags +- [ ] delete_workteam +- [ ] describe_algorithm +- [ ] describe_code_repository +- [ ] describe_compilation_job - [ ] 
describe_endpoint - [ ] describe_endpoint_config - [ ] describe_hyper_parameter_tuning_job +- [ ] describe_labeling_job - [ ] describe_model +- [ ] describe_model_package - [ ] describe_notebook_instance - [ ] describe_notebook_instance_lifecycle_config +- [ ] describe_subscribed_workteam - [ ] describe_training_job - [ ] describe_transform_job +- [ ] describe_workteam +- [ ] get_search_suggestions +- [ ] list_algorithms +- [ ] list_code_repositories +- [ ] list_compilation_jobs - [ ] list_endpoint_configs - [ ] list_endpoints - [ ] list_hyper_parameter_tuning_jobs +- [ ] list_labeling_jobs +- [ ] list_labeling_jobs_for_workteam +- [ ] list_model_packages - [ ] list_models - [ ] list_notebook_instance_lifecycle_configs - [ ] list_notebook_instances +- [ ] list_subscribed_workteams - [ ] list_tags - [ ] list_training_jobs - [ ] list_training_jobs_for_hyper_parameter_tuning_job - [ ] list_transform_jobs +- [ ] list_workteams +- [ ] render_ui_template +- [ ] search - [ ] start_notebook_instance +- [ ] stop_compilation_job - [ ] stop_hyper_parameter_tuning_job +- [ ] stop_labeling_job - [ ] stop_notebook_instance - [ ] stop_training_job - [ ] stop_transform_job +- [ ] update_code_repository - [ ] update_endpoint - [ ] update_endpoint_weights_and_capacities - [ ] update_notebook_instance - [ ] update_notebook_instance_lifecycle_config +- [ ] update_workteam ## sagemaker-runtime - 0% implemented - [ ] invoke_endpoint @@ -3988,13 +4723,47 @@ - [ ] update_secret - [ ] update_secret_version_stage +## securityhub - 0% implemented +- [ ] accept_invitation +- [ ] batch_disable_standards +- [ ] batch_enable_standards +- [ ] batch_import_findings +- [ ] create_insight +- [ ] create_members +- [ ] decline_invitations +- [ ] delete_insight +- [ ] delete_invitations +- [ ] delete_members +- [ ] disable_import_findings_for_product +- [ ] disable_security_hub +- [ ] disassociate_from_master_account +- [ ] disassociate_members +- [ ] enable_import_findings_for_product +- [ ] enable_security_hub +- [ ] get_enabled_standards +- [ ] get_findings +- [ ] get_insight_results +- [ ] get_insights +- [ ] get_invitations_count +- [ ] get_master_account +- [ ] get_members +- [ ] invite_members +- [ ] list_enabled_products_for_import +- [ ] list_invitations +- [ ] list_members +- [ ] update_findings +- [ ] update_insight + ## serverlessrepo - 0% implemented - [ ] create_application - [ ] create_application_version - [ ] create_cloud_formation_change_set +- [ ] create_cloud_formation_template - [ ] delete_application - [ ] get_application - [ ] get_application_policy +- [ ] get_cloud_formation_template +- [ ] list_application_dependencies - [ ] list_application_versions - [ ] list_applications - [ ] put_application_policy @@ -4004,7 +4773,10 @@ - [ ] accept_portfolio_share - [ ] associate_principal_with_portfolio - [ ] associate_product_with_portfolio +- [ ] associate_service_action_with_provisioning_artifact - [ ] associate_tag_option_with_resource +- [ ] batch_associate_service_action_with_provisioning_artifact +- [ ] batch_disassociate_service_action_from_provisioning_artifact - [ ] copy_product - [ ] create_constraint - [ ] create_portfolio @@ -4012,6 +4784,7 @@ - [ ] create_product - [ ] create_provisioned_product_plan - [ ] create_provisioning_artifact +- [ ] create_service_action - [ ] create_tag_option - [ ] delete_constraint - [ ] delete_portfolio @@ -4019,10 +4792,12 @@ - [ ] delete_product - [ ] delete_provisioned_product_plan - [ ] delete_provisioning_artifact +- [ ] delete_service_action - [ ] 
delete_tag_option - [ ] describe_constraint - [ ] describe_copy_product_status - [ ] describe_portfolio +- [ ] describe_portfolio_share_status - [ ] describe_product - [ ] describe_product_as_admin - [ ] describe_product_view @@ -4031,22 +4806,32 @@ - [ ] describe_provisioning_artifact - [ ] describe_provisioning_parameters - [ ] describe_record +- [ ] describe_service_action - [ ] describe_tag_option +- [ ] disable_aws_organizations_access - [ ] disassociate_principal_from_portfolio - [ ] disassociate_product_from_portfolio +- [ ] disassociate_service_action_from_provisioning_artifact - [ ] disassociate_tag_option_from_resource +- [ ] enable_aws_organizations_access - [ ] execute_provisioned_product_plan +- [ ] execute_provisioned_product_service_action +- [ ] get_aws_organizations_access_status - [ ] list_accepted_portfolio_shares - [ ] list_constraints_for_portfolio - [ ] list_launch_paths +- [ ] list_organization_portfolio_access - [ ] list_portfolio_access - [ ] list_portfolios - [ ] list_portfolios_for_product - [ ] list_principals_for_portfolio - [ ] list_provisioned_product_plans - [ ] list_provisioning_artifacts +- [ ] list_provisioning_artifacts_for_service_action - [ ] list_record_history - [ ] list_resources_for_tag_option +- [ ] list_service_actions +- [ ] list_service_actions_for_provisioning_artifact - [ ] list_tag_options - [ ] provision_product - [ ] reject_portfolio_share @@ -4060,15 +4845,18 @@ - [ ] update_product - [ ] update_provisioned_product - [ ] update_provisioning_artifact +- [ ] update_service_action - [ ] update_tag_option ## servicediscovery - 0% implemented +- [ ] create_http_namespace - [ ] create_private_dns_namespace - [ ] create_public_dns_namespace - [ ] create_service - [ ] delete_namespace - [ ] delete_service - [ ] deregister_instance +- [ ] discover_instances - [ ] get_instance - [ ] get_instances_health_status - [ ] get_namespace @@ -4174,19 +4962,56 @@ - [ ] update_emergency_contact_settings - [ ] update_subscription +## signer - 0% implemented +- [ ] cancel_signing_profile +- [ ] describe_signing_job +- [ ] get_signing_platform +- [ ] get_signing_profile +- [ ] list_signing_jobs +- [ ] list_signing_platforms +- [ ] list_signing_profiles +- [ ] put_signing_profile +- [ ] start_signing_job + ## sms - 0% implemented +- [ ] create_app - [ ] create_replication_job +- [ ] delete_app +- [ ] delete_app_launch_configuration +- [ ] delete_app_replication_configuration - [ ] delete_replication_job - [ ] delete_server_catalog - [ ] disassociate_connector +- [ ] generate_change_set +- [ ] generate_template +- [ ] get_app +- [ ] get_app_launch_configuration +- [ ] get_app_replication_configuration - [ ] get_connectors - [ ] get_replication_jobs - [ ] get_replication_runs - [ ] get_servers - [ ] import_server_catalog +- [ ] launch_app +- [ ] list_apps +- [ ] put_app_launch_configuration +- [ ] put_app_replication_configuration +- [ ] start_app_replication - [ ] start_on_demand_replication_run +- [ ] stop_app_replication +- [ ] terminate_app +- [ ] update_app - [ ] update_replication_job +## sms-voice - 0% implemented +- [ ] create_configuration_set +- [ ] create_configuration_set_event_destination +- [ ] delete_configuration_set +- [ ] delete_configuration_set_event_destination +- [ ] get_configuration_set_event_destinations +- [ ] send_voice_message +- [ ] update_configuration_set_event_destination + ## snowball - 0% implemented - [ ] cancel_cluster - [ ] cancel_job @@ -4261,9 +5086,10 @@ - [X] tag_queue - [X] untag_queue -## ssm - 11% implemented +## ssm - 
10% implemented - [X] add_tags_to_resource - [ ] cancel_command +- [ ] cancel_maintenance_window_execution - [ ] create_activation - [ ] create_association - [ ] create_association_batch @@ -4304,15 +5130,19 @@ - [ ] describe_maintenance_window_execution_task_invocations - [ ] describe_maintenance_window_execution_tasks - [ ] describe_maintenance_window_executions +- [ ] describe_maintenance_window_schedule - [ ] describe_maintenance_window_targets - [ ] describe_maintenance_window_tasks - [ ] describe_maintenance_windows +- [ ] describe_maintenance_windows_for_target - [ ] describe_parameters - [ ] describe_patch_baselines - [ ] describe_patch_group_state - [ ] describe_patch_groups +- [ ] describe_sessions - [ ] get_automation_execution - [X] get_command_invocation +- [ ] get_connection_status - [ ] get_default_patch_baseline - [ ] get_deployable_patch_snapshot_for_instance - [ ] get_document @@ -4351,11 +5181,14 @@ - [ ] register_target_with_maintenance_window - [ ] register_task_with_maintenance_window - [X] remove_tags_from_resource +- [ ] resume_session - [ ] send_automation_signal - [X] send_command - [ ] start_associations_once - [ ] start_automation_execution +- [ ] start_session - [ ] stop_automation_execution +- [ ] terminate_session - [ ] update_association - [ ] update_association_status - [ ] update_document @@ -4518,6 +5351,7 @@ ## transcribe - 0% implemented - [ ] create_vocabulary +- [ ] delete_transcription_job - [ ] delete_vocabulary - [ ] get_transcription_job - [ ] get_vocabulary @@ -4526,7 +5360,31 @@ - [ ] start_transcription_job - [ ] update_vocabulary +## transfer - 0% implemented +- [ ] create_server +- [ ] create_user +- [ ] delete_server +- [ ] delete_ssh_public_key +- [ ] delete_user +- [ ] describe_server +- [ ] describe_user +- [ ] import_ssh_public_key +- [ ] list_servers +- [ ] list_tags_for_resource +- [ ] list_users +- [ ] start_server +- [ ] stop_server +- [ ] tag_resource +- [ ] test_identity_provider +- [ ] untag_resource +- [ ] update_server +- [ ] update_user + ## translate - 0% implemented +- [ ] delete_terminology +- [ ] get_terminology +- [ ] import_terminology +- [ ] list_terminologies - [ ] translate_text ## waf - 0% implemented @@ -4545,6 +5403,7 @@ - [ ] delete_byte_match_set - [ ] delete_geo_match_set - [ ] delete_ip_set +- [ ] delete_logging_configuration - [ ] delete_permission_policy - [ ] delete_rate_based_rule - [ ] delete_regex_match_set @@ -4560,6 +5419,7 @@ - [ ] get_change_token_status - [ ] get_geo_match_set - [ ] get_ip_set +- [ ] get_logging_configuration - [ ] get_permission_policy - [ ] get_rate_based_rule - [ ] get_rate_based_rule_managed_keys @@ -4576,6 +5436,7 @@ - [ ] list_byte_match_sets - [ ] list_geo_match_sets - [ ] list_ip_sets +- [ ] list_logging_configurations - [ ] list_rate_based_rules - [ ] list_regex_match_sets - [ ] list_regex_pattern_sets @@ -4586,6 +5447,7 @@ - [ ] list_subscribed_rule_groups - [ ] list_web_acls - [ ] list_xss_match_sets +- [ ] put_logging_configuration - [ ] put_permission_policy - [ ] update_byte_match_set - [ ] update_geo_match_set @@ -4617,6 +5479,7 @@ - [ ] delete_byte_match_set - [ ] delete_geo_match_set - [ ] delete_ip_set +- [ ] delete_logging_configuration - [ ] delete_permission_policy - [ ] delete_rate_based_rule - [ ] delete_regex_match_set @@ -4633,6 +5496,7 @@ - [ ] get_change_token_status - [ ] get_geo_match_set - [ ] get_ip_set +- [ ] get_logging_configuration - [ ] get_permission_policy - [ ] get_rate_based_rule - [ ] get_rate_based_rule_managed_keys @@ -4650,6 +5514,7 @@ - [ ] 
list_byte_match_sets - [ ] list_geo_match_sets - [ ] list_ip_sets +- [ ] list_logging_configurations - [ ] list_rate_based_rules - [ ] list_regex_match_sets - [ ] list_regex_pattern_sets @@ -4661,6 +5526,7 @@ - [ ] list_subscribed_rule_groups - [ ] list_web_acls - [ ] list_xss_match_sets +- [ ] put_logging_configuration - [ ] put_permission_policy - [ ] update_byte_match_set - [ ] update_geo_match_set @@ -4709,6 +5575,7 @@ - [ ] get_document_version - [ ] get_folder - [ ] get_folder_path +- [ ] get_resources - [ ] initiate_document_version_upload - [ ] remove_all_resource_permissions - [ ] remove_resource_permission @@ -4758,13 +5625,22 @@ - [ ] create_workspaces - [ ] delete_ip_group - [ ] delete_tags +- [ ] delete_workspace_image +- [ ] describe_account +- [ ] describe_account_modifications +- [ ] describe_client_properties - [ ] describe_ip_groups - [ ] describe_tags - [ ] describe_workspace_bundles - [ ] describe_workspace_directories +- [ ] describe_workspace_images - [ ] describe_workspaces - [ ] describe_workspaces_connection_status - [ ] disassociate_ip_groups +- [ ] import_workspace_image +- [ ] list_available_management_cidr_ranges +- [ ] modify_account +- [ ] modify_client_properties - [ ] modify_workspace_properties - [ ] modify_workspace_state - [ ] reboot_workspaces @@ -4777,10 +5653,21 @@ ## xray - 0% implemented - [ ] batch_get_traces +- [ ] create_group +- [ ] create_sampling_rule +- [ ] delete_group +- [ ] delete_sampling_rule - [ ] get_encryption_config +- [ ] get_group +- [ ] get_groups +- [ ] get_sampling_rules +- [ ] get_sampling_statistic_summaries +- [ ] get_sampling_targets - [ ] get_service_graph - [ ] get_trace_graph - [ ] get_trace_summaries - [ ] put_encryption_config - [ ] put_telemetry_records - [ ] put_trace_segments +- [ ] update_group +- [ ] update_sampling_rule diff --git a/moto/iot/exceptions.py b/moto/iot/exceptions.py index 7bbdb706d..72cf735b2 100644 --- a/moto/iot/exceptions.py +++ b/moto/iot/exceptions.py @@ -1,33 +1,42 @@ -from __future__ import unicode_literals -from moto.core.exceptions import JsonRESTError - - -class IoTClientError(JsonRESTError): - code = 400 - - -class ResourceNotFoundException(IoTClientError): - def __init__(self): - self.code = 404 - super(ResourceNotFoundException, self).__init__( - "ResourceNotFoundException", - "The specified resource does not exist" - ) - - -class InvalidRequestException(IoTClientError): - def __init__(self, msg=None): - self.code = 400 - super(InvalidRequestException, self).__init__( - "InvalidRequestException", - msg or "The request is not valid." - ) - - -class VersionConflictException(IoTClientError): - def __init__(self, name): - self.code = 409 - super(VersionConflictException, self).__init__( - 'VersionConflictException', - 'The version for thing %s does not match the expected version.' % name - ) +from __future__ import unicode_literals +from moto.core.exceptions import JsonRESTError + + +class IoTClientError(JsonRESTError): + code = 400 + + +class ResourceNotFoundException(IoTClientError): + def __init__(self): + self.code = 404 + super(ResourceNotFoundException, self).__init__( + "ResourceNotFoundException", + "The specified resource does not exist" + ) + + +class InvalidRequestException(IoTClientError): + def __init__(self, msg=None): + self.code = 400 + super(InvalidRequestException, self).__init__( + "InvalidRequestException", + msg or "The request is not valid." 
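+            # msg is optional: a backend check can pass a specific reason
+            # (e.g. a hypothetical InvalidRequestException('job document too
+            # large')); otherwise the generic wording above is used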
+ ) + + +class InvalidStateTransitionException(IoTClientError): + def __init__(self, msg=None): + self.code = 409 + super(InvalidStateTransitionException, self).__init__( + "InvalidStateTransitionException", + msg or "An attempt was made to change to an invalid state." + ) + + +class VersionConflictException(IoTClientError): + def __init__(self, name): + self.code = 409 + super(VersionConflictException, self).__init__( + 'VersionConflictException', + 'The version for thing %s does not match the expected version.' % name + ) diff --git a/moto/iot/models.py b/moto/iot/models.py index 1279a5baa..a5128dcb2 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -15,6 +15,7 @@ from moto.core import BaseBackend, BaseModel from .exceptions import ( ResourceNotFoundException, InvalidRequestException, + InvalidStateTransitionException, VersionConflictException ) @@ -247,7 +248,6 @@ class FakeJob(BaseModel): self.document_parameters = document_parameters def to_dict(self): - obj = { 'jobArn': self.job_arn, 'jobId': self.job_id, @@ -260,7 +260,7 @@ class FakeJob(BaseModel): 'comment': self.comment, 'createdAt': self.created_at, 'lastUpdatedAt': self.last_updated_at, - 'completedAt': self.completedAt, + 'completedAt': self.completed_at, 'jobProcessDetails': self.job_process_details, 'documentParameters': self.document_parameters, 'document': self.document, @@ -290,13 +290,13 @@ class FakeJobExecution(BaseModel): self.version_number = 123 self.approximate_seconds_before_time_out = 123 - def to_dict(self): + def to_get_dict(self): obj = { 'jobId': self.job_id, 'status': self.status, - 'forceCancel': self.force_canceled, + 'forceCanceled': self.force_canceled, 'statusDetails': {'detailsMap': self.status_details_map}, - 'thing_arn': self.thing_arn, + 'thingArn': self.thing_arn, 'queuedAt': self.queued_at, 'startedAt': self.started_at, 'lastUpdatedAt': self.last_updated_at, @@ -307,6 +307,21 @@ class FakeJobExecution(BaseModel): return obj + def to_dict(self): + obj = { + 'jobId': self.job_id, + 'thingArn': self.thing_arn, + 'jobExecutionSummary': { + 'status': self.status, + 'queuedAt': self.queued_at, + 'startedAt': self.started_at, + 'lastUpdatedAt': self.last_updated_at, + 'executionNumber': self.execution_number, + } + } + + return obj + class IoTBackend(BaseBackend): def __init__(self, region_name=None): @@ -760,24 +775,114 @@ class IoTBackend(BaseBackend): self.jobs[job_id] = job for thing_arn in targets: - thing_name = thing_arn.split(':')[-1] + thing_name = thing_arn.split(':')[-1].split('/')[-1] job_execution = FakeJobExecution(job_id, thing_arn) self.job_executions[(job_id, thing_name)] = job_execution return job.job_arn, job_id, description def describe_job(self, job_id): - return self.jobs[job_id] + jobs = [_ for _ in self.jobs.values() if _.job_id == job_id] + if len(jobs) == 0: + raise ResourceNotFoundException() + return jobs[0] def get_job_document(self, job_id): return self.jobs[job_id] + def list_jobs(self, status, target_selection, max_results, token, thing_group_name, thing_group_id): + # TODO: implement filters + all_jobs = [_.to_dict() for _ in self.jobs.values()] + filtered_jobs = all_jobs + + if token is None: + jobs = filtered_jobs[0:max_results] + next_token = str(max_results) if len(filtered_jobs) > max_results else None + else: + token = int(token) + jobs = filtered_jobs[token:token + max_results] + next_token = str(token + max_results) if len(filtered_jobs) > token + max_results else None + + return jobs, next_token + def describe_job_execution(self, job_id, thing_name, 
execution_number): - # TODO filter with execution number - return self.job_executions[(job_id, thing_name)] + try: + job_execution = self.job_executions[(job_id, thing_name)] + except KeyError: + raise ResourceNotFoundException() + + if job_execution is None or \ + (execution_number is not None and job_execution.execution_number != execution_number): + raise ResourceNotFoundException() + + return job_execution + + def cancel_job_execution(self, job_id, thing_name, force, expected_version, status_details): + job_execution = self.job_executions[(job_id, thing_name)] + + if job_execution is None: + raise ResourceNotFoundException() + + job_execution.force_canceled = force if force is not None else job_execution.force_canceled + # TODO: implement expected_version and status_details (at most 10 can be specified) + + if job_execution.status == 'IN_PROGRESS' and force: + job_execution.status = 'CANCELED' + self.job_executions[(job_id, thing_name)] = job_execution + elif job_execution.status != 'IN_PROGRESS': + job_execution.status = 'CANCELED' + self.job_executions[(job_id, thing_name)] = job_execution + else: + raise InvalidStateTransitionException() + + def delete_job_execution(self, job_id, thing_name, execution_number, force): + job_execution = self.job_executions[(job_id, thing_name)] + + if job_execution.execution_number != execution_number: + raise ResourceNotFoundException() + + if job_execution.status == 'IN_PROGRESS' and force: + del self.job_executions[(job_id, thing_name)] + elif job_execution.status != 'IN_PROGRESS': + del self.job_executions[(job_id, thing_name)] + else: + raise InvalidStateTransitionException() def list_job_executions_for_job(self, job_id, status, max_results, next_token): - job_executions = [self.job_executions[je] for je in self.job_executions if je[0] == job_id] - # TODO: implement filters + job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[0] == job_id] + + if status is not None: + job_executions = list(filter(lambda elem: + status in elem["status"] and + elem["status"] == status, job_executions)) + + token = next_token + if token is None: + job_executions = job_executions[0:max_results] + next_token = str(max_results) if len(job_executions) > max_results else None + else: + token = int(token) + job_executions = job_executions[token:token + max_results] + next_token = str(token + max_results) if len(job_executions) > token + max_results else None + + return job_executions, next_token + + def list_job_executions_for_thing(self, thing_name, status, max_results, next_token): + job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[1] == thing_name] + + if status is not None: + job_executions = list(filter(lambda elem: + status in elem["status"] and + elem["status"] == status, job_executions)) + + token = next_token + if token is None: + job_executions = job_executions[0:max_results] + next_token = str(max_results) if len(job_executions) > max_results else None + else: + token = int(token) + job_executions = job_executions[token:token + max_results] + next_token = str(token + max_results) if len(job_executions) > token + max_results else None + return job_executions, next_token diff --git a/moto/iot/responses.py b/moto/iot/responses.py index 14302cc2f..577992e7b 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -160,15 +160,83 @@ class IoTResponse(BaseResponse): # TODO: needs to be implemented to get document_source's content from S3 return json.dumps({'document': ''}) - def 
list_job_executions_for_job(self):
-        job_executions, next_token = self.iot_backend.list_job_executions_for_job(job_id=self._get_param("jobId"),
-                                                                                  status=self._get_param("status"),
-                                                                                  max_results=self._get_param(
-                                                                                      "maxResults"),
-                                                                                  next_token=self._get_param(
-                                                                                      "nextToken"))
+    def list_jobs(self):
+        status = self._get_param("status")
+        target_selection = self._get_param("targetSelection")
+        max_results = self._get_int_param("maxResults", 50)  # not the default, but makes testing easier
+        previous_next_token = self._get_param("nextToken")
+        thing_group_name = self._get_param("thingGroupName")
+        thing_group_id = self._get_param("thingGroupId")
+        jobs, next_token = self.iot_backend.list_jobs(status=status,
+                                                      target_selection=target_selection,
+                                                      max_results=max_results,
+                                                      token=previous_next_token,
+                                                      thing_group_name=thing_group_name,
+                                                      thing_group_id=thing_group_id)

-        return json.dumps(dict(executionSummaries=[_.to_dict() for _ in job_executions], nextToken=next_token))
+        return json.dumps(dict(jobs=jobs, nextToken=next_token))
+
+    def describe_job_execution(self):
+        job_id = self._get_param("jobId")
+        thing_name = self._get_param("thingName")
+        execution_number = self._get_int_param("executionNumber")
+        job_execution = self.iot_backend.describe_job_execution(job_id=job_id,
+                                                                thing_name=thing_name,
+                                                                execution_number=execution_number)
+
+        return json.dumps(dict(execution=job_execution.to_get_dict()))
+
+    def cancel_job_execution(self):
+        job_id = self._get_param("jobId")
+        thing_name = self._get_param("thingName")
+        force = self._get_bool_param("force")
+        expected_version = self._get_int_param("expectedVersion")
+        status_details = self._get_param("statusDetails")
+
+        self.iot_backend.cancel_job_execution(job_id=job_id,
+                                              thing_name=thing_name,
+                                              force=force,
+                                              expected_version=expected_version,
+                                              status_details=status_details)
+
+        return json.dumps(dict())
+
+    def delete_job_execution(self):
+        job_id = self._get_param("jobId")
+        thing_name = self._get_param("thingName")
+        execution_number = self._get_int_param("executionNumber")
+        force = self._get_bool_param("force")
+
+        self.iot_backend.delete_job_execution(job_id=job_id,
+                                              thing_name=thing_name,
+                                              execution_number=execution_number,
+                                              force=force)
+
+        return json.dumps(dict())
+
+    def list_job_executions_for_job(self):
+        job_id = self._get_param("jobId")
+        status = self._get_param("status")
+        max_results = self._get_int_param("maxResults", 50)  # not the default, but makes testing easier
+        next_token = self._get_param("nextToken")
+        job_executions, next_token = self.iot_backend.list_job_executions_for_job(job_id=job_id,
+                                                                                  status=status,
+                                                                                  max_results=max_results,
+                                                                                  next_token=next_token)
+
+        return json.dumps(dict(executionSummaries=job_executions, nextToken=next_token))
+
+    def list_job_executions_for_thing(self):
+        thing_name = self._get_param("thingName")
+        status = self._get_param("status")
+        max_results = self._get_int_param("maxResults", 50)  # not the default, but makes testing easier
+        next_token = self._get_param("nextToken")
+        job_executions, next_token = self.iot_backend.list_job_executions_for_thing(thing_name=thing_name,
+                                                                                    status=status,
+                                                                                    max_results=max_results,
+                                                                                    next_token=next_token)
+
+        return json.dumps(dict(executionSummaries=job_executions, nextToken=next_token))

     def create_keys_and_certificate(self):
         set_as_active = self._get_bool_param("setAsActive")
diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py
index d5f277d1d..d39e9818a 100644
--- a/tests/test_iot/test_iot.py
+++ b/tests/test_iot/test_iot.py
@@ 
-4,9 +4,9 @@ import json import sure #noqa import boto3 +from botocore.exceptions import ClientError from moto import mock_iot - @mock_iot def test_attach_policy(): client = boto3.client('iot', region_name='ap-northeast-1') @@ -711,6 +711,69 @@ def test_create_job(): job.should.have.key('description') +@mock_iot +def test_list_jobs(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing# job document + # job_document = { + # "field": "value" + # } + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job1 = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job1.should.have.key('jobId').which.should.equal(job_id) + job1.should.have.key('jobArn') + job1.should.have.key('description') + + job2 = client.create_job( + jobId=job_id+"1", + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job2.should.have.key('jobId').which.should.equal(job_id+"1") + job2.should.have.key('jobArn') + job2.should.have.key('description') + + jobs = client.list_jobs() + jobs.should.have.key('jobs') + jobs.should_not.have.key('nextToken') + jobs['jobs'][0].should.have.key('jobId').which.should.equal(job_id) + jobs['jobs'][1].should.have.key('jobId').which.should.equal(job_id+"1") + + @mock_iot def test_describe_job(): client = boto3.client('iot', region_name='eu-west-1') @@ -875,6 +938,162 @@ def test_get_job_document_with_document(): job_document = client.get_job_document(jobId=job_id) job_document.should.have.key('document').which.should.equal("{\"field\": \"value\"}") + +@mock_iot +def test_describe_job_execution(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + job.should.have.key('description') + + job_execution = client.describe_job_execution(jobId=job_id, thingName=name) + job_execution.should.have.key('execution') + job_execution['execution'].should.have.key('jobId').which.should.equal(job_id) + job_execution['execution'].should.have.key('status').which.should.equal('QUEUED') + job_execution['execution'].should.have.key('forceCanceled').which.should.equal(False) + job_execution['execution'].should.have.key('statusDetails').which.should.equal({'detailsMap': {}}) + 
job_execution['execution'].should.have.key('thingArn').which.should.equal(thing["thingArn"]) + job_execution['execution'].should.have.key('queuedAt') + job_execution['execution'].should.have.key('startedAt') + job_execution['execution'].should.have.key('lastUpdatedAt') + job_execution['execution'].should.have.key('executionNumber').which.should.equal(123) + job_execution['execution'].should.have.key('versionNumber').which.should.equal(123) + job_execution['execution'].should.have.key('approximateSecondsBeforeTimedOut').which.should.equal(123) + + job_execution = client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=123) + job_execution.should.have.key('execution') + job_execution['execution'].should.have.key('jobId').which.should.equal(job_id) + job_execution['execution'].should.have.key('status').which.should.equal('QUEUED') + job_execution['execution'].should.have.key('forceCanceled').which.should.equal(False) + job_execution['execution'].should.have.key('statusDetails').which.should.equal({'detailsMap': {}}) + job_execution['execution'].should.have.key('thingArn').which.should.equal(thing["thingArn"]) + job_execution['execution'].should.have.key('queuedAt') + job_execution['execution'].should.have.key('startedAt') + job_execution['execution'].should.have.key('lastUpdatedAt') + job_execution['execution'].should.have.key('executionNumber').which.should.equal(123) + job_execution['execution'].should.have.key('versionNumber').which.should.equal(123) + job_execution['execution'].should.have.key('approximateSecondsBeforeTimedOut').which.should.equal(123) + + try: + client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=456) + except ClientError as exc: + error_code = exc.response['Error']['Code'] + error_code.should.equal('ResourceNotFoundException') + else: + raise Exception("Should have raised error") + + +@mock_iot +def test_cancel_job_execution(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + job.should.have.key('description') + + client.cancel_job_execution(jobId=job_id, thingName=name) + job_execution = client.describe_job_execution(jobId=job_id, thingName=name) + job_execution.should.have.key('execution') + job_execution['execution'].should.have.key('status').which.should.equal('CANCELED') + + +@mock_iot +def test_delete_job_execution(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + 'roleArn': 
'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + job.should.have.key('description') + + client.delete_job_execution(jobId=job_id, thingName=name, executionNumber=123) + try: + client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=123) + except ClientError as exc: + error_code = exc.response['Error']['Code'] + error_code.should.equal('ResourceNotFoundException') + else: + raise Exception("Should have raised error") + + @mock_iot def test_list_job_executions_for_job(): client = boto3.client('iot', region_name='eu-west-1') @@ -911,5 +1130,44 @@ def test_list_job_executions_for_job(): job_execution = client.list_job_executions_for_job(jobId=job_id) job_execution.should.have.key('executionSummaries') + job_execution['executionSummaries'][0].should.have.key('thingArn').which.should.equal(thing["thingArn"]) + + +@mock_iot +def test_list_job_executions_for_thing(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + job.should.have.key('description') + + job_execution = client.list_job_executions_for_thing(thingName=name) + job_execution.should.have.key('executionSummaries') job_execution['executionSummaries'][0].should.have.key('jobId').which.should.equal(job_id) From 82f476bb46db1def6a485d056fe618acd7f64897 Mon Sep 17 00:00:00 2001 From: Stephan Date: Wed, 9 Jan 2019 16:18:22 +0100 Subject: [PATCH 011/125] adding more job mocks for IoT service --- moto/iot/models.py | 35 ++++++++++++++++- moto/iot/responses.py | 45 +++++++++++---------- tests/test_iot/test_iot.py | 80 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 138 insertions(+), 22 deletions(-) diff --git a/moto/iot/models.py b/moto/iot/models.py index a5128dcb2..ee4211f53 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -226,12 +226,14 @@ class FakeJob(BaseModel): self.targets = targets self.document_source = document_source self.document = document + self.force = False self.description = description self.presigned_url_config = presigned_url_config self.target_selection = target_selection self.job_executions_rollout_config = job_executions_rollout_config - self.status = None # IN_PROGRESS | CANCELED | COMPLETED + self.status = 'QUEUED' # IN_PROGRESS | CANCELED | COMPLETED self.comment = None + self.reason_code = None self.created_at = time.mktime(datetime(2015, 1, 1).timetuple()) self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple()) self.completed_at = None @@ -258,6 +260,8 @@ class FakeJob(BaseModel): 'jobExecutionsRolloutConfig': self.job_executions_rollout_config, 'status': self.status, 'comment': self.comment, + 'forceCanceled': self.force, + 'reasonCode': self.reason_code, 
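+            # forceCanceled/reasonCode default to False/None at creation and
+            # are only populated once cancel_job records the caller's force
+            # and reasonCode arguments on the job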
'createdAt': self.created_at, 'lastUpdatedAt': self.last_updated_at, 'completedAt': self.completed_at, @@ -778,7 +782,7 @@ class IoTBackend(BaseBackend): thing_name = thing_arn.split(':')[-1].split('/')[-1] job_execution = FakeJobExecution(job_id, thing_arn) self.job_executions[(job_id, thing_name)] = job_execution - return job.job_arn, job_id, description + return job def describe_job(self, job_id): jobs = [_ for _ in self.jobs.values() if _.job_id == job_id] @@ -786,6 +790,33 @@ class IoTBackend(BaseBackend): raise ResourceNotFoundException() return jobs[0] + def delete_job(self, job_id, force): + job = self.jobs[job_id] + + if job.status == 'IN_PROGRESS' and force: + del self.jobs[job_id] + elif job.status != 'IN_PROGRESS': + del self.jobs[job_id] + else: + raise InvalidStateTransitionException() + + def cancel_job(self, job_id, reason_code, comment, force): + job = self.jobs[job_id] + + job.reason_code = reason_code if reason_code is not None else job.reason_code + job.comment = comment if comment is not None else job.comment + job.force = force if force is not None and force != job.force else job.force + job.status = 'CANCELED' + + if job.status == 'IN_PROGRESS' and force: + self.jobs[job_id] = job + elif job.status != 'IN_PROGRESS': + self.jobs[job_id] = job + else: + raise InvalidStateTransitionException() + + return job + def get_job_document(self, job_id): return self.jobs[job_id] diff --git a/moto/iot/responses.py b/moto/iot/responses.py index 577992e7b..3dc95e9f6 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -115,7 +115,7 @@ class IoTResponse(BaseResponse): return json.dumps(dict()) def create_job(self): - job_arn, job_id, description = self.iot_backend.create_job( + job = self.iot_backend.create_job( job_id=self._get_param("jobId"), targets=self._get_param("targets"), description=self._get_param("description"), @@ -127,28 +127,33 @@ class IoTResponse(BaseResponse): document_parameters=self._get_param("documentParameters") ) - return json.dumps(dict(jobArn=job_arn, jobId=job_id, description=description)) + return json.dumps(job.to_dict()) def describe_job(self): job = self.iot_backend.describe_job(job_id=self._get_param("jobId")) - return json.dumps(dict( - documentSource=job.document_source, - job=dict( - comment=job.comment, - completedAt=job.completed_at, - createdAt=job.created_at, - description=job.description, - documentParameters=job.document_parameters, - jobArn=job.job_arn, - jobExecutionsRolloutConfig=job.job_executions_rollout_config, - jobId=job.job_id, - jobProcessDetails=job.job_process_details, - lastUpdatedAt=job.last_updated_at, - presignedUrlConfig=job.presigned_url_config, - status=job.status, - targets=job.targets, - targetSelection=job.target_selection - ))) + return json.dumps(dict(documentSource=job.document_source, job=job.to_dict())) + + def delete_job(self): + job_id = self._get_param("jobId") + force = self._get_bool_param("force") + + self.iot_backend.delete_job(job_id=job_id, + force=force) + + return json.dumps(dict()) + + def cancel_job(self): + job_id = self._get_param("jobId") + reason_code = self._get_param("reasonCode") + comment = self._get_param("comment") + force = self._get_bool_param("force") + + job = self.iot_backend.cancel_job(job_id=job_id, + reason_code=reason_code, + comment=comment, + force=force) + + return json.dumps(job.to_dict()) def get_job_document(self): job = self.iot_backend.get_job_document(job_id=self._get_param("jobId")) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 
d39e9818a..3cf412796 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -872,6 +872,86 @@ def test_describe_job_1(): "maximumPerMinute").which.should.equal(10) +@mock_iot +def test_delete_job(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job = client.describe_job(jobId=job_id) + job.should.have.key('job') + job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) + + client.delete_job(jobId=job_id) + + client.list_jobs()['jobs'].should.have.length_of(0) + + +@mock_iot +def test_cancel_job(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job = client.describe_job(jobId=job_id) + job.should.have.key('job') + job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) + + job = client.cancel_job(jobId=job_id, reasonCode='Because', comment='You are') + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job = client.describe_job(jobId=job_id) + job.should.have.key('job') + job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) + job.should.have.key('job').which.should.have.key("status").which.should.equal('CANCELED') + job.should.have.key('job').which.should.have.key("forceCanceled").which.should.equal(False) + job.should.have.key('job').which.should.have.key("reasonCode").which.should.equal('Because') + job.should.have.key('job').which.should.have.key("comment").which.should.equal('You are') + + @mock_iot def test_get_job_document_with_document_source(): client = boto3.client('iot', region_name='eu-west-1') From ded89416fbffe2b40a4b83e94c663f7c08ecc6ae Mon Sep 17 00:00:00 2001 From: Stephan Date: Mon, 14 Jan 2019 12:19:43 +0100 Subject: [PATCH 012/125] updated implementation coverage --- IMPLEMENTATION_COVERAGE.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index fcfe31835..8ac5f8e5e 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -2871,7 +2871,7 @@ - [ ] unsubscribe_from_event - [ ] update_assessment_target -## iot - 31% implemented +## iot - 33% implemented - [ ] accept_certificate_transfer - [ ] 
add_thing_to_billing_group
 - [X] add_thing_to_thing_group
@@ -2882,7 +2882,7 @@
 - [X] attach_thing_principal
 - [ ] cancel_audit_task
 - [ ] cancel_certificate_transfer
-- [ ] cancel_job
+- [X] cancel_job
 - [X] cancel_job_execution
 - [ ] clear_default_authorizer
 - [ ] create_authorizer
@@ -2908,7 +2908,7 @@
 - [ ] delete_ca_certificate
 - [X] delete_certificate
 - [ ] delete_dynamic_thing_group
-- [ ] delete_job
+- [X] delete_job
 - [X] delete_job_execution
 - [ ] delete_ota_update
 - [X] delete_policy
@@ -5213,11 +5213,14 @@
 - [ ] list_activities
 - [ ] list_executions
 - [ ] list_state_machines
+- [ ] list_tags_for_resource
 - [ ] send_task_failure
 - [ ] send_task_heartbeat
 - [ ] send_task_success
 - [ ] start_execution
 - [ ] stop_execution
+- [ ] tag_resource
+- [ ] untag_resource
 - [ ] update_state_machine

 ## storagegateway - 0% implemented

From 498419462dcf4f28846fb95d52868185f8834e34 Mon Sep 17 00:00:00 2001
From: Stephan
Date: Mon, 29 Apr 2019 13:26:33 +0200
Subject: [PATCH 013/125] updated

---
 file.tmp                   |   9 +++
 moto/iot/exceptions.py     |  11 ++-
 moto/iot/models.py         | 149 ++++++++++++------------------------
 moto/iot/responses.py      |  50 ++++++++-----
 tests/test_iot/test_iot.py | 134 ++++++++++++++++++++++++++++++++-
 5 files changed, 231 insertions(+), 122 deletions(-)
 create mode 100644 file.tmp

diff --git a/file.tmp b/file.tmp
new file mode 100644
index 000000000..0b91630a9
--- /dev/null
+++ b/file.tmp
@@ -0,0 +1,9 @@
+
+  AWSTemplateFormatVersion: '2010-09-09'
+  Description: Simple CloudFormation Test Template
+  Resources:
+    S3Bucket:
+      Type: AWS::S3::Bucket
+      Properties:
+        AccessControl: PublicRead
+        BucketName: cf-test-bucket-1
diff --git a/moto/iot/exceptions.py b/moto/iot/exceptions.py
index b8b3f1e84..b5725d8fe 100644
--- a/moto/iot/exceptions.py
+++ b/moto/iot/exceptions.py
@@ -24,6 +24,17 @@ class InvalidRequestException(IoTClientError):
         )


+class InvalidStateTransitionException(IoTClientError):
+    def __init__(self, msg=None):
+        self.code = 409
+        super(InvalidStateTransitionException, self).__init__(
+            "InvalidStateTransitionException",
+            msg or "An attempt was made to change to an invalid state."
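+            # surfaced as HTTP 409; e.g. delete_job on a job that is still
+            # IN_PROGRESS raises this unless force=True is passed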
+ ) + + class VersionConflictException(IoTClientError): def __init__(self, name): self.code = 409 @@ -47,4 +56,4 @@ class DeleteConflictException(IoTClientError): self.code = 409 super(DeleteConflictException, self).__init__( 'DeleteConflictException', msg - ) \ No newline at end of file + ) diff --git a/moto/iot/models.py b/moto/iot/models.py index ee4211f53..9dcefbb83 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -13,6 +13,8 @@ import boto3 from moto.core import BaseBackend, BaseModel from .exceptions import ( + CertificateStateException, + DeleteConflictException, ResourceNotFoundException, InvalidRequestException, InvalidStateTransitionException, @@ -226,14 +228,12 @@ class FakeJob(BaseModel): self.targets = targets self.document_source = document_source self.document = document - self.force = False self.description = description self.presigned_url_config = presigned_url_config self.target_selection = target_selection self.job_executions_rollout_config = job_executions_rollout_config self.status = 'QUEUED' # IN_PROGRESS | CANCELED | COMPLETED self.comment = None - self.reason_code = None self.created_at = time.mktime(datetime(2015, 1, 1).timetuple()) self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple()) self.completed_at = None @@ -260,11 +260,9 @@ class FakeJob(BaseModel): 'jobExecutionsRolloutConfig': self.job_executions_rollout_config, 'status': self.status, 'comment': self.comment, - 'forceCanceled': self.force, - 'reasonCode': self.reason_code, 'createdAt': self.created_at, 'lastUpdatedAt': self.last_updated_at, - 'completedAt': self.completed_at, + 'completedAt': self.completedAt, 'jobProcessDetails': self.job_process_details, 'documentParameters': self.document_parameters, 'document': self.document, @@ -477,7 +475,25 @@ class IoTBackend(BaseBackend): return certificate, key_pair def delete_certificate(self, certificate_id): - self.describe_certificate(certificate_id) + cert = self.describe_certificate(certificate_id) + if cert.status == 'ACTIVE': + raise CertificateStateException( + 'Certificate must be deactivated (not ACTIVE) before deletion.', certificate_id) + + certs = [k[0] for k, v in self.principal_things.items() + if self._get_principal(k[0]).certificate_id == certificate_id] + if len(certs) > 0: + raise DeleteConflictException( + 'Things must be detached before deletion (arn: %s)' % certs[0] + ) + + certs = [k[0] for k, v in self.principal_policies.items() + if self._get_principal(k[0]).certificate_id == certificate_id] + if len(certs) > 0: + raise DeleteConflictException( + 'Certificate policies must be detached before deletion (arn: %s)' % certs[0] + ) + del self.certificates[certificate_id] def describe_certificate(self, certificate_id): @@ -532,6 +548,14 @@ class IoTBackend(BaseBackend): return policies[0] def delete_policy(self, policy_name): + + policies = [k[1] for k, v in self.principal_policies.items() if k[1] == policy_name] + if len(policies) > 0: + raise DeleteConflictException( + 'The policy cannot be deleted as the policy is attached to one or more principals (name=%s)' + % policy_name + ) + policy = self.get_policy(policy_name) del self.policies[policy.name] @@ -601,6 +625,14 @@ class IoTBackend(BaseBackend): pass raise ResourceNotFoundException() + def attach_policy(self, policy_name, target): + principal = self._get_principal(target) + policy = self.get_policy(policy_name) + k = (target, policy_name) + if k in self.principal_policies: + return + self.principal_policies[k] = (principal, policy) + def 
attach_principal_policy(self, policy_name, principal_arn): principal = self._get_principal(principal_arn) policy = self.get_policy(policy_name) @@ -609,6 +641,15 @@ class IoTBackend(BaseBackend): return self.principal_policies[k] = (principal, policy) + def detach_policy(self, policy_name, target): + # this may raises ResourceNotFoundException + self._get_principal(target) + self.get_policy(policy_name) + k = (target, policy_name) + if k not in self.principal_policies: + raise ResourceNotFoundException() + del self.principal_policies[k] + def detach_principal_policy(self, policy_name, principal_arn): # this may raises ResourceNotFoundException self._get_principal(principal_arn) @@ -820,102 +861,6 @@ class IoTBackend(BaseBackend): def get_job_document(self, job_id): return self.jobs[job_id] - def list_jobs(self, status, target_selection, max_results, token, thing_group_name, thing_group_id): - # TODO: implement filters - all_jobs = [_.to_dict() for _ in self.jobs.values()] - filtered_jobs = all_jobs - - if token is None: - jobs = filtered_jobs[0:max_results] - next_token = str(max_results) if len(filtered_jobs) > max_results else None - else: - token = int(token) - jobs = filtered_jobs[token:token + max_results] - next_token = str(token + max_results) if len(filtered_jobs) > token + max_results else None - - return jobs, next_token - - def describe_job_execution(self, job_id, thing_name, execution_number): - try: - job_execution = self.job_executions[(job_id, thing_name)] - except KeyError: - raise ResourceNotFoundException() - - if job_execution is None or \ - (execution_number is not None and job_execution.execution_number != execution_number): - raise ResourceNotFoundException() - - return job_execution - - def cancel_job_execution(self, job_id, thing_name, force, expected_version, status_details): - job_execution = self.job_executions[(job_id, thing_name)] - - if job_execution is None: - raise ResourceNotFoundException() - - job_execution.force_canceled = force if force is not None else job_execution.force_canceled - # TODO: implement expected_version and status_details (at most 10 can be specified) - - if job_execution.status == 'IN_PROGRESS' and force: - job_execution.status = 'CANCELED' - self.job_executions[(job_id, thing_name)] = job_execution - elif job_execution.status != 'IN_PROGRESS': - job_execution.status = 'CANCELED' - self.job_executions[(job_id, thing_name)] = job_execution - else: - raise InvalidStateTransitionException() - - def delete_job_execution(self, job_id, thing_name, execution_number, force): - job_execution = self.job_executions[(job_id, thing_name)] - - if job_execution.execution_number != execution_number: - raise ResourceNotFoundException() - - if job_execution.status == 'IN_PROGRESS' and force: - del self.job_executions[(job_id, thing_name)] - elif job_execution.status != 'IN_PROGRESS': - del self.job_executions[(job_id, thing_name)] - else: - raise InvalidStateTransitionException() - - def list_job_executions_for_job(self, job_id, status, max_results, next_token): - job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[0] == job_id] - - if status is not None: - job_executions = list(filter(lambda elem: - status in elem["status"] and - elem["status"] == status, job_executions)) - - token = next_token - if token is None: - job_executions = job_executions[0:max_results] - next_token = str(max_results) if len(job_executions) > max_results else None - else: - token = int(token) - job_executions = job_executions[token:token + 
max_results] - next_token = str(token + max_results) if len(job_executions) > token + max_results else None - - return job_executions, next_token - - def list_job_executions_for_thing(self, thing_name, status, max_results, next_token): - job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[1] == thing_name] - - if status is not None: - job_executions = list(filter(lambda elem: - status in elem["status"] and - elem["status"] == status, job_executions)) - - token = next_token - if token is None: - job_executions = job_executions[0:max_results] - next_token = str(max_results) if len(job_executions) > max_results else None - else: - token = int(token) - job_executions = job_executions[token:token + max_results] - next_token = str(token + max_results) if len(job_executions) > token + max_results else None - - return job_executions, next_token - available_regions = boto3.session.Session().get_available_regions("iot") iot_backends = {region: IoTBackend(region) for region in available_regions} diff --git a/moto/iot/responses.py b/moto/iot/responses.py index 3dc95e9f6..0a941ccfc 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -115,23 +115,39 @@ class IoTResponse(BaseResponse): return json.dumps(dict()) def create_job(self): - job = self.iot_backend.create_job( + job_arn, job_id, description = self.iot_backend.create_job( job_id=self._get_param("jobId"), targets=self._get_param("targets"), description=self._get_param("description"), document_source=self._get_param("documentSource"), document=self._get_param("document"), - presigned_url_config=self._get_param("presignedUrlConfig"), - target_selection=self._get_param("targetSelection"), + presigned_url_config=self._get_param("presignedUrlConfig"), target_selection=self._get_param("targetSelection"), job_executions_rollout_config=self._get_param("jobExecutionsRolloutConfig"), document_parameters=self._get_param("documentParameters") ) - return json.dumps(job.to_dict()) + return json.dumps(dict(jobArn=job_arn, jobId=job_id, description=description)) def describe_job(self): job = self.iot_backend.describe_job(job_id=self._get_param("jobId")) - return json.dumps(dict(documentSource=job.document_source, job=job.to_dict())) + return json.dumps(dict( + documentSource=job.document_source, + job=dict( + comment=job.comment, + completedAt=job.completed_at, + createdAt=job.created_at, + description=job.description, + documentParameters=job.document_parameters, + jobArn=job.job_arn, + jobExecutionsRolloutConfig=job.job_executions_rollout_config, + jobId=job.job_id, + jobProcessDetails=job.job_process_details, + lastUpdatedAt=job.last_updated_at, + presignedUrlConfig=job.presigned_url_config, + status=job.status, + targets=job.targets, + targetSelection=job.target_selection + ))) def delete_job(self): job_id = self._get_param("jobId") @@ -140,8 +156,6 @@ class IoTResponse(BaseResponse): self.iot_backend.delete_job(job_id=job_id, force=force) - return json.dumps(dict()) - def cancel_job(self): job_id = self._get_param("jobId") reason_code = self._get_param("reasonCode") @@ -354,19 +368,10 @@ class IoTResponse(BaseResponse): def attach_policy(self): policy_name = self._get_param("policyName") - principal = self._get_param('target') + target = self._get_param('target') self.iot_backend.attach_policy( policy_name=policy_name, - target=principal, - ) - return json.dumps(dict()) - - def detach_policy(self): - policy_name = self._get_param("policyName") - principal = self._get_param('target') - 
self.iot_backend.detach_policy( - policy_name=policy_name, - target=principal, + target=target, ) return json.dumps(dict()) @@ -390,6 +395,15 @@ class IoTResponse(BaseResponse): ) return json.dumps(dict()) + def detach_policy(self): + policy_name = self._get_param("policyName") + target = self._get_param('target') + self.iot_backend.detach_policy( + policy_name=policy_name, + target=target, + ) + return json.dumps(dict()) + def detach_principal_policy(self): policy_name = self._get_param("policyName") principal = self.headers.get('x-amzn-iot-principal') diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 3cf412796..33497a382 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -4,8 +4,9 @@ import json import sure #noqa import boto3 -from botocore.exceptions import ClientError from moto import mock_iot +from botocore.exceptions import ClientError +from nose.tools import assert_raises @mock_iot def test_attach_policy(): @@ -384,6 +385,96 @@ def test_certs(): res.should.have.key('certificates').which.should.have.length_of(0) +@mock_iot +def test_delete_policy_validation(): + doc = """{ + "Version": "2012-10-17", + "Statement":[ + { + "Effect":"Allow", + "Action":[ + "iot: *" + ], + "Resource":"*" + } + ] + } + """ + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + policy_name = 'my-policy' + client.create_policy(policyName=policy_name, policyDocument=doc) + client.attach_principal_policy(policyName=policy_name, principal=cert_arn) + + with assert_raises(ClientError) as e: + client.delete_policy(policyName=policy_name) + e.exception.response['Error']['Message'].should.contain( + 'The policy cannot be deleted as the policy is attached to one or more principals (name=%s)' % policy_name) + res = client.list_policies() + res.should.have.key('policies').which.should.have.length_of(1) + + client.detach_principal_policy(policyName=policy_name, principal=cert_arn) + client.delete_policy(policyName=policy_name) + res = client.list_policies() + res.should.have.key('policies').which.should.have.length_of(0) + + +@mock_iot +def test_delete_certificate_validation(): + doc = """{ + "Version": "2012-10-17", + "Statement":[ + { + "Effect":"Allow", + "Action":[ + "iot: *" + ], + "Resource":"*" + } + ] + } + """ + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=True) + cert_id = cert['certificateId'] + cert_arn = cert['certificateArn'] + policy_name = 'my-policy' + thing_name = 'thing-1' + client.create_policy(policyName=policy_name, policyDocument=doc) + client.attach_principal_policy(policyName=policy_name, principal=cert_arn) + client.create_thing(thingName=thing_name) + client.attach_thing_principal(thingName=thing_name, principal=cert_arn) + + with assert_raises(ClientError) as e: + client.delete_certificate(certificateId=cert_id) + e.exception.response['Error']['Message'].should.contain( + 'Certificate must be deactivated (not ACTIVE) before deletion.') + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + + client.update_certificate(certificateId=cert_id, newStatus='REVOKED') + with assert_raises(ClientError) as e: + client.delete_certificate(certificateId=cert_id) + e.exception.response['Error']['Message'].should.contain( + 'Things must be detached before deletion (arn: %s)' % cert_arn) + res = client.list_certificates() + 
res.should.have.key('certificates').which.should.have.length_of(1) + + client.detach_thing_principal(thingName=thing_name, principal=cert_arn) + with assert_raises(ClientError) as e: + client.delete_certificate(certificateId=cert_id) + e.exception.response['Error']['Message'].should.contain( + 'Certificate policies must be detached before deletion (arn: %s)' % cert_arn) + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + + client.detach_principal_policy(policyName=policy_name, principal=cert_arn) + client.delete_certificate(certificateId=cert_id) + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(0) + + @mock_iot def test_certs_create_inactive(): client = boto3.client('iot', region_name='ap-northeast-1') @@ -432,6 +523,47 @@ def test_policy(): @mock_iot def test_principal_policy(): + client = boto3.client('iot', region_name='ap-northeast-1') + policy_name = 'my-policy' + doc = '{}' + client.create_policy(policyName=policy_name, policyDocument=doc) + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + + client.attach_policy(policyName=policy_name, target=cert_arn) + + res = client.list_principal_policies(principal=cert_arn) + res.should.have.key('policies').which.should.have.length_of(1) + for policy in res['policies']: + policy.should.have.key('policyName').which.should_not.be.none + policy.should.have.key('policyArn').which.should_not.be.none + + # do nothing if policy have already attached to certificate + client.attach_policy(policyName=policy_name, target=cert_arn) + + res = client.list_principal_policies(principal=cert_arn) + res.should.have.key('policies').which.should.have.length_of(1) + for policy in res['policies']: + policy.should.have.key('policyName').which.should_not.be.none + policy.should.have.key('policyArn').which.should_not.be.none + + res = client.list_policy_principals(policyName=policy_name) + res.should.have.key('principals').which.should.have.length_of(1) + for principal in res['principals']: + principal.should_not.be.none + + client.detach_policy(policyName=policy_name, target=cert_arn) + res = client.list_principal_policies(principal=cert_arn) + res.should.have.key('policies').which.should.have.length_of(0) + res = client.list_policy_principals(policyName=policy_name) + res.should.have.key('principals').which.should.have.length_of(0) + with assert_raises(ClientError) as e: + client.detach_policy(policyName=policy_name, target=cert_arn) + e.exception.response['Error']['Code'].should.equal('ResourceNotFoundException') + + +@mock_iot +def test_principal_policy_deprecated(): client = boto3.client('iot', region_name='ap-northeast-1') policy_name = 'my-policy' doc = '{}' From d98e96ddd7bff6cebbc6d1eb4b66703d6b38605b Mon Sep 17 00:00:00 2001 From: Stephan Date: Mon, 29 Apr 2019 13:32:42 +0200 Subject: [PATCH 014/125] :rotating_light: --- moto/iot/models.py | 17 ----------------- moto/iot/responses.py | 3 ++- 2 files changed, 2 insertions(+), 18 deletions(-) diff --git a/moto/iot/models.py b/moto/iot/models.py index 9dcefbb83..f34164483 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -625,14 +625,6 @@ class IoTBackend(BaseBackend): pass raise ResourceNotFoundException() - def attach_policy(self, policy_name, target): - principal = self._get_principal(target) - policy = self.get_policy(policy_name) - k = (target, policy_name) - if k in self.principal_policies: - return - self.principal_policies[k] = (principal, 
policy) - def attach_principal_policy(self, policy_name, principal_arn): principal = self._get_principal(principal_arn) policy = self.get_policy(policy_name) @@ -641,15 +633,6 @@ class IoTBackend(BaseBackend): return self.principal_policies[k] = (principal, policy) - def detach_policy(self, policy_name, target): - # this may raises ResourceNotFoundException - self._get_principal(target) - self.get_policy(policy_name) - k = (target, policy_name) - if k not in self.principal_policies: - raise ResourceNotFoundException() - del self.principal_policies[k] - def detach_principal_policy(self, policy_name, principal_arn): # this may raises ResourceNotFoundException self._get_principal(principal_arn) diff --git a/moto/iot/responses.py b/moto/iot/responses.py index 0a941ccfc..5b805465b 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -121,7 +121,8 @@ class IoTResponse(BaseResponse): description=self._get_param("description"), document_source=self._get_param("documentSource"), document=self._get_param("document"), - presigned_url_config=self._get_param("presignedUrlConfig"), target_selection=self._get_param("targetSelection"), + presigned_url_config=self._get_param("presignedUrlConfig"), + target_selection=self._get_param("targetSelection"), job_executions_rollout_config=self._get_param("jobExecutionsRolloutConfig"), document_parameters=self._get_param("documentParameters") ) From 86c127142bc0a85b7fffbde6fa437d26ada5ea08 Mon Sep 17 00:00:00 2001 From: Stephan Date: Mon, 29 Apr 2019 15:44:10 +0200 Subject: [PATCH 015/125] :white_check_mark: --- moto/core/models.py | 5 -- moto/iot/models.py | 103 +++++++++++++++++++++++++++++++++++++++++- moto/iot/responses.py | 4 ++ 3 files changed, 105 insertions(+), 7 deletions(-) diff --git a/moto/core/models.py b/moto/core/models.py index 9fe1e96bd..491e9f451 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -22,11 +22,6 @@ from .utils import ( ) -# "Mock" the AWS credentials as they can't be mocked in Botocore currently -os.environ.setdefault("AWS_ACCESS_KEY_ID", "foobar_key") -os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "foobar_secret") - - class BaseMockAWS(object): nested_count = 0 diff --git a/moto/iot/models.py b/moto/iot/models.py index f34164483..855591ffc 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -228,12 +228,14 @@ class FakeJob(BaseModel): self.targets = targets self.document_source = document_source self.document = document + self.force = False self.description = description self.presigned_url_config = presigned_url_config self.target_selection = target_selection self.job_executions_rollout_config = job_executions_rollout_config self.status = 'QUEUED' # IN_PROGRESS | CANCELED | COMPLETED self.comment = None + self.reason_code = None self.created_at = time.mktime(datetime(2015, 1, 1).timetuple()) self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple()) self.completed_at = None @@ -260,9 +262,11 @@ class FakeJob(BaseModel): 'jobExecutionsRolloutConfig': self.job_executions_rollout_config, 'status': self.status, 'comment': self.comment, + 'forceCanceled': self.force, + 'reasonCode': self.reason_code, 'createdAt': self.created_at, 'lastUpdatedAt': self.last_updated_at, - 'completedAt': self.completedAt, + 'completedAt': self.completed_at, 'jobProcessDetails': self.job_process_details, 'documentParameters': self.document_parameters, 'document': self.document, @@ -806,7 +810,7 @@ class IoTBackend(BaseBackend): thing_name = thing_arn.split(':')[-1].split('/')[-1] job_execution = 
FakeJobExecution(job_id, thing_arn) self.job_executions[(job_id, thing_name)] = job_execution - return job + return job.job_arn, job_id, description def describe_job(self, job_id): jobs = [_ for _ in self.jobs.values() if _.job_id == job_id] @@ -844,6 +848,101 @@ class IoTBackend(BaseBackend): def get_job_document(self, job_id): return self.jobs[job_id] + def list_jobs(self, status, target_selection, max_results, token, thing_group_name, thing_group_id): + # TODO: implement filters + all_jobs = [_.to_dict() for _ in self.jobs.values()] + filtered_jobs = all_jobs + + if token is None: + jobs = filtered_jobs[0:max_results] + next_token = str(max_results) if len(filtered_jobs) > max_results else None + else: + token = int(token) + jobs = filtered_jobs[token:token + max_results] + next_token = str(token + max_results) if len(filtered_jobs) > token + max_results else None + + return jobs, next_token + + def describe_job_execution(self, job_id, thing_name, execution_number): + try: + job_execution = self.job_executions[(job_id, thing_name)] + except KeyError: + raise ResourceNotFoundException() + + if job_execution is None or \ + (execution_number is not None and job_execution.execution_number != execution_number): + raise ResourceNotFoundException() + + return job_execution + + def cancel_job_execution(self, job_id, thing_name, force, expected_version, status_details): + job_execution = self.job_executions[(job_id, thing_name)] + + if job_execution is None: + raise ResourceNotFoundException() + + job_execution.force_canceled = force if force is not None else job_execution.force_canceled + # TODO: implement expected_version and status_details (at most 10 can be specified) + + if job_execution.status == 'IN_PROGRESS' and force: + job_execution.status = 'CANCELED' + self.job_executions[(job_id, thing_name)] = job_execution + elif job_execution.status != 'IN_PROGRESS': + job_execution.status = 'CANCELED' + self.job_executions[(job_id, thing_name)] = job_execution + else: + raise InvalidStateTransitionException() + + def delete_job_execution(self, job_id, thing_name, execution_number, force): + job_execution = self.job_executions[(job_id, thing_name)] + + if job_execution.execution_number != execution_number: + raise ResourceNotFoundException() + + if job_execution.status == 'IN_PROGRESS' and force: + del self.job_executions[(job_id, thing_name)] + elif job_execution.status != 'IN_PROGRESS': + del self.job_executions[(job_id, thing_name)] + else: + raise InvalidStateTransitionException() + + def list_job_executions_for_job(self, job_id, status, max_results, next_token): + job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[0] == job_id] + + if status is not None: + job_executions = list(filter(lambda elem: + status in elem["status"] and + elem["status"] == status, job_executions)) + + token = next_token + if token is None: + job_executions = job_executions[0:max_results] + next_token = str(max_results) if len(job_executions) > max_results else None + else: + token = int(token) + job_executions = job_executions[token:token + max_results] + next_token = str(token + max_results) if len(job_executions) > token + max_results else None + + return job_executions, next_token + + def list_job_executions_for_thing(self, thing_name, status, max_results, next_token): + job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[1] == thing_name] + + if status is not None: + job_executions = list(filter(lambda elem: + status in elem["status"] and + 
elem["status"] == status, job_executions)) + + token = next_token + if token is None: + job_executions = job_executions[0:max_results] + next_token = str(max_results) if len(job_executions) > max_results else None + else: + token = int(token) + job_executions = job_executions[token:token + max_results] + next_token = str(token + max_results) if len(job_executions) > token + max_results else None + + return job_executions, next_token available_regions = boto3.session.Session().get_available_regions("iot") iot_backends = {region: IoTBackend(region) for region in available_regions} diff --git a/moto/iot/responses.py b/moto/iot/responses.py index 5b805465b..df0b99871 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -139,6 +139,8 @@ class IoTResponse(BaseResponse): createdAt=job.created_at, description=job.description, documentParameters=job.document_parameters, + forceCanceled=job.force, + reasonCode=job.reason_code, jobArn=job.job_arn, jobExecutionsRolloutConfig=job.job_executions_rollout_config, jobId=job.job_id, @@ -157,6 +159,8 @@ class IoTResponse(BaseResponse): self.iot_backend.delete_job(job_id=job_id, force=force) + return json.dumps(dict()) + def cancel_job(self): job_id = self._get_param("jobId") reason_code = self._get_param("reasonCode") From a07533792d07e38d0483176cc4724a6f2e271dc6 Mon Sep 17 00:00:00 2001 From: Stephan Date: Mon, 29 Apr 2019 15:44:17 +0200 Subject: [PATCH 016/125] :memo: --- IMPLEMENTATION_COVERAGE.md | 129 +++++++++++++++++++------------------ 1 file changed, 65 insertions(+), 64 deletions(-) diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index ba91eddbd..26ea1972a 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -1,3 +1,4 @@ + ## acm - 41% implemented - [X] add_tags_to_certificate - [X] delete_certificate @@ -440,7 +441,7 @@ - [ ] start_query_execution - [ ] stop_query_execution -## autoscaling - 42% implemented +## autoscaling - 44% implemented - [X] attach_instances - [X] attach_load_balancer_target_groups - [X] attach_load_balancers @@ -491,7 +492,7 @@ - [ ] resume_processes - [X] set_desired_capacity - [X] set_instance_health -- [ ] set_instance_protection +- [X] set_instance_protection - [X] suspend_processes - [ ] terminate_instance_in_auto_scaling_group - [X] update_auto_scaling_group @@ -645,19 +646,19 @@ - [ ] upgrade_applied_schema - [ ] upgrade_published_schema -## cloudformation - 20% implemented +## cloudformation - 40% implemented - [ ] cancel_update_stack - [ ] continue_update_rollback - [X] create_change_set - [X] create_stack -- [ ] create_stack_instances -- [ ] create_stack_set -- [ ] delete_change_set +- [X] create_stack_instances +- [X] create_stack_set +- [X] delete_change_set - [X] delete_stack -- [ ] delete_stack_instances -- [ ] delete_stack_set +- [X] delete_stack_instances +- [X] delete_stack_set - [ ] describe_account_limits -- [ ] describe_change_set +- [X] describe_change_set - [ ] describe_stack_drift_detection_status - [ ] describe_stack_events - [ ] describe_stack_instance @@ -674,7 +675,7 @@ - [ ] get_stack_policy - [ ] get_template - [ ] get_template_summary -- [ ] list_change_sets +- [X] list_change_sets - [X] list_exports - [ ] list_imports - [ ] list_stack_instances @@ -688,9 +689,9 @@ - [ ] stop_stack_set_operation - [X] update_stack - [ ] update_stack_instances -- [ ] update_stack_set +- [X] update_stack_set - [ ] update_termination_protection -- [ ] validate_template +- [X] validate_template ## cloudfront - 0% implemented - [ ] 
create_cloud_front_origin_access_identity @@ -1023,9 +1024,9 @@ - [ ] unlink_identity - [ ] update_identity_pool -## cognito-idp - 27% implemented +## cognito-idp - 36% implemented - [ ] add_custom_attributes -- [ ] admin_add_user_to_group +- [X] admin_add_user_to_group - [ ] admin_confirm_sign_up - [X] admin_create_user - [X] admin_delete_user @@ -1039,9 +1040,9 @@ - [X] admin_initiate_auth - [ ] admin_link_provider_for_user - [ ] admin_list_devices -- [ ] admin_list_groups_for_user +- [X] admin_list_groups_for_user - [ ] admin_list_user_auth_events -- [ ] admin_remove_user_from_group +- [X] admin_remove_user_from_group - [ ] admin_reset_user_password - [ ] admin_respond_to_auth_challenge - [ ] admin_set_user_mfa_preference @@ -1055,14 +1056,14 @@ - [ ] confirm_device - [X] confirm_forgot_password - [ ] confirm_sign_up -- [ ] create_group +- [X] create_group - [X] create_identity_provider - [ ] create_resource_server - [ ] create_user_import_job - [X] create_user_pool - [X] create_user_pool_client - [X] create_user_pool_domain -- [ ] delete_group +- [X] delete_group - [X] delete_identity_provider - [ ] delete_resource_server - [ ] delete_user @@ -1081,7 +1082,7 @@ - [ ] forgot_password - [ ] get_csv_header - [ ] get_device -- [ ] get_group +- [X] get_group - [ ] get_identity_provider_by_identifier - [ ] get_signing_certificate - [ ] get_ui_customization @@ -1091,14 +1092,14 @@ - [ ] global_sign_out - [ ] initiate_auth - [ ] list_devices -- [ ] list_groups +- [X] list_groups - [X] list_identity_providers - [ ] list_resource_servers - [ ] list_user_import_jobs - [X] list_user_pool_clients - [X] list_user_pools - [X] list_users -- [ ] list_users_in_group +- [X] list_users_in_group - [ ] resend_confirmation_code - [X] respond_to_auth_challenge - [ ] set_risk_configuration @@ -1112,7 +1113,7 @@ - [ ] update_auth_event_feedback - [ ] update_device_status - [ ] update_group -- [ ] update_identity_provider +- [X] update_identity_provider - [ ] update_resource_server - [ ] update_user_attributes - [ ] update_user_pool @@ -1188,14 +1189,14 @@ - [ ] detect_entities - [ ] detect_phi -## config - 0% implemented +## config - 19% implemented - [ ] batch_get_aggregate_resource_config - [ ] batch_get_resource_config - [ ] delete_aggregation_authorization - [ ] delete_config_rule - [ ] delete_configuration_aggregator -- [ ] delete_configuration_recorder -- [ ] delete_delivery_channel +- [X] delete_configuration_recorder +- [X] delete_delivery_channel - [ ] delete_evaluation_results - [ ] delete_pending_aggregation_request - [ ] delete_retention_configuration @@ -1208,10 +1209,10 @@ - [ ] describe_config_rules - [ ] describe_configuration_aggregator_sources_status - [ ] describe_configuration_aggregators -- [ ] describe_configuration_recorder_status -- [ ] describe_configuration_recorders +- [X] describe_configuration_recorder_status +- [X] describe_configuration_recorders - [ ] describe_delivery_channel_status -- [ ] describe_delivery_channels +- [X] describe_delivery_channels - [ ] describe_pending_aggregation_requests - [ ] describe_retention_configurations - [ ] get_aggregate_compliance_details_by_config_rule @@ -1229,13 +1230,13 @@ - [ ] put_aggregation_authorization - [ ] put_config_rule - [ ] put_configuration_aggregator -- [ ] put_configuration_recorder -- [ ] put_delivery_channel +- [X] put_configuration_recorder +- [X] put_delivery_channel - [ ] put_evaluations - [ ] put_retention_configuration - [ ] start_config_rules_evaluation -- [ ] start_configuration_recorder -- [ ] 
stop_configuration_recorder +- [X] start_configuration_recorder +- [X] stop_configuration_recorder ## connect - 0% implemented - [ ] create_user @@ -1616,11 +1617,11 @@ - [ ] update_table - [ ] update_time_to_live -## dynamodbstreams - 0% implemented -- [ ] describe_stream -- [ ] get_records -- [ ] get_shard_iterator -- [ ] list_streams +## dynamodbstreams - 100% implemented +- [X] describe_stream +- [X] get_records +- [X] get_shard_iterator +- [X] list_streams ## ec2 - 30% implemented - [ ] accept_reserved_instances_exchange_quote @@ -2684,7 +2685,7 @@ - [ ] describe_event_types - [ ] describe_events -## iam - 43% implemented +## iam - 56% implemented - [ ] add_client_id_to_open_id_connect_provider - [X] add_role_to_instance_profile - [X] add_user_to_group @@ -2701,7 +2702,7 @@ - [X] create_policy - [X] create_policy_version - [X] create_role -- [ ] create_saml_provider +- [X] create_saml_provider - [ ] create_service_linked_role - [ ] create_service_specific_credential - [X] create_user @@ -2720,11 +2721,11 @@ - [X] delete_role - [ ] delete_role_permissions_boundary - [X] delete_role_policy -- [ ] delete_saml_provider +- [X] delete_saml_provider - [X] delete_server_certificate - [ ] delete_service_linked_role - [ ] delete_service_specific_credential -- [ ] delete_signing_certificate +- [X] delete_signing_certificate - [ ] delete_ssh_public_key - [X] delete_user - [ ] delete_user_permissions_boundary @@ -2736,7 +2737,7 @@ - [X] enable_mfa_device - [ ] generate_credential_report - [ ] generate_service_last_accessed_details -- [ ] get_access_key_last_used +- [X] get_access_key_last_used - [X] get_account_authorization_details - [ ] get_account_password_policy - [ ] get_account_summary @@ -2752,7 +2753,7 @@ - [X] get_policy_version - [X] get_role - [X] get_role_policy -- [ ] get_saml_provider +- [X] get_saml_provider - [X] get_server_certificate - [ ] get_service_last_accessed_details - [ ] get_service_last_accessed_details_with_entities @@ -2777,12 +2778,12 @@ - [ ] list_policies_granting_service_access - [X] list_policy_versions - [X] list_role_policies -- [ ] list_role_tags -- [ ] list_roles -- [ ] list_saml_providers +- [X] list_role_tags +- [X] list_roles +- [X] list_saml_providers - [ ] list_server_certificates - [ ] list_service_specific_credentials -- [ ] list_signing_certificates +- [X] list_signing_certificates - [ ] list_ssh_public_keys - [X] list_user_policies - [ ] list_user_tags @@ -2801,9 +2802,9 @@ - [ ] set_default_policy_version - [ ] simulate_custom_policy - [ ] simulate_principal_policy -- [ ] tag_role +- [X] tag_role - [ ] tag_user -- [ ] untag_role +- [X] untag_role - [ ] untag_user - [X] update_access_key - [ ] update_account_password_policy @@ -2811,16 +2812,16 @@ - [ ] update_group - [X] update_login_profile - [ ] update_open_id_connect_provider_thumbprint -- [ ] update_role -- [ ] update_role_description -- [ ] update_saml_provider +- [X] update_role +- [X] update_role_description +- [X] update_saml_provider - [ ] update_server_certificate - [ ] update_service_specific_credential -- [ ] update_signing_certificate +- [X] update_signing_certificate - [ ] update_ssh_public_key -- [ ] update_user +- [X] update_user - [ ] upload_server_certificate -- [ ] upload_signing_certificate +- [X] upload_signing_certificate - [ ] upload_ssh_public_key ## importexport - 0% implemented @@ -3213,7 +3214,7 @@ - [ ] update_data_retention - [ ] update_stream -## kms - 31% implemented +## kms - 41% implemented - [X] cancel_key_deletion - [ ] connect_custom_key_store - [ ] create_alias @@ 
-3232,7 +3233,7 @@ - [X] enable_key - [X] enable_key_rotation - [ ] encrypt -- [ ] generate_data_key +- [X] generate_data_key - [ ] generate_data_key_without_plaintext - [ ] generate_random - [X] get_key_policy @@ -3243,18 +3244,18 @@ - [ ] list_grants - [ ] list_key_policies - [X] list_keys -- [ ] list_resource_tags +- [X] list_resource_tags - [ ] list_retirable_grants - [X] put_key_policy - [ ] re_encrypt - [ ] retire_grant - [ ] revoke_grant - [X] schedule_key_deletion -- [ ] tag_resource +- [X] tag_resource - [ ] untag_resource - [ ] update_alias - [ ] update_custom_key_store -- [ ] update_key_description +- [X] update_key_description ## lambda - 0% implemented - [ ] add_layer_version_permission @@ -4702,20 +4703,20 @@ - [ ] put_attributes - [ ] select -## secretsmanager - 27% implemented +## secretsmanager - 44% implemented - [ ] cancel_rotate_secret - [X] create_secret - [ ] delete_resource_policy -- [ ] delete_secret +- [X] delete_secret - [X] describe_secret - [X] get_random_password - [ ] get_resource_policy - [X] get_secret_value - [ ] list_secret_version_ids -- [ ] list_secrets +- [X] list_secrets - [ ] put_resource_policy - [ ] put_secret_value -- [ ] restore_secret +- [X] restore_secret - [X] rotate_secret - [ ] tag_resource - [ ] untag_resource From 8cd62728c603b8d0ff973ddad9e55b50769350b5 Mon Sep 17 00:00:00 2001 From: Stephan Date: Mon, 29 Apr 2019 15:50:13 +0200 Subject: [PATCH 017/125] :rotating_light: --- moto/core/models.py | 1 - moto/iot/models.py | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/core/models.py b/moto/core/models.py index 491e9f451..19267ca08 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -4,7 +4,6 @@ from __future__ import absolute_import import functools import inspect -import os import re import six from io import BytesIO diff --git a/moto/iot/models.py b/moto/iot/models.py index 855591ffc..5dea4ee66 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -944,5 +944,6 @@ class IoTBackend(BaseBackend): return job_executions, next_token + available_regions = boto3.session.Session().get_available_regions("iot") iot_backends = {region: IoTBackend(region) for region in available_regions} From 263d85834917693344e3b6915c95be310e8c537e Mon Sep 17 00:00:00 2001 From: Stephan Date: Mon, 29 Apr 2019 16:21:41 +0200 Subject: [PATCH 018/125] setting envvars --- moto/core/models.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/moto/core/models.py b/moto/core/models.py index 19267ca08..9fe1e96bd 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -4,6 +4,7 @@ from __future__ import absolute_import import functools import inspect +import os import re import six from io import BytesIO @@ -21,6 +22,11 @@ from .utils import ( ) +# "Mock" the AWS credentials as they can't be mocked in Botocore currently +os.environ.setdefault("AWS_ACCESS_KEY_ID", "foobar_key") +os.environ.setdefault("AWS_SECRET_ACCESS_KEY", "foobar_secret") + + class BaseMockAWS(object): nested_count = 0 From 3020ee408ae2c12a377019f5e1d0d10fc0f6a284 Mon Sep 17 00:00:00 2001 From: Stephan Date: Tue, 28 May 2019 08:56:49 +0200 Subject: [PATCH 019/125] Merged iot --- moto/iot/models.py | 1922 ++++++++++++------------ moto/iot/responses.py | 1226 ++++++++-------- tests/test_iot/test_iot.py | 2818 ++++++++++++++++++------------------ 3 files changed, 2983 insertions(+), 2983 deletions(-) diff --git a/moto/iot/models.py b/moto/iot/models.py index 4399e8790..89d71dd14 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -1,961 +1,961 @@ -from 
__future__ import unicode_literals - -import hashlib -import random -import re -import string -import time -import uuid -from collections import OrderedDict -from datetime import datetime - -import boto3 - -from moto.core import BaseBackend, BaseModel -from .exceptions import ( - CertificateStateException, - DeleteConflictException, - ResourceNotFoundException, - InvalidRequestException, - InvalidStateTransitionException, - VersionConflictException -) - - -class FakeThing(BaseModel): - def __init__(self, thing_name, thing_type, attributes, region_name): - self.region_name = region_name - self.thing_name = thing_name - self.thing_type = thing_type - self.attributes = attributes - self.arn = 'arn:aws:iot:%s:1:thing/%s' % (self.region_name, thing_name) - self.version = 1 - # TODO: we need to handle 'version'? - - # for iot-data - self.thing_shadow = None - - def to_dict(self, include_default_client_id=False): - obj = { - 'thingName': self.thing_name, - 'thingArn': self.arn, - 'attributes': self.attributes, - 'version': self.version - } - if self.thing_type: - obj['thingTypeName'] = self.thing_type.thing_type_name - if include_default_client_id: - obj['defaultClientId'] = self.thing_name - return obj - - -class FakeThingType(BaseModel): - def __init__(self, thing_type_name, thing_type_properties, region_name): - self.region_name = region_name - self.thing_type_name = thing_type_name - self.thing_type_properties = thing_type_properties - self.thing_type_id = str(uuid.uuid4()) # I don't know the rule of id - t = time.time() - self.metadata = { - 'deprecated': False, - 'creationData': int(t * 1000) / 1000.0 - } - self.arn = 'arn:aws:iot:%s:1:thingtype/%s' % (self.region_name, thing_type_name) - - def to_dict(self): - return { - 'thingTypeName': self.thing_type_name, - 'thingTypeId': self.thing_type_id, - 'thingTypeProperties': self.thing_type_properties, - 'thingTypeMetadata': self.metadata - } - - -class FakeThingGroup(BaseModel): - def __init__(self, thing_group_name, parent_group_name, thing_group_properties, region_name): - self.region_name = region_name - self.thing_group_name = thing_group_name - self.thing_group_id = str(uuid.uuid4()) # I don't know the rule of id - self.version = 1 # TODO: tmp - self.parent_group_name = parent_group_name - self.thing_group_properties = thing_group_properties or {} - t = time.time() - self.metadata = { - 'creationData': int(t * 1000) / 1000.0 - } - self.arn = 'arn:aws:iot:%s:1:thinggroup/%s' % (self.region_name, thing_group_name) - self.things = OrderedDict() - - def to_dict(self): - return { - 'thingGroupName': self.thing_group_name, - 'thingGroupId': self.thing_group_id, - 'version': self.version, - 'thingGroupProperties': self.thing_group_properties, - 'thingGroupMetadata': self.metadata - } - - -class FakeCertificate(BaseModel): - def __init__(self, certificate_pem, status, region_name, ca_certificate_pem=None): - m = hashlib.sha256() - m.update(str(uuid.uuid4()).encode('utf-8')) - self.certificate_id = m.hexdigest() - self.arn = 'arn:aws:iot:%s:1:cert/%s' % (region_name, self.certificate_id) - self.certificate_pem = certificate_pem - self.status = status - - # TODO: must adjust - self.owner = '1' - self.transfer_data = {} - self.creation_date = time.time() - self.last_modified_date = self.creation_date - - self.ca_certificate_id = None - self.ca_certificate_pem = ca_certificate_pem - if ca_certificate_pem: - m.update(str(uuid.uuid4()).encode('utf-8')) - self.ca_certificate_id = m.hexdigest() - - def to_dict(self): - return { - 'certificateArn': 
self.arn, - 'certificateId': self.certificate_id, - 'caCertificateId': self.ca_certificate_id, - 'status': self.status, - 'creationDate': self.creation_date - } - - def to_description_dict(self): - """ - You might need keys below in some situation - - caCertificateId - - previousOwnedBy - """ - return { - 'certificateArn': self.arn, - 'certificateId': self.certificate_id, - 'status': self.status, - 'certificatePem': self.certificate_pem, - 'ownedBy': self.owner, - 'creationDate': self.creation_date, - 'lastModifiedDate': self.last_modified_date, - 'transferData': self.transfer_data - } - - -class FakePolicy(BaseModel): - def __init__(self, name, document, region_name, default_version_id='1'): - self.name = name - self.document = document - self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, name) - self.default_version_id = default_version_id - self.versions = [FakePolicyVersion(self.name, document, True, region_name)] - - def to_get_dict(self): - return { - 'policyName': self.name, - 'policyArn': self.arn, - 'policyDocument': self.document, - 'defaultVersionId': self.default_version_id - } - - def to_dict_at_creation(self): - return { - 'policyName': self.name, - 'policyArn': self.arn, - 'policyDocument': self.document, - 'policyVersionId': self.default_version_id - } - - def to_dict(self): - return { - 'policyName': self.name, - 'policyArn': self.arn, - } - - -class FakePolicyVersion(object): - - def __init__(self, - policy_name, - document, - is_default, - region_name): - self.name = policy_name - self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, policy_name) - self.document = document or {} - self.is_default = is_default - self.version_id = '1' - - self.create_datetime = time.mktime(datetime(2015, 1, 1).timetuple()) - self.last_modified_datetime = time.mktime(datetime(2015, 1, 2).timetuple()) - - def to_get_dict(self): - return { - 'policyName': self.name, - 'policyArn': self.arn, - 'policyDocument': self.document, - 'policyVersionId': self.version_id, - 'isDefaultVersion': self.is_default, - 'creationDate': self.create_datetime, - 'lastModifiedDate': self.last_modified_datetime, - 'generationId': self.version_id - } - - def to_dict_at_creation(self): - return { - 'policyArn': self.arn, - 'policyDocument': self.document, - 'policyVersionId': self.version_id, - 'isDefaultVersion': self.is_default - } - - def to_dict(self): - return { - 'versionId': self.version_id, - 'isDefaultVersion': self.is_default, - 'createDate': self.create_datetime, - } - - -class FakeJob(BaseModel): - JOB_ID_REGEX_PATTERN = "[a-zA-Z0-9_-]" - JOB_ID_REGEX = re.compile(JOB_ID_REGEX_PATTERN) - - def __init__(self, job_id, targets, document_source, document, description, presigned_url_config, target_selection, - job_executions_rollout_config, document_parameters, region_name): - if not self._job_id_matcher(self.JOB_ID_REGEX, job_id): - raise InvalidRequestException() - - self.region_name = region_name - self.job_id = job_id - self.job_arn = 'arn:aws:iot:%s:1:job/%s' % (self.region_name, job_id) - self.targets = targets - self.document_source = document_source - self.document = document - self.force = False - self.description = description - self.presigned_url_config = presigned_url_config - self.target_selection = target_selection - self.job_executions_rollout_config = job_executions_rollout_config - self.status = 'QUEUED' # IN_PROGRESS | CANCELED | COMPLETED - self.comment = None - self.reason_code = None - self.created_at = time.mktime(datetime(2015, 1, 1).timetuple()) - self.last_updated_at = 
time.mktime(datetime(2015, 1, 1).timetuple()) - self.completed_at = None - self.job_process_details = { - 'processingTargets': targets, - 'numberOfQueuedThings': 1, - 'numberOfCanceledThings': 0, - 'numberOfSucceededThings': 0, - 'numberOfFailedThings': 0, - 'numberOfRejectedThings': 0, - 'numberOfInProgressThings': 0, - 'numberOfRemovedThings': 0 - } - self.document_parameters = document_parameters - - def to_dict(self): - obj = { - 'jobArn': self.job_arn, - 'jobId': self.job_id, - 'targets': self.targets, - 'description': self.description, - 'presignedUrlConfig': self.presigned_url_config, - 'targetSelection': self.target_selection, - 'jobExecutionsRolloutConfig': self.job_executions_rollout_config, - 'status': self.status, - 'comment': self.comment, - 'forceCanceled': self.force, - 'reasonCode': self.reason_code, - 'createdAt': self.created_at, - 'lastUpdatedAt': self.last_updated_at, - 'completedAt': self.completed_at, - 'jobProcessDetails': self.job_process_details, - 'documentParameters': self.document_parameters, - 'document': self.document, - 'documentSource': self.document_source - } - - return obj - - def _job_id_matcher(self, regex, argument): - regex_match = regex.match(argument) - length_match = len(argument) <= 64 - return regex_match and length_match - - -class FakeJobExecution(BaseModel): - - def __init__(self, job_id, thing_arn, status='QUEUED', force_canceled=False, status_details_map={}): - self.job_id = job_id - self.status = status # IN_PROGRESS | CANCELED | COMPLETED - self.force_canceled = force_canceled - self.status_details_map = status_details_map - self.thing_arn = thing_arn - self.queued_at = time.mktime(datetime(2015, 1, 1).timetuple()) - self.started_at = time.mktime(datetime(2015, 1, 1).timetuple()) - self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple()) - self.execution_number = 123 - self.version_number = 123 - self.approximate_seconds_before_time_out = 123 - - def to_get_dict(self): - obj = { - 'jobId': self.job_id, - 'status': self.status, - 'forceCanceled': self.force_canceled, - 'statusDetails': {'detailsMap': self.status_details_map}, - 'thingArn': self.thing_arn, - 'queuedAt': self.queued_at, - 'startedAt': self.started_at, - 'lastUpdatedAt': self.last_updated_at, - 'executionNumber': self.execution_number, - 'versionNumber': self.version_number, - 'approximateSecondsBeforeTimedOut': self.approximate_seconds_before_time_out - } - - return obj - - def to_dict(self): - obj = { - 'jobId': self.job_id, - 'thingArn': self.thing_arn, - 'jobExecutionSummary': { - 'status': self.status, - 'queuedAt': self.queued_at, - 'startedAt': self.started_at, - 'lastUpdatedAt': self.last_updated_at, - 'executionNumber': self.execution_number, - } - } - - return obj - - -class IoTBackend(BaseBackend): - def __init__(self, region_name=None): - super(IoTBackend, self).__init__() - self.region_name = region_name - self.things = OrderedDict() - self.jobs = OrderedDict() - self.job_executions = OrderedDict() - self.thing_types = OrderedDict() - self.thing_groups = OrderedDict() - self.certificates = OrderedDict() - self.policies = OrderedDict() - self.principal_policies = OrderedDict() - self.principal_things = OrderedDict() - - def reset(self): - region_name = self.region_name - self.__dict__ = {} - self.__init__(region_name) - - def create_thing(self, thing_name, thing_type_name, attribute_payload): - thing_types = self.list_thing_types() - thing_type = None - if thing_type_name: - filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == 
thing_type_name] - if len(filtered_thing_types) == 0: - raise ResourceNotFoundException() - thing_type = filtered_thing_types[0] - if attribute_payload is None: - attributes = {} - elif 'attributes' not in attribute_payload: - attributes = {} - else: - attributes = attribute_payload['attributes'] - thing = FakeThing(thing_name, thing_type, attributes, self.region_name) - self.things[thing.arn] = thing - return thing.thing_name, thing.arn - - def create_thing_type(self, thing_type_name, thing_type_properties): - if thing_type_properties is None: - thing_type_properties = {} - thing_type = FakeThingType(thing_type_name, thing_type_properties, self.region_name) - self.thing_types[thing_type.arn] = thing_type - return thing_type.thing_type_name, thing_type.arn - - def list_thing_types(self, thing_type_name=None): - if thing_type_name: - # It's weird but thing_type_name is filtered by forward match, not complete match - return [_ for _ in self.thing_types.values() if _.thing_type_name.startswith(thing_type_name)] - return self.thing_types.values() - - def list_things(self, attribute_name, attribute_value, thing_type_name, max_results, token): - all_things = [_.to_dict() for _ in self.things.values()] - if attribute_name is not None and thing_type_name is not None: - filtered_things = list(filter(lambda elem: - attribute_name in elem["attributes"] and - elem["attributes"][attribute_name] == attribute_value and - "thingTypeName" in elem and - elem["thingTypeName"] == thing_type_name, all_things)) - elif attribute_name is not None and thing_type_name is None: - filtered_things = list(filter(lambda elem: - attribute_name in elem["attributes"] and - elem["attributes"][attribute_name] == attribute_value, all_things)) - elif attribute_name is None and thing_type_name is not None: - filtered_things = list( - filter(lambda elem: "thingTypeName" in elem and elem["thingTypeName"] == thing_type_name, all_things)) - else: - filtered_things = all_things - - if token is None: - things = filtered_things[0:max_results] - next_token = str(max_results) if len(filtered_things) > max_results else None - else: - token = int(token) - things = filtered_things[token:token + max_results] - next_token = str(token + max_results) if len(filtered_things) > token + max_results else None - - return things, next_token - - def describe_thing(self, thing_name): - things = [_ for _ in self.things.values() if _.thing_name == thing_name] - if len(things) == 0: - raise ResourceNotFoundException() - return things[0] - - def describe_thing_type(self, thing_type_name): - thing_types = [_ for _ in self.thing_types.values() if _.thing_type_name == thing_type_name] - if len(thing_types) == 0: - raise ResourceNotFoundException() - return thing_types[0] - - def delete_thing(self, thing_name, expected_version): - # TODO: handle expected_version - - # can raise ResourceNotFoundError - thing = self.describe_thing(thing_name) - del self.things[thing.arn] - - def delete_thing_type(self, thing_type_name): - # can raise ResourceNotFoundError - thing_type = self.describe_thing_type(thing_type_name) - del self.thing_types[thing_type.arn] - - def update_thing(self, thing_name, thing_type_name, attribute_payload, expected_version, remove_thing_type): - # if attributes payload = {}, nothing - thing = self.describe_thing(thing_name) - thing_type = None - - if remove_thing_type and thing_type_name: - raise InvalidRequestException() - - # thing_type - if thing_type_name: - thing_types = self.list_thing_types() - filtered_thing_types = [_ for _ in 
thing_types if _.thing_type_name == thing_type_name] - if len(filtered_thing_types) == 0: - raise ResourceNotFoundException() - thing_type = filtered_thing_types[0] - thing.thing_type = thing_type - - if remove_thing_type: - thing.thing_type = None - - # attribute - if attribute_payload is not None and 'attributes' in attribute_payload: - do_merge = attribute_payload.get('merge', False) - attributes = attribute_payload['attributes'] - if not do_merge: - thing.attributes = attributes - else: - thing.attributes.update(attributes) - - def _random_string(self): - n = 20 - random_str = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(n)]) - return random_str - - def create_keys_and_certificate(self, set_as_active): - # implement here - # caCertificate can be blank - key_pair = { - 'PublicKey': self._random_string(), - 'PrivateKey': self._random_string() - } - certificate_pem = self._random_string() - status = 'ACTIVE' if set_as_active else 'INACTIVE' - certificate = FakeCertificate(certificate_pem, status, self.region_name) - self.certificates[certificate.certificate_id] = certificate - return certificate, key_pair - - def delete_certificate(self, certificate_id): - cert = self.describe_certificate(certificate_id) - if cert.status == 'ACTIVE': - raise CertificateStateException( - 'Certificate must be deactivated (not ACTIVE) before deletion.', certificate_id) - - certs = [k[0] for k, v in self.principal_things.items() - if self._get_principal(k[0]).certificate_id == certificate_id] - if len(certs) > 0: - raise DeleteConflictException( - 'Things must be detached before deletion (arn: %s)' % certs[0] - ) - - certs = [k[0] for k, v in self.principal_policies.items() - if self._get_principal(k[0]).certificate_id == certificate_id] - if len(certs) > 0: - raise DeleteConflictException( - 'Certificate policies must be detached before deletion (arn: %s)' % certs[0] - ) - - del self.certificates[certificate_id] - - def describe_certificate(self, certificate_id): - certs = [_ for _ in self.certificates.values() if _.certificate_id == certificate_id] - if len(certs) == 0: - raise ResourceNotFoundException() - return certs[0] - - def list_certificates(self): - return self.certificates.values() - - def register_certificate(self, certificate_pem, ca_certificate_pem, set_as_active, status): - certificate = FakeCertificate(certificate_pem, 'ACTIVE' if set_as_active else status, - self.region_name, ca_certificate_pem) - self.certificates[certificate.certificate_id] = certificate - return certificate - - def update_certificate(self, certificate_id, new_status): - cert = self.describe_certificate(certificate_id) - # TODO: validate new_status - cert.status = new_status - - def create_policy(self, policy_name, policy_document): - policy = FakePolicy(policy_name, policy_document, self.region_name) - self.policies[policy.name] = policy - return policy - - def attach_policy(self, policy_name, target): - principal = self._get_principal(target) - policy = self.get_policy(policy_name) - k = (target, policy_name) - if k in self.principal_policies: - return - self.principal_policies[k] = (principal, policy) - - def detach_policy(self, policy_name, target): - # this may raises ResourceNotFoundException - self._get_principal(target) - self.get_policy(policy_name) - - k = (target, policy_name) - if k not in self.principal_policies: - raise ResourceNotFoundException() - del self.principal_policies[k] - - def list_attached_policies(self, target): - policies = [v[1] for k, v in 
self.principal_policies.items() if k[0] == target] - return policies - - def list_policies(self): - policies = self.policies.values() - return policies - - def get_policy(self, policy_name): - policies = [_ for _ in self.policies.values() if _.name == policy_name] - if len(policies) == 0: - raise ResourceNotFoundException() - return policies[0] - - def delete_policy(self, policy_name): - - policies = [k[1] for k, v in self.principal_policies.items() if k[1] == policy_name] - if len(policies) > 0: - raise DeleteConflictException( - 'The policy cannot be deleted as the policy is attached to one or more principals (name=%s)' - % policy_name - ) - - policy = self.get_policy(policy_name) - del self.policies[policy.name] - - def create_policy_version(self, policy_name, policy_document, set_as_default): - policy = self.get_policy(policy_name) - if not policy: - raise ResourceNotFoundException() - version = FakePolicyVersion(policy_name, policy_document, set_as_default, self.region_name) - policy.versions.append(version) - version.version_id = '{0}'.format(len(policy.versions)) - if set_as_default: - self.set_default_policy_version(policy_name, version.version_id) - return version - - def set_default_policy_version(self, policy_name, version_id): - policy = self.get_policy(policy_name) - if not policy: - raise ResourceNotFoundException() - for version in policy.versions: - if version.version_id == version_id: - version.is_default = True - policy.default_version_id = version.version_id - policy.document = version.document - else: - version.is_default = False - - def get_policy_version(self, policy_name, version_id): - policy = self.get_policy(policy_name) - if not policy: - raise ResourceNotFoundException() - for version in policy.versions: - if version.version_id == version_id: - return version - raise ResourceNotFoundException() - - def list_policy_versions(self, policy_name): - policy = self.get_policy(policy_name) - if not policy: - raise ResourceNotFoundException() - return policy.versions - - def delete_policy_version(self, policy_name, version_id): - policy = self.get_policy(policy_name) - if not policy: - raise ResourceNotFoundException() - if version_id == policy.default_version_id: - raise InvalidRequestException( - "Cannot delete the default version of a policy") - for i, v in enumerate(policy.versions): - if v.version_id == version_id: - del policy.versions[i] - return - raise ResourceNotFoundException() - - def _get_principal(self, principal_arn): - """ - raise ResourceNotFoundException - """ - if ':cert/' in principal_arn: - certs = [_ for _ in self.certificates.values() if _.arn == principal_arn] - if len(certs) == 0: - raise ResourceNotFoundException() - principal = certs[0] - return principal - else: - # TODO: search for cognito_ids - pass - raise ResourceNotFoundException() - - def attach_principal_policy(self, policy_name, principal_arn): - principal = self._get_principal(principal_arn) - policy = self.get_policy(policy_name) - k = (principal_arn, policy_name) - if k in self.principal_policies: - return - self.principal_policies[k] = (principal, policy) - - def detach_principal_policy(self, policy_name, principal_arn): - # this may raises ResourceNotFoundException - self._get_principal(principal_arn) - self.get_policy(policy_name) - - k = (principal_arn, policy_name) - if k not in self.principal_policies: - raise ResourceNotFoundException() - del self.principal_policies[k] - - def list_principal_policies(self, principal_arn): - policies = [v[1] for k, v in 
self.principal_policies.items() if k[0] == principal_arn] - return policies - - def list_policy_principals(self, policy_name): - principals = [k[0] for k, v in self.principal_policies.items() if k[1] == policy_name] - return principals - - def attach_thing_principal(self, thing_name, principal_arn): - principal = self._get_principal(principal_arn) - thing = self.describe_thing(thing_name) - k = (principal_arn, thing_name) - if k in self.principal_things: - return - self.principal_things[k] = (principal, thing) - - def detach_thing_principal(self, thing_name, principal_arn): - # this may raises ResourceNotFoundException - self._get_principal(principal_arn) - self.describe_thing(thing_name) - - k = (principal_arn, thing_name) - if k not in self.principal_things: - raise ResourceNotFoundException() - del self.principal_things[k] - - def list_principal_things(self, principal_arn): - thing_names = [k[0] for k, v in self.principal_things.items() if k[0] == principal_arn] - return thing_names - - def list_thing_principals(self, thing_name): - principals = [k[0] for k, v in self.principal_things.items() if k[1] == thing_name] - return principals - - def describe_thing_group(self, thing_group_name): - thing_groups = [_ for _ in self.thing_groups.values() if _.thing_group_name == thing_group_name] - if len(thing_groups) == 0: - raise ResourceNotFoundException() - return thing_groups[0] - - def create_thing_group(self, thing_group_name, parent_group_name, thing_group_properties): - thing_group = FakeThingGroup(thing_group_name, parent_group_name, thing_group_properties, self.region_name) - self.thing_groups[thing_group.arn] = thing_group - return thing_group.thing_group_name, thing_group.arn, thing_group.thing_group_id - - def delete_thing_group(self, thing_group_name, expected_version): - thing_group = self.describe_thing_group(thing_group_name) - del self.thing_groups[thing_group.arn] - - def list_thing_groups(self, parent_group, name_prefix_filter, recursive): - thing_groups = self.thing_groups.values() - return thing_groups - - def update_thing_group(self, thing_group_name, thing_group_properties, expected_version): - thing_group = self.describe_thing_group(thing_group_name) - if expected_version and expected_version != thing_group.version: - raise VersionConflictException(thing_group_name) - attribute_payload = thing_group_properties.get('attributePayload', None) - if attribute_payload is not None and 'attributes' in attribute_payload: - do_merge = attribute_payload.get('merge', False) - attributes = attribute_payload['attributes'] - if not do_merge: - thing_group.thing_group_properties['attributePayload']['attributes'] = attributes - else: - thing_group.thing_group_properties['attributePayload']['attributes'].update(attributes) - elif attribute_payload is not None and 'attributes' not in attribute_payload: - thing_group.attributes = {} - thing_group.version = thing_group.version + 1 - return thing_group.version - - def _identify_thing_group(self, thing_group_name, thing_group_arn): - # identify thing group - if thing_group_name is None and thing_group_arn is None: - raise InvalidRequestException( - ' Both thingGroupArn and thingGroupName are empty. 
Need to specify at least one of them' - ) - if thing_group_name is not None: - thing_group = self.describe_thing_group(thing_group_name) - if thing_group_arn and thing_group.arn != thing_group_arn: - raise InvalidRequestException( - 'ThingGroupName thingGroupArn does not match specified thingGroupName in request' - ) - elif thing_group_arn is not None: - if thing_group_arn not in self.thing_groups: - raise InvalidRequestException() - thing_group = self.thing_groups[thing_group_arn] - return thing_group - - def _identify_thing(self, thing_name, thing_arn): - # identify thing - if thing_name is None and thing_arn is None: - raise InvalidRequestException( - 'Both thingArn and thingName are empty. Need to specify at least one of them' - ) - if thing_name is not None: - thing = self.describe_thing(thing_name) - if thing_arn and thing.arn != thing_arn: - raise InvalidRequestException( - 'ThingName thingArn does not match specified thingName in request' - ) - elif thing_arn is not None: - if thing_arn not in self.things: - raise InvalidRequestException() - thing = self.things[thing_arn] - return thing - - def add_thing_to_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn): - thing_group = self._identify_thing_group(thing_group_name, thing_group_arn) - thing = self._identify_thing(thing_name, thing_arn) - if thing.arn in thing_group.things: - # aws ignores duplicate registration - return - thing_group.things[thing.arn] = thing - - def remove_thing_from_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn): - thing_group = self._identify_thing_group(thing_group_name, thing_group_arn) - thing = self._identify_thing(thing_name, thing_arn) - if thing.arn not in thing_group.things: - # aws ignores non-registered thing - return - del thing_group.things[thing.arn] - - def list_things_in_thing_group(self, thing_group_name, recursive): - thing_group = self.describe_thing_group(thing_group_name) - return thing_group.things.values() - - def list_thing_groups_for_thing(self, thing_name): - thing = self.describe_thing(thing_name) - all_thing_groups = self.list_thing_groups(None, None, None) - ret = [] - for thing_group in all_thing_groups: - if thing.arn in thing_group.things: - ret.append({ - 'groupName': thing_group.thing_group_name, - 'groupArn': thing_group.arn - }) - return ret - - def update_thing_groups_for_thing(self, thing_name, thing_groups_to_add, thing_groups_to_remove): - thing = self.describe_thing(thing_name) - for thing_group_name in thing_groups_to_add: - thing_group = self.describe_thing_group(thing_group_name) - self.add_thing_to_thing_group( - thing_group.thing_group_name, None, - thing.thing_name, None - ) - for thing_group_name in thing_groups_to_remove: - thing_group = self.describe_thing_group(thing_group_name) - self.remove_thing_from_thing_group( - thing_group.thing_group_name, None, - thing.thing_name, None - ) - - def create_job(self, job_id, targets, document_source, document, description, presigned_url_config, - target_selection, job_executions_rollout_config, document_parameters): - job = FakeJob(job_id, targets, document_source, document, description, presigned_url_config, target_selection, - job_executions_rollout_config, document_parameters, self.region_name) - self.jobs[job_id] = job - - for thing_arn in targets: - thing_name = thing_arn.split(':')[-1].split('/')[-1] - job_execution = FakeJobExecution(job_id, thing_arn) - self.job_executions[(job_id, thing_name)] = job_execution - return job.job_arn, job_id, description - - def 
describe_job(self, job_id): - jobs = [_ for _ in self.jobs.values() if _.job_id == job_id] - if len(jobs) == 0: - raise ResourceNotFoundException() - return jobs[0] - - def delete_job(self, job_id, force): - job = self.jobs[job_id] - - if job.status == 'IN_PROGRESS' and force: - del self.jobs[job_id] - elif job.status != 'IN_PROGRESS': - del self.jobs[job_id] - else: - raise InvalidStateTransitionException() - - def cancel_job(self, job_id, reason_code, comment, force): - job = self.jobs[job_id] - - job.reason_code = reason_code if reason_code is not None else job.reason_code - job.comment = comment if comment is not None else job.comment - job.force = force if force is not None and force != job.force else job.force - job.status = 'CANCELED' - - if job.status == 'IN_PROGRESS' and force: - self.jobs[job_id] = job - elif job.status != 'IN_PROGRESS': - self.jobs[job_id] = job - else: - raise InvalidStateTransitionException() - - return job - - def get_job_document(self, job_id): - return self.jobs[job_id] - - def list_jobs(self, status, target_selection, max_results, token, thing_group_name, thing_group_id): - # TODO: implement filters - all_jobs = [_.to_dict() for _ in self.jobs.values()] - filtered_jobs = all_jobs - - if token is None: - jobs = filtered_jobs[0:max_results] - next_token = str(max_results) if len(filtered_jobs) > max_results else None - else: - token = int(token) - jobs = filtered_jobs[token:token + max_results] - next_token = str(token + max_results) if len(filtered_jobs) > token + max_results else None - - return jobs, next_token - - def describe_job_execution(self, job_id, thing_name, execution_number): - try: - job_execution = self.job_executions[(job_id, thing_name)] - except KeyError: - raise ResourceNotFoundException() - - if job_execution is None or \ - (execution_number is not None and job_execution.execution_number != execution_number): - raise ResourceNotFoundException() - - return job_execution - - def cancel_job_execution(self, job_id, thing_name, force, expected_version, status_details): - job_execution = self.job_executions[(job_id, thing_name)] - - if job_execution is None: - raise ResourceNotFoundException() - - job_execution.force_canceled = force if force is not None else job_execution.force_canceled - # TODO: implement expected_version and status_details (at most 10 can be specified) - - if job_execution.status == 'IN_PROGRESS' and force: - job_execution.status = 'CANCELED' - self.job_executions[(job_id, thing_name)] = job_execution - elif job_execution.status != 'IN_PROGRESS': - job_execution.status = 'CANCELED' - self.job_executions[(job_id, thing_name)] = job_execution - else: - raise InvalidStateTransitionException() - - def delete_job_execution(self, job_id, thing_name, execution_number, force): - job_execution = self.job_executions[(job_id, thing_name)] - - if job_execution.execution_number != execution_number: - raise ResourceNotFoundException() - - if job_execution.status == 'IN_PROGRESS' and force: - del self.job_executions[(job_id, thing_name)] - elif job_execution.status != 'IN_PROGRESS': - del self.job_executions[(job_id, thing_name)] - else: - raise InvalidStateTransitionException() - - def list_job_executions_for_job(self, job_id, status, max_results, next_token): - job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[0] == job_id] - - if status is not None: - job_executions = list(filter(lambda elem: - status in elem["status"] and - elem["status"] == status, job_executions)) - - token = next_token - if 
token is None: - job_executions = job_executions[0:max_results] - next_token = str(max_results) if len(job_executions) > max_results else None - else: - token = int(token) - job_executions = job_executions[token:token + max_results] - next_token = str(token + max_results) if len(job_executions) > token + max_results else None - - return job_executions, next_token - - def list_job_executions_for_thing(self, thing_name, status, max_results, next_token): - job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[1] == thing_name] - - if status is not None: - job_executions = list(filter(lambda elem: - status in elem["status"] and - elem["status"] == status, job_executions)) - - token = next_token - if token is None: - job_executions = job_executions[0:max_results] - next_token = str(max_results) if len(job_executions) > max_results else None - else: - token = int(token) - job_executions = job_executions[token:token + max_results] - next_token = str(token + max_results) if len(job_executions) > token + max_results else None - - return job_executions, next_token - - -available_regions = boto3.session.Session().get_available_regions("iot") -iot_backends = {region: IoTBackend(region) for region in available_regions} +from __future__ import unicode_literals + +import hashlib +import random +import re +import string +import time +import uuid +from collections import OrderedDict +from datetime import datetime + +import boto3 + +from moto.core import BaseBackend, BaseModel +from .exceptions import ( + CertificateStateException, + DeleteConflictException, + ResourceNotFoundException, + InvalidRequestException, + InvalidStateTransitionException, + VersionConflictException +) + + +class FakeThing(BaseModel): + def __init__(self, thing_name, thing_type, attributes, region_name): + self.region_name = region_name + self.thing_name = thing_name + self.thing_type = thing_type + self.attributes = attributes + self.arn = 'arn:aws:iot:%s:1:thing/%s' % (self.region_name, thing_name) + self.version = 1 + # TODO: we need to handle 'version'? 
+
+        # for iot-data
+        self.thing_shadow = None
+
+    def to_dict(self, include_default_client_id=False):
+        obj = {
+            'thingName': self.thing_name,
+            'thingArn': self.arn,
+            'attributes': self.attributes,
+            'version': self.version
+        }
+        if self.thing_type:
+            obj['thingTypeName'] = self.thing_type.thing_type_name
+        if include_default_client_id:
+            obj['defaultClientId'] = self.thing_name
+        return obj
+
+
+class FakeThingType(BaseModel):
+    def __init__(self, thing_type_name, thing_type_properties, region_name):
+        self.region_name = region_name
+        self.thing_type_name = thing_type_name
+        self.thing_type_properties = thing_type_properties
+        self.thing_type_id = str(uuid.uuid4())  # the ID format is undocumented, so use a random UUID
+        t = time.time()
+        self.metadata = {
+            'deprecated': False,
+            'creationDate': int(t * 1000) / 1000.0
+        }
+        self.arn = 'arn:aws:iot:%s:1:thingtype/%s' % (self.region_name, thing_type_name)
+
+    def to_dict(self):
+        return {
+            'thingTypeName': self.thing_type_name,
+            'thingTypeId': self.thing_type_id,
+            'thingTypeProperties': self.thing_type_properties,
+            'thingTypeMetadata': self.metadata
+        }
+
+
+class FakeThingGroup(BaseModel):
+    def __init__(self, thing_group_name, parent_group_name, thing_group_properties, region_name):
+        self.region_name = region_name
+        self.thing_group_name = thing_group_name
+        self.thing_group_id = str(uuid.uuid4())  # the ID format is undocumented, so use a random UUID
+        self.version = 1  # incremented by update_thing_group
+        self.parent_group_name = parent_group_name
+        self.thing_group_properties = thing_group_properties or {}
+        t = time.time()
+        self.metadata = {
+            'creationDate': int(t * 1000) / 1000.0
+        }
+        self.arn = 'arn:aws:iot:%s:1:thinggroup/%s' % (self.region_name, thing_group_name)
+        self.things = OrderedDict()
+
+    def to_dict(self):
+        return {
+            'thingGroupName': self.thing_group_name,
+            'thingGroupId': self.thing_group_id,
+            'version': self.version,
+            'thingGroupProperties': self.thing_group_properties,
+            'thingGroupMetadata': self.metadata
+        }
+
+
+class FakeCertificate(BaseModel):
+    def __init__(self, certificate_pem, status, region_name, ca_certificate_pem=None):
+        m = hashlib.sha256()
+        m.update(str(uuid.uuid4()).encode('utf-8'))
+        self.certificate_id = m.hexdigest()
+        self.arn = 'arn:aws:iot:%s:1:cert/%s' % (region_name, self.certificate_id)
+        self.certificate_pem = certificate_pem
+        self.status = status
+
+        # TODO: use the real account id instead of the '1' placeholder
+        self.owner = '1'
+        self.transfer_data = {}
+        self.creation_date = time.time()
+        self.last_modified_date = self.creation_date
+
+        self.ca_certificate_id = None
+        self.ca_certificate_pem = ca_certificate_pem
+        if ca_certificate_pem:
+            m.update(str(uuid.uuid4()).encode('utf-8'))
+            self.ca_certificate_id = m.hexdigest()
+
+    def to_dict(self):
+        return {
+            'certificateArn': self.arn,
+            'certificateId': self.certificate_id,
+            'caCertificateId': self.ca_certificate_id,
+            'status': self.status,
+            'creationDate': self.creation_date
+        }
+
+    def to_description_dict(self):
+        """
+        Some situations may also require the following keys:
+        - caCertificateId
+        - previousOwnedBy
+        """
+        return {
+            'certificateArn': self.arn,
+            'certificateId': self.certificate_id,
+            'status': self.status,
+            'certificatePem': self.certificate_pem,
+            'ownedBy': self.owner,
+            'creationDate': self.creation_date,
+            'lastModifiedDate': self.last_modified_date,
+            'transferData': self.transfer_data
+        }
+
+
+class FakePolicy(BaseModel):
+    def __init__(self, name, document, region_name, default_version_id='1'):
+        self.name = name
+        self.document = document
+        self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, name)
+        
self.default_version_id = default_version_id
+        self.versions = [FakePolicyVersion(self.name, document, True, region_name)]
+
+    def to_get_dict(self):
+        return {
+            'policyName': self.name,
+            'policyArn': self.arn,
+            'policyDocument': self.document,
+            'defaultVersionId': self.default_version_id
+        }
+
+    def to_dict_at_creation(self):
+        return {
+            'policyName': self.name,
+            'policyArn': self.arn,
+            'policyDocument': self.document,
+            'policyVersionId': self.default_version_id
+        }
+
+    def to_dict(self):
+        return {
+            'policyName': self.name,
+            'policyArn': self.arn,
+        }
+
+
+class FakePolicyVersion(object):
+
+    def __init__(self,
+                 policy_name,
+                 document,
+                 is_default,
+                 region_name):
+        self.name = policy_name
+        self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, policy_name)
+        self.document = document or {}
+        self.is_default = is_default
+        self.version_id = '1'
+
+        self.create_datetime = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.last_modified_datetime = time.mktime(datetime(2015, 1, 2).timetuple())
+
+    def to_get_dict(self):
+        return {
+            'policyName': self.name,
+            'policyArn': self.arn,
+            'policyDocument': self.document,
+            'policyVersionId': self.version_id,
+            'isDefaultVersion': self.is_default,
+            'creationDate': self.create_datetime,
+            'lastModifiedDate': self.last_modified_datetime,
+            'generationId': self.version_id
+        }
+
+    def to_dict_at_creation(self):
+        return {
+            'policyArn': self.arn,
+            'policyDocument': self.document,
+            'policyVersionId': self.version_id,
+            'isDefaultVersion': self.is_default
+        }
+
+    def to_dict(self):
+        return {
+            'versionId': self.version_id,
+            'isDefaultVersion': self.is_default,
+            'createDate': self.create_datetime,
+        }
+
+
+class FakeJob(BaseModel):
+    # anchor the pattern so ids that merely start with a valid character
+    # (e.g. 'job#1') are rejected instead of accepted
+    JOB_ID_REGEX_PATTERN = "[a-zA-Z0-9_-]+$"
+    JOB_ID_REGEX = re.compile(JOB_ID_REGEX_PATTERN)
+
+    def __init__(self, job_id, targets, document_source, document, description, presigned_url_config, target_selection,
+                 job_executions_rollout_config, document_parameters, region_name):
+        if not self._job_id_matcher(self.JOB_ID_REGEX, job_id):
+            raise InvalidRequestException()
+
+        self.region_name = region_name
+        self.job_id = job_id
+        self.job_arn = 'arn:aws:iot:%s:1:job/%s' % (self.region_name, job_id)
+        self.targets = targets
+        self.document_source = document_source
+        self.document = document
+        self.force = False
+        self.description = description
+        self.presigned_url_config = presigned_url_config
+        self.target_selection = target_selection
+        self.job_executions_rollout_config = job_executions_rollout_config
+        self.status = 'QUEUED'  # QUEUED | IN_PROGRESS | CANCELED | COMPLETED
+        self.comment = None
+        self.reason_code = None
+        self.created_at = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.completed_at = None
+        self.job_process_details = {
+            'processingTargets': targets,
+            'numberOfQueuedThings': 1,
+            'numberOfCanceledThings': 0,
+            'numberOfSucceededThings': 0,
+            'numberOfFailedThings': 0,
+            'numberOfRejectedThings': 0,
+            'numberOfInProgressThings': 0,
+            'numberOfRemovedThings': 0
+        }
+        self.document_parameters = document_parameters
+
+    def to_dict(self):
+        obj = {
+            'jobArn': self.job_arn,
+            'jobId': self.job_id,
+            'targets': self.targets,
+            'description': self.description,
+            'presignedUrlConfig': self.presigned_url_config,
+            'targetSelection': self.target_selection,
+            'jobExecutionsRolloutConfig': self.job_executions_rollout_config,
+            'status': self.status,
+            'comment': self.comment,
+            'forceCanceled': self.force,
+            'reasonCode': 
self.reason_code,
+            'createdAt': self.created_at,
+            'lastUpdatedAt': self.last_updated_at,
+            'completedAt': self.completed_at,
+            'jobProcessDetails': self.job_process_details,
+            'documentParameters': self.document_parameters,
+            'document': self.document,
+            'documentSource': self.document_source
+        }
+
+        return obj
+
+    def _job_id_matcher(self, regex, argument):
+        regex_match = regex.match(argument)
+        length_match = len(argument) <= 64
+        return regex_match and length_match
+
+
+class FakeJobExecution(BaseModel):
+
+    def __init__(self, job_id, thing_arn, status='QUEUED', force_canceled=False, status_details_map=None):
+        self.job_id = job_id
+        self.status = status  # QUEUED | IN_PROGRESS | CANCELED | COMPLETED
+        self.force_canceled = force_canceled
+        # avoid a mutable default argument; a shared dict would leak state
+        # between executions
+        self.status_details_map = status_details_map if status_details_map is not None else {}
+        self.thing_arn = thing_arn
+        self.queued_at = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.started_at = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.last_updated_at = time.mktime(datetime(2015, 1, 1).timetuple())
+        self.execution_number = 123
+        self.version_number = 123
+        self.approximate_seconds_before_time_out = 123
+
+    def to_get_dict(self):
+        obj = {
+            'jobId': self.job_id,
+            'status': self.status,
+            'forceCanceled': self.force_canceled,
+            'statusDetails': {'detailsMap': self.status_details_map},
+            'thingArn': self.thing_arn,
+            'queuedAt': self.queued_at,
+            'startedAt': self.started_at,
+            'lastUpdatedAt': self.last_updated_at,
+            'executionNumber': self.execution_number,
+            'versionNumber': self.version_number,
+            'approximateSecondsBeforeTimedOut': self.approximate_seconds_before_time_out
+        }
+
+        return obj
+
+    def to_dict(self):
+        obj = {
+            'jobId': self.job_id,
+            'thingArn': self.thing_arn,
+            'jobExecutionSummary': {
+                'status': self.status,
+                'queuedAt': self.queued_at,
+                'startedAt': self.started_at,
+                'lastUpdatedAt': self.last_updated_at,
+                'executionNumber': self.execution_number,
+            }
+        }
+
+        return obj
+
+
+class IoTBackend(BaseBackend):
+    def __init__(self, region_name=None):
+        super(IoTBackend, self).__init__()
+        self.region_name = region_name
+        self.things = OrderedDict()
+        self.jobs = OrderedDict()
+        self.job_executions = OrderedDict()
+        self.thing_types = OrderedDict()
+        self.thing_groups = OrderedDict()
+        self.certificates = OrderedDict()
+        self.policies = OrderedDict()
+        self.principal_policies = OrderedDict()
+        self.principal_things = OrderedDict()
+
+    def reset(self):
+        region_name = self.region_name
+        self.__dict__ = {}
+        self.__init__(region_name)
+
+    def create_thing(self, thing_name, thing_type_name, attribute_payload):
+        thing_types = self.list_thing_types()
+        thing_type = None
+        if thing_type_name:
+            filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == thing_type_name]
+            if len(filtered_thing_types) == 0:
+                raise ResourceNotFoundException()
+            thing_type = filtered_thing_types[0]
+        if attribute_payload is None:
+            attributes = {}
+        elif 'attributes' not in attribute_payload:
+            attributes = {}
+        else:
+            attributes = attribute_payload['attributes']
+        thing = FakeThing(thing_name, thing_type, attributes, self.region_name)
+        self.things[thing.arn] = thing
+        return thing.thing_name, thing.arn
+
+    def create_thing_type(self, thing_type_name, thing_type_properties):
+        if thing_type_properties is None:
+            thing_type_properties = {}
+        thing_type = FakeThingType(thing_type_name, thing_type_properties, self.region_name)
+        self.thing_types[thing_type.arn] = thing_type
+        return thing_type.thing_type_name, thing_type.arn
+
+    def list_thing_types(self, 
thing_type_name=None): + if thing_type_name: + # It's weird but thing_type_name is filtered by forward match, not complete match + return [_ for _ in self.thing_types.values() if _.thing_type_name.startswith(thing_type_name)] + return self.thing_types.values() + + def list_things(self, attribute_name, attribute_value, thing_type_name, max_results, token): + all_things = [_.to_dict() for _ in self.things.values()] + if attribute_name is not None and thing_type_name is not None: + filtered_things = list(filter(lambda elem: + attribute_name in elem["attributes"] and + elem["attributes"][attribute_name] == attribute_value and + "thingTypeName" in elem and + elem["thingTypeName"] == thing_type_name, all_things)) + elif attribute_name is not None and thing_type_name is None: + filtered_things = list(filter(lambda elem: + attribute_name in elem["attributes"] and + elem["attributes"][attribute_name] == attribute_value, all_things)) + elif attribute_name is None and thing_type_name is not None: + filtered_things = list( + filter(lambda elem: "thingTypeName" in elem and elem["thingTypeName"] == thing_type_name, all_things)) + else: + filtered_things = all_things + + if token is None: + things = filtered_things[0:max_results] + next_token = str(max_results) if len(filtered_things) > max_results else None + else: + token = int(token) + things = filtered_things[token:token + max_results] + next_token = str(token + max_results) if len(filtered_things) > token + max_results else None + + return things, next_token + + def describe_thing(self, thing_name): + things = [_ for _ in self.things.values() if _.thing_name == thing_name] + if len(things) == 0: + raise ResourceNotFoundException() + return things[0] + + def describe_thing_type(self, thing_type_name): + thing_types = [_ for _ in self.thing_types.values() if _.thing_type_name == thing_type_name] + if len(thing_types) == 0: + raise ResourceNotFoundException() + return thing_types[0] + + def delete_thing(self, thing_name, expected_version): + # TODO: handle expected_version + + # can raise ResourceNotFoundError + thing = self.describe_thing(thing_name) + del self.things[thing.arn] + + def delete_thing_type(self, thing_type_name): + # can raise ResourceNotFoundError + thing_type = self.describe_thing_type(thing_type_name) + del self.thing_types[thing_type.arn] + + def update_thing(self, thing_name, thing_type_name, attribute_payload, expected_version, remove_thing_type): + # if attributes payload = {}, nothing + thing = self.describe_thing(thing_name) + thing_type = None + + if remove_thing_type and thing_type_name: + raise InvalidRequestException() + + # thing_type + if thing_type_name: + thing_types = self.list_thing_types() + filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == thing_type_name] + if len(filtered_thing_types) == 0: + raise ResourceNotFoundException() + thing_type = filtered_thing_types[0] + thing.thing_type = thing_type + + if remove_thing_type: + thing.thing_type = None + + # attribute + if attribute_payload is not None and 'attributes' in attribute_payload: + do_merge = attribute_payload.get('merge', False) + attributes = attribute_payload['attributes'] + if not do_merge: + thing.attributes = attributes + else: + thing.attributes.update(attributes) + + def _random_string(self): + n = 20 + random_str = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(n)]) + return random_str + + def create_keys_and_certificate(self, set_as_active): + # implement here + # caCertificate can be blank + 
key_pair = { + 'PublicKey': self._random_string(), + 'PrivateKey': self._random_string() + } + certificate_pem = self._random_string() + status = 'ACTIVE' if set_as_active else 'INACTIVE' + certificate = FakeCertificate(certificate_pem, status, self.region_name) + self.certificates[certificate.certificate_id] = certificate + return certificate, key_pair + + def delete_certificate(self, certificate_id): + cert = self.describe_certificate(certificate_id) + if cert.status == 'ACTIVE': + raise CertificateStateException( + 'Certificate must be deactivated (not ACTIVE) before deletion.', certificate_id) + + certs = [k[0] for k, v in self.principal_things.items() + if self._get_principal(k[0]).certificate_id == certificate_id] + if len(certs) > 0: + raise DeleteConflictException( + 'Things must be detached before deletion (arn: %s)' % certs[0] + ) + + certs = [k[0] for k, v in self.principal_policies.items() + if self._get_principal(k[0]).certificate_id == certificate_id] + if len(certs) > 0: + raise DeleteConflictException( + 'Certificate policies must be detached before deletion (arn: %s)' % certs[0] + ) + + del self.certificates[certificate_id] + + def describe_certificate(self, certificate_id): + certs = [_ for _ in self.certificates.values() if _.certificate_id == certificate_id] + if len(certs) == 0: + raise ResourceNotFoundException() + return certs[0] + + def list_certificates(self): + return self.certificates.values() + + def register_certificate(self, certificate_pem, ca_certificate_pem, set_as_active, status): + certificate = FakeCertificate(certificate_pem, 'ACTIVE' if set_as_active else status, + self.region_name, ca_certificate_pem) + self.certificates[certificate.certificate_id] = certificate + return certificate + + def update_certificate(self, certificate_id, new_status): + cert = self.describe_certificate(certificate_id) + # TODO: validate new_status + cert.status = new_status + + def create_policy(self, policy_name, policy_document): + policy = FakePolicy(policy_name, policy_document, self.region_name) + self.policies[policy.name] = policy + return policy + + def attach_policy(self, policy_name, target): + principal = self._get_principal(target) + policy = self.get_policy(policy_name) + k = (target, policy_name) + if k in self.principal_policies: + return + self.principal_policies[k] = (principal, policy) + + def detach_policy(self, policy_name, target): + # this may raises ResourceNotFoundException + self._get_principal(target) + self.get_policy(policy_name) + + k = (target, policy_name) + if k not in self.principal_policies: + raise ResourceNotFoundException() + del self.principal_policies[k] + + def list_attached_policies(self, target): + policies = [v[1] for k, v in self.principal_policies.items() if k[0] == target] + return policies + + def list_policies(self): + policies = self.policies.values() + return policies + + def get_policy(self, policy_name): + policies = [_ for _ in self.policies.values() if _.name == policy_name] + if len(policies) == 0: + raise ResourceNotFoundException() + return policies[0] + + def delete_policy(self, policy_name): + + policies = [k[1] for k, v in self.principal_policies.items() if k[1] == policy_name] + if len(policies) > 0: + raise DeleteConflictException( + 'The policy cannot be deleted as the policy is attached to one or more principals (name=%s)' + % policy_name + ) + + policy = self.get_policy(policy_name) + del self.policies[policy.name] + + def create_policy_version(self, policy_name, policy_document, set_as_default): + policy = 
self.get_policy(policy_name) + if not policy: + raise ResourceNotFoundException() + version = FakePolicyVersion(policy_name, policy_document, set_as_default, self.region_name) + policy.versions.append(version) + version.version_id = '{0}'.format(len(policy.versions)) + if set_as_default: + self.set_default_policy_version(policy_name, version.version_id) + return version + + def set_default_policy_version(self, policy_name, version_id): + policy = self.get_policy(policy_name) + if not policy: + raise ResourceNotFoundException() + for version in policy.versions: + if version.version_id == version_id: + version.is_default = True + policy.default_version_id = version.version_id + policy.document = version.document + else: + version.is_default = False + + def get_policy_version(self, policy_name, version_id): + policy = self.get_policy(policy_name) + if not policy: + raise ResourceNotFoundException() + for version in policy.versions: + if version.version_id == version_id: + return version + raise ResourceNotFoundException() + + def list_policy_versions(self, policy_name): + policy = self.get_policy(policy_name) + if not policy: + raise ResourceNotFoundException() + return policy.versions + + def delete_policy_version(self, policy_name, version_id): + policy = self.get_policy(policy_name) + if not policy: + raise ResourceNotFoundException() + if version_id == policy.default_version_id: + raise InvalidRequestException( + "Cannot delete the default version of a policy") + for i, v in enumerate(policy.versions): + if v.version_id == version_id: + del policy.versions[i] + return + raise ResourceNotFoundException() + + def _get_principal(self, principal_arn): + """ + raise ResourceNotFoundException + """ + if ':cert/' in principal_arn: + certs = [_ for _ in self.certificates.values() if _.arn == principal_arn] + if len(certs) == 0: + raise ResourceNotFoundException() + principal = certs[0] + return principal + else: + # TODO: search for cognito_ids + pass + raise ResourceNotFoundException() + + def attach_principal_policy(self, policy_name, principal_arn): + principal = self._get_principal(principal_arn) + policy = self.get_policy(policy_name) + k = (principal_arn, policy_name) + if k in self.principal_policies: + return + self.principal_policies[k] = (principal, policy) + + def detach_principal_policy(self, policy_name, principal_arn): + # this may raises ResourceNotFoundException + self._get_principal(principal_arn) + self.get_policy(policy_name) + + k = (principal_arn, policy_name) + if k not in self.principal_policies: + raise ResourceNotFoundException() + del self.principal_policies[k] + + def list_principal_policies(self, principal_arn): + policies = [v[1] for k, v in self.principal_policies.items() if k[0] == principal_arn] + return policies + + def list_policy_principals(self, policy_name): + principals = [k[0] for k, v in self.principal_policies.items() if k[1] == policy_name] + return principals + + def attach_thing_principal(self, thing_name, principal_arn): + principal = self._get_principal(principal_arn) + thing = self.describe_thing(thing_name) + k = (principal_arn, thing_name) + if k in self.principal_things: + return + self.principal_things[k] = (principal, thing) + + def detach_thing_principal(self, thing_name, principal_arn): + # this may raises ResourceNotFoundException + self._get_principal(principal_arn) + self.describe_thing(thing_name) + + k = (principal_arn, thing_name) + if k not in self.principal_things: + raise ResourceNotFoundException() + del self.principal_things[k] + + 
def list_principal_things(self, principal_arn): + thing_names = [k[0] for k, v in self.principal_things.items() if k[0] == principal_arn] + return thing_names + + def list_thing_principals(self, thing_name): + principals = [k[0] for k, v in self.principal_things.items() if k[1] == thing_name] + return principals + + def describe_thing_group(self, thing_group_name): + thing_groups = [_ for _ in self.thing_groups.values() if _.thing_group_name == thing_group_name] + if len(thing_groups) == 0: + raise ResourceNotFoundException() + return thing_groups[0] + + def create_thing_group(self, thing_group_name, parent_group_name, thing_group_properties): + thing_group = FakeThingGroup(thing_group_name, parent_group_name, thing_group_properties, self.region_name) + self.thing_groups[thing_group.arn] = thing_group + return thing_group.thing_group_name, thing_group.arn, thing_group.thing_group_id + + def delete_thing_group(self, thing_group_name, expected_version): + thing_group = self.describe_thing_group(thing_group_name) + del self.thing_groups[thing_group.arn] + + def list_thing_groups(self, parent_group, name_prefix_filter, recursive): + thing_groups = self.thing_groups.values() + return thing_groups + + def update_thing_group(self, thing_group_name, thing_group_properties, expected_version): + thing_group = self.describe_thing_group(thing_group_name) + if expected_version and expected_version != thing_group.version: + raise VersionConflictException(thing_group_name) + attribute_payload = thing_group_properties.get('attributePayload', None) + if attribute_payload is not None and 'attributes' in attribute_payload: + do_merge = attribute_payload.get('merge', False) + attributes = attribute_payload['attributes'] + if not do_merge: + thing_group.thing_group_properties['attributePayload']['attributes'] = attributes + else: + thing_group.thing_group_properties['attributePayload']['attributes'].update(attributes) + elif attribute_payload is not None and 'attributes' not in attribute_payload: + thing_group.attributes = {} + thing_group.version = thing_group.version + 1 + return thing_group.version + + def _identify_thing_group(self, thing_group_name, thing_group_arn): + # identify thing group + if thing_group_name is None and thing_group_arn is None: + raise InvalidRequestException( + ' Both thingGroupArn and thingGroupName are empty. Need to specify at least one of them' + ) + if thing_group_name is not None: + thing_group = self.describe_thing_group(thing_group_name) + if thing_group_arn and thing_group.arn != thing_group_arn: + raise InvalidRequestException( + 'ThingGroupName thingGroupArn does not match specified thingGroupName in request' + ) + elif thing_group_arn is not None: + if thing_group_arn not in self.thing_groups: + raise InvalidRequestException() + thing_group = self.thing_groups[thing_group_arn] + return thing_group + + def _identify_thing(self, thing_name, thing_arn): + # identify thing + if thing_name is None and thing_arn is None: + raise InvalidRequestException( + 'Both thingArn and thingName are empty. 
Need to specify at least one of them' + ) + if thing_name is not None: + thing = self.describe_thing(thing_name) + if thing_arn and thing.arn != thing_arn: + raise InvalidRequestException( + 'ThingName thingArn does not match specified thingName in request' + ) + elif thing_arn is not None: + if thing_arn not in self.things: + raise InvalidRequestException() + thing = self.things[thing_arn] + return thing + + def add_thing_to_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn): + thing_group = self._identify_thing_group(thing_group_name, thing_group_arn) + thing = self._identify_thing(thing_name, thing_arn) + if thing.arn in thing_group.things: + # aws ignores duplicate registration + return + thing_group.things[thing.arn] = thing + + def remove_thing_from_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn): + thing_group = self._identify_thing_group(thing_group_name, thing_group_arn) + thing = self._identify_thing(thing_name, thing_arn) + if thing.arn not in thing_group.things: + # aws ignores non-registered thing + return + del thing_group.things[thing.arn] + + def list_things_in_thing_group(self, thing_group_name, recursive): + thing_group = self.describe_thing_group(thing_group_name) + return thing_group.things.values() + + def list_thing_groups_for_thing(self, thing_name): + thing = self.describe_thing(thing_name) + all_thing_groups = self.list_thing_groups(None, None, None) + ret = [] + for thing_group in all_thing_groups: + if thing.arn in thing_group.things: + ret.append({ + 'groupName': thing_group.thing_group_name, + 'groupArn': thing_group.arn + }) + return ret + + def update_thing_groups_for_thing(self, thing_name, thing_groups_to_add, thing_groups_to_remove): + thing = self.describe_thing(thing_name) + for thing_group_name in thing_groups_to_add: + thing_group = self.describe_thing_group(thing_group_name) + self.add_thing_to_thing_group( + thing_group.thing_group_name, None, + thing.thing_name, None + ) + for thing_group_name in thing_groups_to_remove: + thing_group = self.describe_thing_group(thing_group_name) + self.remove_thing_from_thing_group( + thing_group.thing_group_name, None, + thing.thing_name, None + ) + + def create_job(self, job_id, targets, document_source, document, description, presigned_url_config, + target_selection, job_executions_rollout_config, document_parameters): + job = FakeJob(job_id, targets, document_source, document, description, presigned_url_config, target_selection, + job_executions_rollout_config, document_parameters, self.region_name) + self.jobs[job_id] = job + + for thing_arn in targets: + thing_name = thing_arn.split(':')[-1].split('/')[-1] + job_execution = FakeJobExecution(job_id, thing_arn) + self.job_executions[(job_id, thing_name)] = job_execution + return job.job_arn, job_id, description + + def describe_job(self, job_id): + jobs = [_ for _ in self.jobs.values() if _.job_id == job_id] + if len(jobs) == 0: + raise ResourceNotFoundException() + return jobs[0] + + def delete_job(self, job_id, force): + job = self.jobs[job_id] + + if job.status == 'IN_PROGRESS' and force: + del self.jobs[job_id] + elif job.status != 'IN_PROGRESS': + del self.jobs[job_id] + else: + raise InvalidStateTransitionException() + + def cancel_job(self, job_id, reason_code, comment, force): + job = self.jobs[job_id] + + job.reason_code = reason_code if reason_code is not None else job.reason_code + job.comment = comment if comment is not None else job.comment + job.force = force if force is not None and force != 
job.force else job.force
+
+        # check the job's current state before marking it CANCELED;
+        # a job in progress can only be canceled when force is set
+        if job.status == 'IN_PROGRESS' and not force:
+            raise InvalidStateTransitionException()
+
+        job.status = 'CANCELED'
+        self.jobs[job_id] = job
+
+        return job
+
+    def get_job_document(self, job_id):
+        # returns the job itself; the response layer extracts the document
+        return self.jobs[job_id]
+
+    def list_jobs(self, status, target_selection, max_results, token, thing_group_name, thing_group_id):
+        # TODO: implement filters
+        all_jobs = [_.to_dict() for _ in self.jobs.values()]
+        filtered_jobs = all_jobs
+
+        if token is None:
+            jobs = filtered_jobs[0:max_results]
+            next_token = str(max_results) if len(filtered_jobs) > max_results else None
+        else:
+            token = int(token)
+            jobs = filtered_jobs[token:token + max_results]
+            next_token = str(token + max_results) if len(filtered_jobs) > token + max_results else None
+
+        return jobs, next_token
+
+    def describe_job_execution(self, job_id, thing_name, execution_number):
+        try:
+            job_execution = self.job_executions[(job_id, thing_name)]
+        except KeyError:
+            raise ResourceNotFoundException()
+
+        if execution_number is not None and job_execution.execution_number != execution_number:
+            raise ResourceNotFoundException()
+
+        return job_execution
+
+    def cancel_job_execution(self, job_id, thing_name, force, expected_version, status_details):
+        # use .get() so a missing execution raises ResourceNotFoundException
+        # instead of an unhandled KeyError
+        job_execution = self.job_executions.get((job_id, thing_name))
+
+        if job_execution is None:
+            raise ResourceNotFoundException()
+
+        job_execution.force_canceled = force if force is not None else job_execution.force_canceled
+        # TODO: implement expected_version and status_details (at most 10 can be specified)
+
+        if job_execution.status == 'IN_PROGRESS' and force:
+            job_execution.status = 'CANCELED'
+            self.job_executions[(job_id, thing_name)] = job_execution
+        elif job_execution.status != 'IN_PROGRESS':
+            job_execution.status = 'CANCELED'
+            self.job_executions[(job_id, thing_name)] = job_execution
+        else:
+            raise InvalidStateTransitionException()
+
+    def delete_job_execution(self, job_id, thing_name, execution_number, force):
+        try:
+            job_execution = self.job_executions[(job_id, thing_name)]
+        except KeyError:
+            raise ResourceNotFoundException()
+
+        if job_execution.execution_number != execution_number:
+            raise ResourceNotFoundException()
+
+        if job_execution.status == 'IN_PROGRESS' and force:
+            del self.job_executions[(job_id, thing_name)]
+        elif job_execution.status != 'IN_PROGRESS':
+            del self.job_executions[(job_id, thing_name)]
+        else:
+            raise InvalidStateTransitionException()
+
+    def list_job_executions_for_job(self, job_id, status, max_results, next_token):
+        job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[0] == job_id]
+
+        if status is not None:
+            job_executions = list(filter(lambda elem:
+                                         elem["status"] == status, job_executions))
+
+        token = next_token
+        if token is None:
+            job_executions = job_executions[0:max_results]
+            next_token = str(max_results) if len(job_executions) > max_results else None
+        else:
+            token = int(token)
+            job_executions = job_executions[token:token + max_results]
+            next_token = str(token + max_results) if len(job_executions) > token + max_results else None
+
+        return job_executions, next_token
+
+    def list_job_executions_for_thing(self, thing_name, status, max_results, next_token):
+        job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[1] == thing_name]
+
+        if status is not None:
+            job_executions = list(filter(lambda elem:
+                                         elem["status"] == status, 
job_executions)) + + token = next_token + if token is None: + job_executions = job_executions[0:max_results] + next_token = str(max_results) if len(job_executions) > max_results else None + else: + token = int(token) + job_executions = job_executions[token:token + max_results] + next_token = str(token + max_results) if len(job_executions) > token + max_results else None + + return job_executions, next_token + + +available_regions = boto3.session.Session().get_available_regions("iot") +iot_backends = {region: IoTBackend(region) for region in available_regions} diff --git a/moto/iot/responses.py b/moto/iot/responses.py index 68a206047..8954c7003 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -1,613 +1,613 @@ -from __future__ import unicode_literals - -import json -from six.moves.urllib.parse import unquote - -from moto.core.responses import BaseResponse -from .models import iot_backends - - -class IoTResponse(BaseResponse): - SERVICE_NAME = 'iot' - - @property - def iot_backend(self): - return iot_backends[self.region] - - def create_thing(self): - thing_name = self._get_param("thingName") - thing_type_name = self._get_param("thingTypeName") - attribute_payload = self._get_param("attributePayload") - thing_name, thing_arn = self.iot_backend.create_thing( - thing_name=thing_name, - thing_type_name=thing_type_name, - attribute_payload=attribute_payload, - ) - return json.dumps(dict(thingName=thing_name, thingArn=thing_arn)) - - def create_thing_type(self): - thing_type_name = self._get_param("thingTypeName") - thing_type_properties = self._get_param("thingTypeProperties") - thing_type_name, thing_type_arn = self.iot_backend.create_thing_type( - thing_type_name=thing_type_name, - thing_type_properties=thing_type_properties, - ) - return json.dumps(dict(thingTypeName=thing_type_name, thingTypeArn=thing_type_arn)) - - def list_thing_types(self): - previous_next_token = self._get_param("nextToken") - max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier - thing_type_name = self._get_param("thingTypeName") - thing_types = self.iot_backend.list_thing_types( - thing_type_name=thing_type_name - ) - - thing_types = [_.to_dict() for _ in thing_types] - if previous_next_token is None: - result = thing_types[0:max_results] - next_token = str(max_results) if len(thing_types) > max_results else None - else: - token = int(previous_next_token) - result = thing_types[token:token + max_results] - next_token = str(token + max_results) if len(thing_types) > token + max_results else None - - return json.dumps(dict(thingTypes=result, nextToken=next_token)) - - def list_things(self): - previous_next_token = self._get_param("nextToken") - max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier - attribute_name = self._get_param("attributeName") - attribute_value = self._get_param("attributeValue") - thing_type_name = self._get_param("thingTypeName") - things, next_token = self.iot_backend.list_things( - attribute_name=attribute_name, - attribute_value=attribute_value, - thing_type_name=thing_type_name, - max_results=max_results, - token=previous_next_token - ) - - return json.dumps(dict(things=things, nextToken=next_token)) - - def describe_thing(self): - thing_name = self._get_param("thingName") - thing = self.iot_backend.describe_thing( - thing_name=thing_name, - ) - return json.dumps(thing.to_dict(include_default_client_id=True)) - - def describe_thing_type(self): - thing_type_name = 
self._get_param("thingTypeName") - thing_type = self.iot_backend.describe_thing_type( - thing_type_name=thing_type_name, - ) - return json.dumps(thing_type.to_dict()) - - def delete_thing(self): - thing_name = self._get_param("thingName") - expected_version = self._get_param("expectedVersion") - self.iot_backend.delete_thing( - thing_name=thing_name, - expected_version=expected_version, - ) - return json.dumps(dict()) - - def delete_thing_type(self): - thing_type_name = self._get_param("thingTypeName") - self.iot_backend.delete_thing_type( - thing_type_name=thing_type_name, - ) - return json.dumps(dict()) - - def update_thing(self): - thing_name = self._get_param("thingName") - thing_type_name = self._get_param("thingTypeName") - attribute_payload = self._get_param("attributePayload") - expected_version = self._get_param("expectedVersion") - remove_thing_type = self._get_param("removeThingType") - self.iot_backend.update_thing( - thing_name=thing_name, - thing_type_name=thing_type_name, - attribute_payload=attribute_payload, - expected_version=expected_version, - remove_thing_type=remove_thing_type, - ) - return json.dumps(dict()) - - def create_job(self): - job_arn, job_id, description = self.iot_backend.create_job( - job_id=self._get_param("jobId"), - targets=self._get_param("targets"), - description=self._get_param("description"), - document_source=self._get_param("documentSource"), - document=self._get_param("document"), - presigned_url_config=self._get_param("presignedUrlConfig"), - target_selection=self._get_param("targetSelection"), - job_executions_rollout_config=self._get_param("jobExecutionsRolloutConfig"), - document_parameters=self._get_param("documentParameters") - ) - - return json.dumps(dict(jobArn=job_arn, jobId=job_id, description=description)) - - def describe_job(self): - job = self.iot_backend.describe_job(job_id=self._get_param("jobId")) - return json.dumps(dict( - documentSource=job.document_source, - job=dict( - comment=job.comment, - completedAt=job.completed_at, - createdAt=job.created_at, - description=job.description, - documentParameters=job.document_parameters, - forceCanceled=job.force, - reasonCode=job.reason_code, - jobArn=job.job_arn, - jobExecutionsRolloutConfig=job.job_executions_rollout_config, - jobId=job.job_id, - jobProcessDetails=job.job_process_details, - lastUpdatedAt=job.last_updated_at, - presignedUrlConfig=job.presigned_url_config, - status=job.status, - targets=job.targets, - targetSelection=job.target_selection - ))) - - def delete_job(self): - job_id = self._get_param("jobId") - force = self._get_bool_param("force") - - self.iot_backend.delete_job(job_id=job_id, - force=force) - - return json.dumps(dict()) - - def cancel_job(self): - job_id = self._get_param("jobId") - reason_code = self._get_param("reasonCode") - comment = self._get_param("comment") - force = self._get_bool_param("force") - - job = self.iot_backend.cancel_job(job_id=job_id, - reason_code=reason_code, - comment=comment, - force=force) - - return json.dumps(job.to_dict()) - - def get_job_document(self): - job = self.iot_backend.get_job_document(job_id=self._get_param("jobId")) - - if job.document is not None: - return json.dumps({'document': job.document}) - else: - # job.document_source is not None: - # TODO: needs to be implemented to get document_source's content from S3 - return json.dumps({'document': ''}) - - def list_jobs(self): - status = self._get_param("status"), - target_selection = self._get_param("targetSelection"), - max_results = 
self._get_int_param("maxResults", 50) # not the default, but makes testing easier - previous_next_token = self._get_param("nextToken") - thing_group_name = self._get_param("thingGroupName"), - thing_group_id = self._get_param("thingGroupId") - jobs, next_token = self.iot_backend.list_jobs(status=status, - target_selection=target_selection, - max_results=max_results, - token=previous_next_token, - thing_group_name=thing_group_name, - thing_group_id=thing_group_id) - - return json.dumps(dict(jobs=jobs, nextToken=next_token)) - - def describe_job_execution(self): - job_id = self._get_param("jobId") - thing_name = self._get_param("thingName") - execution_number = self._get_int_param("executionNumber") - job_execution = self.iot_backend.describe_job_execution(job_id=job_id, - thing_name=thing_name, - execution_number=execution_number) - - return json.dumps(dict(execution=job_execution.to_get_dict())) - - def cancel_job_execution(self): - job_id = self._get_param("jobId") - thing_name = self._get_param("thingName") - force = self._get_bool_param("force") - expected_version = self._get_int_param("expectedVersion") - status_details = self._get_param("statusDetails") - - self.iot_backend.cancel_job_execution(job_id=job_id, - thing_name=thing_name, - force=force, - expected_version=expected_version, - status_details=status_details) - - return json.dumps(dict()) - - def delete_job_execution(self): - job_id = self._get_param("jobId") - thing_name = self._get_param("thingName") - execution_number = self._get_int_param("executionNumber") - force = self._get_bool_param("force") - - self.iot_backend.delete_job_execution(job_id=job_id, - thing_name=thing_name, - execution_number=execution_number, - force=force) - - return json.dumps(dict()) - - def list_job_executions_for_job(self): - job_id = self._get_param("jobId") - status = self._get_param("status") - max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier - next_token = self._get_param("nextToken") - job_executions, next_token = self.iot_backend.list_job_executions_for_job(job_id=job_id, - status=status, - max_results=max_results, - next_token=next_token) - - return json.dumps(dict(executionSummaries=job_executions, nextToken=next_token)) - - def list_job_executions_for_thing(self): - thing_name = self._get_param("thingName") - status = self._get_param("status") - max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier - next_token = self._get_param("nextToken") - job_executions, next_token = self.iot_backend.list_job_executions_for_thing(thing_name=thing_name, - status=status, - max_results=max_results, - next_token=next_token) - - return json.dumps(dict(executionSummaries=job_executions, nextToken=next_token)) - - def create_keys_and_certificate(self): - set_as_active = self._get_bool_param("setAsActive") - cert, key_pair = self.iot_backend.create_keys_and_certificate( - set_as_active=set_as_active, - ) - return json.dumps(dict( - certificateArn=cert.arn, - certificateId=cert.certificate_id, - certificatePem=cert.certificate_pem, - keyPair=key_pair - )) - - def delete_certificate(self): - certificate_id = self._get_param("certificateId") - self.iot_backend.delete_certificate( - certificate_id=certificate_id, - ) - return json.dumps(dict()) - - def describe_certificate(self): - certificate_id = self._get_param("certificateId") - certificate = self.iot_backend.describe_certificate( - certificate_id=certificate_id, - ) - return 
json.dumps(dict(certificateDescription=certificate.to_description_dict())) - - def list_certificates(self): - # page_size = self._get_int_param("pageSize") - # marker = self._get_param("marker") - # ascending_order = self._get_param("ascendingOrder") - certificates = self.iot_backend.list_certificates() - # TODO: implement pagination in the future - return json.dumps(dict(certificates=[_.to_dict() for _ in certificates])) - - def register_certificate(self): - certificate_pem = self._get_param("certificatePem") - ca_certificate_pem = self._get_param("caCertificatePem") - set_as_active = self._get_bool_param("setAsActive") - status = self._get_param("status") - - cert = self.iot_backend.register_certificate( - certificate_pem=certificate_pem, - ca_certificate_pem=ca_certificate_pem, - set_as_active=set_as_active, - status=status - ) - return json.dumps(dict(certificateId=cert.certificate_id, certificateArn=cert.arn)) - - def update_certificate(self): - certificate_id = self._get_param("certificateId") - new_status = self._get_param("newStatus") - self.iot_backend.update_certificate( - certificate_id=certificate_id, - new_status=new_status, - ) - return json.dumps(dict()) - - def create_policy(self): - policy_name = self._get_param("policyName") - policy_document = self._get_param("policyDocument") - policy = self.iot_backend.create_policy( - policy_name=policy_name, - policy_document=policy_document, - ) - return json.dumps(policy.to_dict_at_creation()) - - def list_policies(self): - # marker = self._get_param("marker") - # page_size = self._get_int_param("pageSize") - # ascending_order = self._get_param("ascendingOrder") - policies = self.iot_backend.list_policies() - - # TODO: implement pagination in the future - return json.dumps(dict(policies=[_.to_dict() for _ in policies])) - - def get_policy(self): - policy_name = self._get_param("policyName") - policy = self.iot_backend.get_policy( - policy_name=policy_name, - ) - return json.dumps(policy.to_get_dict()) - - def delete_policy(self): - policy_name = self._get_param("policyName") - self.iot_backend.delete_policy( - policy_name=policy_name, - ) - return json.dumps(dict()) - - def create_policy_version(self): - policy_name = self._get_param('policyName') - policy_document = self._get_param('policyDocument') - set_as_default = self._get_bool_param('setAsDefault') - policy_version = self.iot_backend.create_policy_version(policy_name, policy_document, set_as_default) - - return json.dumps(dict(policy_version.to_dict_at_creation())) - - def set_default_policy_version(self): - policy_name = self._get_param('policyName') - version_id = self._get_param('policyVersionId') - self.iot_backend.set_default_policy_version(policy_name, version_id) - - return json.dumps(dict()) - - def get_policy_version(self): - policy_name = self._get_param('policyName') - version_id = self._get_param('policyVersionId') - policy_version = self.iot_backend.get_policy_version(policy_name, version_id) - return json.dumps(dict(policy_version.to_get_dict())) - - def list_policy_versions(self): - policy_name = self._get_param('policyName') - policiy_versions = self.iot_backend.list_policy_versions(policy_name=policy_name) - - return json.dumps(dict(policyVersions=[_.to_dict() for _ in policiy_versions])) - - def delete_policy_version(self): - policy_name = self._get_param('policyName') - version_id = self._get_param('policyVersionId') - self.iot_backend.delete_policy_version(policy_name, version_id) - - return json.dumps(dict()) - - def attach_policy(self): - policy_name = 
self._get_param("policyName") - target = self._get_param('target') - self.iot_backend.attach_policy( - policy_name=policy_name, - target=target, - ) - return json.dumps(dict()) - - def list_attached_policies(self): - principal = unquote(self._get_param('target')) - # marker = self._get_param("marker") - # page_size = self._get_int_param("pageSize") - policies = self.iot_backend.list_attached_policies( - target=principal - ) - # TODO: implement pagination in the future - next_marker = None - return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) - - def attach_principal_policy(self): - policy_name = self._get_param("policyName") - principal = self.headers.get('x-amzn-iot-principal') - self.iot_backend.attach_principal_policy( - policy_name=policy_name, - principal_arn=principal, - ) - return json.dumps(dict()) - - def detach_policy(self): - policy_name = self._get_param("policyName") - target = self._get_param('target') - self.iot_backend.detach_policy( - policy_name=policy_name, - target=target, - ) - return json.dumps(dict()) - - def detach_principal_policy(self): - policy_name = self._get_param("policyName") - principal = self.headers.get('x-amzn-iot-principal') - self.iot_backend.detach_principal_policy( - policy_name=policy_name, - principal_arn=principal, - ) - return json.dumps(dict()) - - def list_principal_policies(self): - principal = self.headers.get('x-amzn-iot-principal') - # marker = self._get_param("marker") - # page_size = self._get_int_param("pageSize") - # ascending_order = self._get_param("ascendingOrder") - policies = self.iot_backend.list_principal_policies( - principal_arn=principal - ) - # TODO: implement pagination in the future - next_marker = None - return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) - - def list_policy_principals(self): - policy_name = self.headers.get('x-amzn-iot-policy') - # marker = self._get_param("marker") - # page_size = self._get_int_param("pageSize") - # ascending_order = self._get_param("ascendingOrder") - principals = self.iot_backend.list_policy_principals( - policy_name=policy_name, - ) - # TODO: implement pagination in the future - next_marker = None - return json.dumps(dict(principals=principals, nextMarker=next_marker)) - - def attach_thing_principal(self): - thing_name = self._get_param("thingName") - principal = self.headers.get('x-amzn-principal') - self.iot_backend.attach_thing_principal( - thing_name=thing_name, - principal_arn=principal, - ) - return json.dumps(dict()) - - def detach_thing_principal(self): - thing_name = self._get_param("thingName") - principal = self.headers.get('x-amzn-principal') - self.iot_backend.detach_thing_principal( - thing_name=thing_name, - principal_arn=principal, - ) - return json.dumps(dict()) - - def list_principal_things(self): - next_token = self._get_param("nextToken") - # max_results = self._get_int_param("maxResults") - principal = self.headers.get('x-amzn-principal') - things = self.iot_backend.list_principal_things( - principal_arn=principal, - ) - # TODO: implement pagination in the future - next_token = None - return json.dumps(dict(things=things, nextToken=next_token)) - - def list_thing_principals(self): - thing_name = self._get_param("thingName") - principals = self.iot_backend.list_thing_principals( - thing_name=thing_name, - ) - return json.dumps(dict(principals=principals)) - - def describe_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - thing_group = 
self.iot_backend.describe_thing_group( - thing_group_name=thing_group_name, - ) - return json.dumps(thing_group.to_dict()) - - def create_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - parent_group_name = self._get_param("parentGroupName") - thing_group_properties = self._get_param("thingGroupProperties") - thing_group_name, thing_group_arn, thing_group_id = self.iot_backend.create_thing_group( - thing_group_name=thing_group_name, - parent_group_name=parent_group_name, - thing_group_properties=thing_group_properties, - ) - return json.dumps(dict( - thingGroupName=thing_group_name, - thingGroupArn=thing_group_arn, - thingGroupId=thing_group_id) - ) - - def delete_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - expected_version = self._get_param("expectedVersion") - self.iot_backend.delete_thing_group( - thing_group_name=thing_group_name, - expected_version=expected_version, - ) - return json.dumps(dict()) - - def list_thing_groups(self): - # next_token = self._get_param("nextToken") - # max_results = self._get_int_param("maxResults") - parent_group = self._get_param("parentGroup") - name_prefix_filter = self._get_param("namePrefixFilter") - recursive = self._get_param("recursive") - thing_groups = self.iot_backend.list_thing_groups( - parent_group=parent_group, - name_prefix_filter=name_prefix_filter, - recursive=recursive, - ) - next_token = None - rets = [{'groupName': _.thing_group_name, 'groupArn': _.arn} for _ in thing_groups] - # TODO: implement pagination in the future - return json.dumps(dict(thingGroups=rets, nextToken=next_token)) - - def update_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - thing_group_properties = self._get_param("thingGroupProperties") - expected_version = self._get_param("expectedVersion") - version = self.iot_backend.update_thing_group( - thing_group_name=thing_group_name, - thing_group_properties=thing_group_properties, - expected_version=expected_version, - ) - return json.dumps(dict(version=version)) - - def add_thing_to_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - thing_group_arn = self._get_param("thingGroupArn") - thing_name = self._get_param("thingName") - thing_arn = self._get_param("thingArn") - self.iot_backend.add_thing_to_thing_group( - thing_group_name=thing_group_name, - thing_group_arn=thing_group_arn, - thing_name=thing_name, - thing_arn=thing_arn, - ) - return json.dumps(dict()) - - def remove_thing_from_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - thing_group_arn = self._get_param("thingGroupArn") - thing_name = self._get_param("thingName") - thing_arn = self._get_param("thingArn") - self.iot_backend.remove_thing_from_thing_group( - thing_group_name=thing_group_name, - thing_group_arn=thing_group_arn, - thing_name=thing_name, - thing_arn=thing_arn, - ) - return json.dumps(dict()) - - def list_things_in_thing_group(self): - thing_group_name = self._get_param("thingGroupName") - recursive = self._get_param("recursive") - # next_token = self._get_param("nextToken") - # max_results = self._get_int_param("maxResults") - things = self.iot_backend.list_things_in_thing_group( - thing_group_name=thing_group_name, - recursive=recursive, - ) - next_token = None - thing_names = [_.thing_name for _ in things] - # TODO: implement pagination in the future - return json.dumps(dict(things=thing_names, nextToken=next_token)) - - def list_thing_groups_for_thing(self): - thing_name = self._get_param("thingName") - # 
next_token = self._get_param("nextToken") - # max_results = self._get_int_param("maxResults") - thing_groups = self.iot_backend.list_thing_groups_for_thing( - thing_name=thing_name - ) - next_token = None - # TODO: implement pagination in the future - return json.dumps(dict(thingGroups=thing_groups, nextToken=next_token)) - - def update_thing_groups_for_thing(self): - thing_name = self._get_param("thingName") - thing_groups_to_add = self._get_param("thingGroupsToAdd") or [] - thing_groups_to_remove = self._get_param("thingGroupsToRemove") or [] - self.iot_backend.update_thing_groups_for_thing( - thing_name=thing_name, - thing_groups_to_add=thing_groups_to_add, - thing_groups_to_remove=thing_groups_to_remove, - ) - return json.dumps(dict()) +from __future__ import unicode_literals + +import json +from six.moves.urllib.parse import unquote + +from moto.core.responses import BaseResponse +from .models import iot_backends + + +class IoTResponse(BaseResponse): + SERVICE_NAME = 'iot' + + @property + def iot_backend(self): + return iot_backends[self.region] + + def create_thing(self): + thing_name = self._get_param("thingName") + thing_type_name = self._get_param("thingTypeName") + attribute_payload = self._get_param("attributePayload") + thing_name, thing_arn = self.iot_backend.create_thing( + thing_name=thing_name, + thing_type_name=thing_type_name, + attribute_payload=attribute_payload, + ) + return json.dumps(dict(thingName=thing_name, thingArn=thing_arn)) + + def create_thing_type(self): + thing_type_name = self._get_param("thingTypeName") + thing_type_properties = self._get_param("thingTypeProperties") + thing_type_name, thing_type_arn = self.iot_backend.create_thing_type( + thing_type_name=thing_type_name, + thing_type_properties=thing_type_properties, + ) + return json.dumps(dict(thingTypeName=thing_type_name, thingTypeArn=thing_type_arn)) + + def list_thing_types(self): + previous_next_token = self._get_param("nextToken") + max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier + thing_type_name = self._get_param("thingTypeName") + thing_types = self.iot_backend.list_thing_types( + thing_type_name=thing_type_name + ) + + thing_types = [_.to_dict() for _ in thing_types] + if previous_next_token is None: + result = thing_types[0:max_results] + next_token = str(max_results) if len(thing_types) > max_results else None + else: + token = int(previous_next_token) + result = thing_types[token:token + max_results] + next_token = str(token + max_results) if len(thing_types) > token + max_results else None + + return json.dumps(dict(thingTypes=result, nextToken=next_token)) + + def list_things(self): + previous_next_token = self._get_param("nextToken") + max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier + attribute_name = self._get_param("attributeName") + attribute_value = self._get_param("attributeValue") + thing_type_name = self._get_param("thingTypeName") + things, next_token = self.iot_backend.list_things( + attribute_name=attribute_name, + attribute_value=attribute_value, + thing_type_name=thing_type_name, + max_results=max_results, + token=previous_next_token + ) + + return json.dumps(dict(things=things, nextToken=next_token)) + + def describe_thing(self): + thing_name = self._get_param("thingName") + thing = self.iot_backend.describe_thing( + thing_name=thing_name, + ) + return json.dumps(thing.to_dict(include_default_client_id=True)) + + def describe_thing_type(self): + thing_type_name = 
self._get_param("thingTypeName")
+        thing_type = self.iot_backend.describe_thing_type(
+            thing_type_name=thing_type_name,
+        )
+        return json.dumps(thing_type.to_dict())
+
+    def delete_thing(self):
+        thing_name = self._get_param("thingName")
+        expected_version = self._get_param("expectedVersion")
+        self.iot_backend.delete_thing(
+            thing_name=thing_name,
+            expected_version=expected_version,
+        )
+        return json.dumps(dict())
+
+    def delete_thing_type(self):
+        thing_type_name = self._get_param("thingTypeName")
+        self.iot_backend.delete_thing_type(
+            thing_type_name=thing_type_name,
+        )
+        return json.dumps(dict())
+
+    def update_thing(self):
+        thing_name = self._get_param("thingName")
+        thing_type_name = self._get_param("thingTypeName")
+        attribute_payload = self._get_param("attributePayload")
+        expected_version = self._get_param("expectedVersion")
+        remove_thing_type = self._get_param("removeThingType")
+        self.iot_backend.update_thing(
+            thing_name=thing_name,
+            thing_type_name=thing_type_name,
+            attribute_payload=attribute_payload,
+            expected_version=expected_version,
+            remove_thing_type=remove_thing_type,
+        )
+        return json.dumps(dict())
+
+    def create_job(self):
+        job_arn, job_id, description = self.iot_backend.create_job(
+            job_id=self._get_param("jobId"),
+            targets=self._get_param("targets"),
+            description=self._get_param("description"),
+            document_source=self._get_param("documentSource"),
+            document=self._get_param("document"),
+            presigned_url_config=self._get_param("presignedUrlConfig"),
+            target_selection=self._get_param("targetSelection"),
+            job_executions_rollout_config=self._get_param("jobExecutionsRolloutConfig"),
+            document_parameters=self._get_param("documentParameters")
+        )
+
+        return json.dumps(dict(jobArn=job_arn, jobId=job_id, description=description))
+
+    def describe_job(self):
+        job = self.iot_backend.describe_job(job_id=self._get_param("jobId"))
+        return json.dumps(dict(
+            documentSource=job.document_source,
+            job=dict(
+                comment=job.comment,
+                completedAt=job.completed_at,
+                createdAt=job.created_at,
+                description=job.description,
+                documentParameters=job.document_parameters,
+                forceCanceled=job.force,
+                reasonCode=job.reason_code,
+                jobArn=job.job_arn,
+                jobExecutionsRolloutConfig=job.job_executions_rollout_config,
+                jobId=job.job_id,
+                jobProcessDetails=job.job_process_details,
+                lastUpdatedAt=job.last_updated_at,
+                presignedUrlConfig=job.presigned_url_config,
+                status=job.status,
+                targets=job.targets,
+                targetSelection=job.target_selection
+            )))
+
+    def delete_job(self):
+        job_id = self._get_param("jobId")
+        force = self._get_bool_param("force")
+
+        self.iot_backend.delete_job(job_id=job_id,
+                                    force=force)
+
+        return json.dumps(dict())
+
+    def cancel_job(self):
+        job_id = self._get_param("jobId")
+        reason_code = self._get_param("reasonCode")
+        comment = self._get_param("comment")
+        force = self._get_bool_param("force")
+
+        job = self.iot_backend.cancel_job(job_id=job_id,
+                                          reason_code=reason_code,
+                                          comment=comment,
+                                          force=force)
+
+        return json.dumps(job.to_dict())
+
+    def get_job_document(self):
+        job = self.iot_backend.get_job_document(job_id=self._get_param("jobId"))
+
+        if job.document is not None:
+            return json.dumps({'document': job.document})
+        else:
+            # job.document_source is not None here
+            # TODO: fetch the job document from S3 via document_source
+            return json.dumps({'document': ''})
+
+    def list_jobs(self):
+        status = self._get_param("status")
+        target_selection = self._get_param("targetSelection")
+        max_results = 
self._get_int_param("maxResults", 50) # not the default, but makes testing easier + previous_next_token = self._get_param("nextToken") + thing_group_name = self._get_param("thingGroupName"), + thing_group_id = self._get_param("thingGroupId") + jobs, next_token = self.iot_backend.list_jobs(status=status, + target_selection=target_selection, + max_results=max_results, + token=previous_next_token, + thing_group_name=thing_group_name, + thing_group_id=thing_group_id) + + return json.dumps(dict(jobs=jobs, nextToken=next_token)) + + def describe_job_execution(self): + job_id = self._get_param("jobId") + thing_name = self._get_param("thingName") + execution_number = self._get_int_param("executionNumber") + job_execution = self.iot_backend.describe_job_execution(job_id=job_id, + thing_name=thing_name, + execution_number=execution_number) + + return json.dumps(dict(execution=job_execution.to_get_dict())) + + def cancel_job_execution(self): + job_id = self._get_param("jobId") + thing_name = self._get_param("thingName") + force = self._get_bool_param("force") + expected_version = self._get_int_param("expectedVersion") + status_details = self._get_param("statusDetails") + + self.iot_backend.cancel_job_execution(job_id=job_id, + thing_name=thing_name, + force=force, + expected_version=expected_version, + status_details=status_details) + + return json.dumps(dict()) + + def delete_job_execution(self): + job_id = self._get_param("jobId") + thing_name = self._get_param("thingName") + execution_number = self._get_int_param("executionNumber") + force = self._get_bool_param("force") + + self.iot_backend.delete_job_execution(job_id=job_id, + thing_name=thing_name, + execution_number=execution_number, + force=force) + + return json.dumps(dict()) + + def list_job_executions_for_job(self): + job_id = self._get_param("jobId") + status = self._get_param("status") + max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier + next_token = self._get_param("nextToken") + job_executions, next_token = self.iot_backend.list_job_executions_for_job(job_id=job_id, + status=status, + max_results=max_results, + next_token=next_token) + + return json.dumps(dict(executionSummaries=job_executions, nextToken=next_token)) + + def list_job_executions_for_thing(self): + thing_name = self._get_param("thingName") + status = self._get_param("status") + max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier + next_token = self._get_param("nextToken") + job_executions, next_token = self.iot_backend.list_job_executions_for_thing(thing_name=thing_name, + status=status, + max_results=max_results, + next_token=next_token) + + return json.dumps(dict(executionSummaries=job_executions, nextToken=next_token)) + + def create_keys_and_certificate(self): + set_as_active = self._get_bool_param("setAsActive") + cert, key_pair = self.iot_backend.create_keys_and_certificate( + set_as_active=set_as_active, + ) + return json.dumps(dict( + certificateArn=cert.arn, + certificateId=cert.certificate_id, + certificatePem=cert.certificate_pem, + keyPair=key_pair + )) + + def delete_certificate(self): + certificate_id = self._get_param("certificateId") + self.iot_backend.delete_certificate( + certificate_id=certificate_id, + ) + return json.dumps(dict()) + + def describe_certificate(self): + certificate_id = self._get_param("certificateId") + certificate = self.iot_backend.describe_certificate( + certificate_id=certificate_id, + ) + return 
+
+    def list_certificates(self):
+        # page_size = self._get_int_param("pageSize")
+        # marker = self._get_param("marker")
+        # ascending_order = self._get_param("ascendingOrder")
+        certificates = self.iot_backend.list_certificates()
+        # TODO: implement pagination in the future
+        return json.dumps(dict(certificates=[_.to_dict() for _ in certificates]))
+
+    def register_certificate(self):
+        certificate_pem = self._get_param("certificatePem")
+        ca_certificate_pem = self._get_param("caCertificatePem")
+        set_as_active = self._get_bool_param("setAsActive")
+        status = self._get_param("status")
+
+        cert = self.iot_backend.register_certificate(
+            certificate_pem=certificate_pem,
+            ca_certificate_pem=ca_certificate_pem,
+            set_as_active=set_as_active,
+            status=status
+        )
+        return json.dumps(dict(certificateId=cert.certificate_id, certificateArn=cert.arn))
+
+    def update_certificate(self):
+        certificate_id = self._get_param("certificateId")
+        new_status = self._get_param("newStatus")
+        self.iot_backend.update_certificate(
+            certificate_id=certificate_id,
+            new_status=new_status,
+        )
+        return json.dumps(dict())
+
+    def create_policy(self):
+        policy_name = self._get_param("policyName")
+        policy_document = self._get_param("policyDocument")
+        policy = self.iot_backend.create_policy(
+            policy_name=policy_name,
+            policy_document=policy_document,
+        )
+        return json.dumps(policy.to_dict_at_creation())
+
+    def list_policies(self):
+        # marker = self._get_param("marker")
+        # page_size = self._get_int_param("pageSize")
+        # ascending_order = self._get_param("ascendingOrder")
+        policies = self.iot_backend.list_policies()
+
+        # TODO: implement pagination in the future
+        return json.dumps(dict(policies=[_.to_dict() for _ in policies]))
+
+    def get_policy(self):
+        policy_name = self._get_param("policyName")
+        policy = self.iot_backend.get_policy(
+            policy_name=policy_name,
+        )
+        return json.dumps(policy.to_get_dict())
+
+    def delete_policy(self):
+        policy_name = self._get_param("policyName")
+        self.iot_backend.delete_policy(
+            policy_name=policy_name,
+        )
+        return json.dumps(dict())
+
+    def create_policy_version(self):
+        policy_name = self._get_param('policyName')
+        policy_document = self._get_param('policyDocument')
+        set_as_default = self._get_bool_param('setAsDefault')
+        policy_version = self.iot_backend.create_policy_version(policy_name, policy_document, set_as_default)
+
+        return json.dumps(policy_version.to_dict_at_creation())
+
+    def set_default_policy_version(self):
+        policy_name = self._get_param('policyName')
+        version_id = self._get_param('policyVersionId')
+        self.iot_backend.set_default_policy_version(policy_name, version_id)
+
+        return json.dumps(dict())
+
+    def get_policy_version(self):
+        policy_name = self._get_param('policyName')
+        version_id = self._get_param('policyVersionId')
+        policy_version = self.iot_backend.get_policy_version(policy_name, version_id)
+        return json.dumps(policy_version.to_get_dict())
+
+    def list_policy_versions(self):
+        policy_name = self._get_param('policyName')
+        policy_versions = self.iot_backend.list_policy_versions(policy_name=policy_name)
+
+        return json.dumps(dict(policyVersions=[_.to_dict() for _ in policy_versions]))
+
+    def delete_policy_version(self):
+        policy_name = self._get_param('policyName')
+        version_id = self._get_param('policyVersionId')
+        self.iot_backend.delete_policy_version(policy_name, version_id)
+
+        return json.dumps(dict())
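+
+    # Illustration (mirrors test_policy_versions in this patch): create_policy
+    # starts a policy at policyVersionId '1'; each create_policy_version
+    # increments it ('2', '3', ...), set_default_policy_version moves the
+    # default, and deleting the default version fails with
+    # "Cannot delete the default version of a policy".
+
+    def attach_policy(self):
+        policy_name = 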
self._get_param("policyName") + target = self._get_param('target') + self.iot_backend.attach_policy( + policy_name=policy_name, + target=target, + ) + return json.dumps(dict()) + + def list_attached_policies(self): + principal = unquote(self._get_param('target')) + # marker = self._get_param("marker") + # page_size = self._get_int_param("pageSize") + policies = self.iot_backend.list_attached_policies( + target=principal + ) + # TODO: implement pagination in the future + next_marker = None + return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) + + def attach_principal_policy(self): + policy_name = self._get_param("policyName") + principal = self.headers.get('x-amzn-iot-principal') + self.iot_backend.attach_principal_policy( + policy_name=policy_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def detach_policy(self): + policy_name = self._get_param("policyName") + target = self._get_param('target') + self.iot_backend.detach_policy( + policy_name=policy_name, + target=target, + ) + return json.dumps(dict()) + + def detach_principal_policy(self): + policy_name = self._get_param("policyName") + principal = self.headers.get('x-amzn-iot-principal') + self.iot_backend.detach_principal_policy( + policy_name=policy_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def list_principal_policies(self): + principal = self.headers.get('x-amzn-iot-principal') + # marker = self._get_param("marker") + # page_size = self._get_int_param("pageSize") + # ascending_order = self._get_param("ascendingOrder") + policies = self.iot_backend.list_principal_policies( + principal_arn=principal + ) + # TODO: implement pagination in the future + next_marker = None + return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) + + def list_policy_principals(self): + policy_name = self.headers.get('x-amzn-iot-policy') + # marker = self._get_param("marker") + # page_size = self._get_int_param("pageSize") + # ascending_order = self._get_param("ascendingOrder") + principals = self.iot_backend.list_policy_principals( + policy_name=policy_name, + ) + # TODO: implement pagination in the future + next_marker = None + return json.dumps(dict(principals=principals, nextMarker=next_marker)) + + def attach_thing_principal(self): + thing_name = self._get_param("thingName") + principal = self.headers.get('x-amzn-principal') + self.iot_backend.attach_thing_principal( + thing_name=thing_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def detach_thing_principal(self): + thing_name = self._get_param("thingName") + principal = self.headers.get('x-amzn-principal') + self.iot_backend.detach_thing_principal( + thing_name=thing_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def list_principal_things(self): + next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + principal = self.headers.get('x-amzn-principal') + things = self.iot_backend.list_principal_things( + principal_arn=principal, + ) + # TODO: implement pagination in the future + next_token = None + return json.dumps(dict(things=things, nextToken=next_token)) + + def list_thing_principals(self): + thing_name = self._get_param("thingName") + principals = self.iot_backend.list_thing_principals( + thing_name=thing_name, + ) + return json.dumps(dict(principals=principals)) + + def describe_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group = 
self.iot_backend.describe_thing_group( + thing_group_name=thing_group_name, + ) + return json.dumps(thing_group.to_dict()) + + def create_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + parent_group_name = self._get_param("parentGroupName") + thing_group_properties = self._get_param("thingGroupProperties") + thing_group_name, thing_group_arn, thing_group_id = self.iot_backend.create_thing_group( + thing_group_name=thing_group_name, + parent_group_name=parent_group_name, + thing_group_properties=thing_group_properties, + ) + return json.dumps(dict( + thingGroupName=thing_group_name, + thingGroupArn=thing_group_arn, + thingGroupId=thing_group_id) + ) + + def delete_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + expected_version = self._get_param("expectedVersion") + self.iot_backend.delete_thing_group( + thing_group_name=thing_group_name, + expected_version=expected_version, + ) + return json.dumps(dict()) + + def list_thing_groups(self): + # next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + parent_group = self._get_param("parentGroup") + name_prefix_filter = self._get_param("namePrefixFilter") + recursive = self._get_param("recursive") + thing_groups = self.iot_backend.list_thing_groups( + parent_group=parent_group, + name_prefix_filter=name_prefix_filter, + recursive=recursive, + ) + next_token = None + rets = [{'groupName': _.thing_group_name, 'groupArn': _.arn} for _ in thing_groups] + # TODO: implement pagination in the future + return json.dumps(dict(thingGroups=rets, nextToken=next_token)) + + def update_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_properties = self._get_param("thingGroupProperties") + expected_version = self._get_param("expectedVersion") + version = self.iot_backend.update_thing_group( + thing_group_name=thing_group_name, + thing_group_properties=thing_group_properties, + expected_version=expected_version, + ) + return json.dumps(dict(version=version)) + + def add_thing_to_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_arn = self._get_param("thingGroupArn") + thing_name = self._get_param("thingName") + thing_arn = self._get_param("thingArn") + self.iot_backend.add_thing_to_thing_group( + thing_group_name=thing_group_name, + thing_group_arn=thing_group_arn, + thing_name=thing_name, + thing_arn=thing_arn, + ) + return json.dumps(dict()) + + def remove_thing_from_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_arn = self._get_param("thingGroupArn") + thing_name = self._get_param("thingName") + thing_arn = self._get_param("thingArn") + self.iot_backend.remove_thing_from_thing_group( + thing_group_name=thing_group_name, + thing_group_arn=thing_group_arn, + thing_name=thing_name, + thing_arn=thing_arn, + ) + return json.dumps(dict()) + + def list_things_in_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + recursive = self._get_param("recursive") + # next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + things = self.iot_backend.list_things_in_thing_group( + thing_group_name=thing_group_name, + recursive=recursive, + ) + next_token = None + thing_names = [_.thing_name for _ in things] + # TODO: implement pagination in the future + return json.dumps(dict(things=thing_names, nextToken=next_token)) + + def list_thing_groups_for_thing(self): + thing_name = self._get_param("thingName") + # 
next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + thing_groups = self.iot_backend.list_thing_groups_for_thing( + thing_name=thing_name + ) + next_token = None + # TODO: implement pagination in the future + return json.dumps(dict(thingGroups=thing_groups, nextToken=next_token)) + + def update_thing_groups_for_thing(self): + thing_name = self._get_param("thingName") + thing_groups_to_add = self._get_param("thingGroupsToAdd") or [] + thing_groups_to_remove = self._get_param("thingGroupsToRemove") or [] + self.iot_backend.update_thing_groups_for_thing( + thing_name=thing_name, + thing_groups_to_add=thing_groups_to_add, + thing_groups_to_remove=thing_groups_to_remove, + ) + return json.dumps(dict()) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 8f11912b0..4a142b292 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -1,1409 +1,1409 @@ -from __future__ import unicode_literals - -import json -import sure #noqa -import boto3 - -from moto import mock_iot -from botocore.exceptions import ClientError -from nose.tools import assert_raises - -@mock_iot -def test_attach_policy(): - client = boto3.client('iot', region_name='ap-northeast-1') - policy_name = 'my-policy' - doc = '{}' - - cert = client.create_keys_and_certificate(setAsActive=True) - cert_arn = cert['certificateArn'] - client.create_policy(policyName=policy_name, policyDocument=doc) - client.attach_policy(policyName=policy_name, target=cert_arn) - - res = client.list_attached_policies(target=cert_arn) - res.should.have.key('policies').which.should.have.length_of(1) - res['policies'][0]['policyName'].should.equal('my-policy') - - -@mock_iot -def test_detach_policy(): - client = boto3.client('iot', region_name='ap-northeast-1') - policy_name = 'my-policy' - doc = '{}' - - cert = client.create_keys_and_certificate(setAsActive=True) - cert_arn = cert['certificateArn'] - client.create_policy(policyName=policy_name, policyDocument=doc) - client.attach_policy(policyName=policy_name, target=cert_arn) - - res = client.list_attached_policies(target=cert_arn) - res.should.have.key('policies').which.should.have.length_of(1) - res['policies'][0]['policyName'].should.equal('my-policy') - - client.detach_policy(policyName=policy_name, target=cert_arn) - res = client.list_attached_policies(target=cert_arn) - res.should.have.key('policies').which.should.be.empty - - -@mock_iot -def test_list_attached_policies(): - client = boto3.client('iot', region_name='ap-northeast-1') - cert = client.create_keys_and_certificate(setAsActive=True) - policies = client.list_attached_policies(target=cert['certificateArn']) - policies['policies'].should.be.empty - - -@mock_iot -def test_policy_versions(): - client = boto3.client('iot', region_name='ap-northeast-1') - policy_name = 'my-policy' - doc = '{}' - - policy = client.create_policy(policyName=policy_name, policyDocument=doc) - policy.should.have.key('policyName').which.should.equal(policy_name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(json.dumps({})) - policy.should.have.key('policyVersionId').which.should.equal('1') - - policy = client.get_policy(policyName=policy_name) - policy.should.have.key('policyName').which.should.equal(policy_name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(json.dumps({})) - 
policy.should.have.key('defaultVersionId').which.should.equal(policy['defaultVersionId']) - - policy1 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({'version': 'version_1'}), - setAsDefault=True) - policy1.should.have.key('policyArn').which.should_not.be.none - policy1.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) - policy1.should.have.key('policyVersionId').which.should.equal('2') - policy1.should.have.key('isDefaultVersion').which.should.equal(True) - - policy2 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({'version': 'version_2'}), - setAsDefault=False) - policy2.should.have.key('policyArn').which.should_not.be.none - policy2.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_2'})) - policy2.should.have.key('policyVersionId').which.should.equal('3') - policy2.should.have.key('isDefaultVersion').which.should.equal(False) - - policy = client.get_policy(policyName=policy_name) - policy.should.have.key('policyName').which.should.equal(policy_name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) - policy.should.have.key('defaultVersionId').which.should.equal(policy1['policyVersionId']) - - policy_versions = client.list_policy_versions(policyName=policy_name) - policy_versions.should.have.key('policyVersions').which.should.have.length_of(3) - list(map(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])).count(True).should.equal(1) - default_policy = list(filter(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])) - default_policy[0].should.have.key('versionId').should.equal(policy1['policyVersionId']) - - policy = client.get_policy(policyName=policy_name) - policy.should.have.key('policyName').which.should.equal(policy_name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) - policy.should.have.key('defaultVersionId').which.should.equal(policy1['policyVersionId']) - - client.set_default_policy_version(policyName=policy_name, policyVersionId=policy2['policyVersionId']) - policy_versions = client.list_policy_versions(policyName=policy_name) - policy_versions.should.have.key('policyVersions').which.should.have.length_of(3) - list(map(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])).count(True).should.equal(1) - default_policy = list(filter(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])) - default_policy[0].should.have.key('versionId').should.equal(policy2['policyVersionId']) - - policy = client.get_policy(policyName=policy_name) - policy.should.have.key('policyName').which.should.equal(policy_name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_2'})) - policy.should.have.key('defaultVersionId').which.should.equal(policy2['policyVersionId']) - - client.delete_policy_version(policyName=policy_name, policyVersionId='1') - policy_versions = client.list_policy_versions(policyName=policy_name) - policy_versions.should.have.key('policyVersions').which.should.have.length_of(2) - - client.delete_policy_version(policyName=policy_name, policyVersionId=policy1['policyVersionId']) - policy_versions = 
client.list_policy_versions(policyName=policy_name) - policy_versions.should.have.key('policyVersions').which.should.have.length_of(1) - - # should fail as it's the default policy. Should use delete_policy instead - try: - client.delete_policy_version(policyName=policy_name, policyVersionId=policy2['policyVersionId']) - assert False, 'Should have failed in previous call' - except Exception as exception: - exception.response['Error']['Message'].should.equal('Cannot delete the default version of a policy') - - -@mock_iot -def test_things(): - client = boto3.client('iot', region_name='ap-northeast-1') - name = 'my-thing' - type_name = 'my-type-name' - - # thing type - thing_type = client.create_thing_type(thingTypeName=type_name) - thing_type.should.have.key('thingTypeName').which.should.equal(type_name) - thing_type.should.have.key('thingTypeArn') - - res = client.list_thing_types() - res.should.have.key('thingTypes').which.should.have.length_of(1) - for thing_type in res['thingTypes']: - thing_type.should.have.key('thingTypeName').which.should_not.be.none - - thing_type = client.describe_thing_type(thingTypeName=type_name) - thing_type.should.have.key('thingTypeName').which.should.equal(type_name) - thing_type.should.have.key('thingTypeProperties') - thing_type.should.have.key('thingTypeMetadata') - - # thing - thing = client.create_thing(thingName=name, thingTypeName=type_name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - res = client.list_things() - res.should.have.key('things').which.should.have.length_of(1) - for thing in res['things']: - thing.should.have.key('thingName').which.should_not.be.none - thing.should.have.key('thingArn').which.should_not.be.none - - thing = client.update_thing(thingName=name, attributePayload={'attributes': {'k1': 'v1'}}) - res = client.list_things() - res.should.have.key('things').which.should.have.length_of(1) - for thing in res['things']: - thing.should.have.key('thingName').which.should_not.be.none - thing.should.have.key('thingArn').which.should_not.be.none - res['things'][0]['attributes'].should.have.key('k1').which.should.equal('v1') - - thing = client.describe_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('defaultClientId') - thing.should.have.key('thingTypeName') - thing.should.have.key('attributes') - thing.should.have.key('version') - - # delete thing - client.delete_thing(thingName=name) - res = client.list_things() - res.should.have.key('things').which.should.have.length_of(0) - - # delete thing type - client.delete_thing_type(thingTypeName=type_name) - res = client.list_thing_types() - res.should.have.key('thingTypes').which.should.have.length_of(0) - - -@mock_iot -def test_list_thing_types(): - client = boto3.client('iot', region_name='ap-northeast-1') - - for i in range(0, 100): - client.create_thing_type(thingTypeName=str(i + 1)) - - thing_types = client.list_thing_types() - thing_types.should.have.key('nextToken') - thing_types.should.have.key('thingTypes').which.should.have.length_of(50) - thing_types['thingTypes'][0]['thingTypeName'].should.equal('1') - thing_types['thingTypes'][-1]['thingTypeName'].should.equal('50') - - thing_types = client.list_thing_types(nextToken=thing_types['nextToken']) - thing_types.should.have.key('thingTypes').which.should.have.length_of(50) - thing_types.should_not.have.key('nextToken') - thing_types['thingTypes'][0]['thingTypeName'].should.equal('51') - 
thing_types['thingTypes'][-1]['thingTypeName'].should.equal('100') - - -@mock_iot -def test_list_thing_types_with_typename_filter(): - client = boto3.client('iot', region_name='ap-northeast-1') - - client.create_thing_type(thingTypeName='thing') - client.create_thing_type(thingTypeName='thingType') - client.create_thing_type(thingTypeName='thingTypeName') - client.create_thing_type(thingTypeName='thingTypeNameGroup') - client.create_thing_type(thingTypeName='shouldNotFind') - client.create_thing_type(thingTypeName='find me it shall not') - - thing_types = client.list_thing_types(thingTypeName='thing') - thing_types.should_not.have.key('nextToken') - thing_types.should.have.key('thingTypes').which.should.have.length_of(4) - thing_types['thingTypes'][0]['thingTypeName'].should.equal('thing') - thing_types['thingTypes'][-1]['thingTypeName'].should.equal('thingTypeNameGroup') - - thing_types = client.list_thing_types(thingTypeName='thingTypeName') - thing_types.should_not.have.key('nextToken') - thing_types.should.have.key('thingTypes').which.should.have.length_of(2) - thing_types['thingTypes'][0]['thingTypeName'].should.equal('thingTypeName') - thing_types['thingTypes'][-1]['thingTypeName'].should.equal('thingTypeNameGroup') - - -@mock_iot -def test_list_things_with_next_token(): - client = boto3.client('iot', region_name='ap-northeast-1') - - for i in range(0, 200): - client.create_thing(thingName=str(i + 1)) - - things = client.list_things() - things.should.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('1') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/1') - things['things'][-1]['thingName'].should.equal('50') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/50') - - things = client.list_things(nextToken=things['nextToken']) - things.should.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('51') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/51') - things['things'][-1]['thingName'].should.equal('100') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/100') - - things = client.list_things(nextToken=things['nextToken']) - things.should.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('101') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/101') - things['things'][-1]['thingName'].should.equal('150') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/150') - - things = client.list_things(nextToken=things['nextToken']) - things.should_not.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('151') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/151') - things['things'][-1]['thingName'].should.equal('200') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/200') - - -@mock_iot -def test_list_things_with_attribute_and_thing_type_filter_and_next_token(): - client = boto3.client('iot', region_name='ap-northeast-1') - client.create_thing_type(thingTypeName='my-thing-type') - - for i in range(0, 200): - if not (i + 1) % 3: - attribute_payload = { - 'attributes': { - 'foo': 'bar' - } - 
} - elif not (i + 1) % 5: - attribute_payload = { - 'attributes': { - 'bar': 'foo' - } - } - else: - attribute_payload = {} - - if not (i + 1) % 2: - thing_type_name = 'my-thing-type' - client.create_thing(thingName=str(i + 1), thingTypeName=thing_type_name, attributePayload=attribute_payload) - else: - client.create_thing(thingName=str(i + 1), attributePayload=attribute_payload) - - # Test filter for thingTypeName - things = client.list_things(thingTypeName=thing_type_name) - things.should.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('2') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/2') - things['things'][-1]['thingName'].should.equal('100') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/100') - all(item['thingTypeName'] == thing_type_name for item in things['things']) - - things = client.list_things(nextToken=things['nextToken'], thingTypeName=thing_type_name) - things.should_not.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('102') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/102') - things['things'][-1]['thingName'].should.equal('200') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/200') - all(item['thingTypeName'] == thing_type_name for item in things['things']) - - # Test filter for attributes - things = client.list_things(attributeName='foo', attributeValue='bar') - things.should.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(50) - things['things'][0]['thingName'].should.equal('3') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/3') - things['things'][-1]['thingName'].should.equal('150') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/150') - all(item['attributes'] == {'foo': 'bar'} for item in things['things']) - - things = client.list_things(nextToken=things['nextToken'], attributeName='foo', attributeValue='bar') - things.should_not.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(16) - things['things'][0]['thingName'].should.equal('153') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/153') - things['things'][-1]['thingName'].should.equal('198') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/198') - all(item['attributes'] == {'foo': 'bar'} for item in things['things']) - - # Test filter for attributes and thingTypeName - things = client.list_things(thingTypeName=thing_type_name, attributeName='foo', attributeValue='bar') - things.should_not.have.key('nextToken') - things.should.have.key('things').which.should.have.length_of(33) - things['things'][0]['thingName'].should.equal('6') - things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/6') - things['things'][-1]['thingName'].should.equal('198') - things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/198') - all(item['attributes'] == {'foo': 'bar'} and item['thingTypeName'] == thing_type_name for item in things['things']) - - -@mock_iot -def test_certs(): - client = boto3.client('iot', region_name='us-east-1') - cert = client.create_keys_and_certificate(setAsActive=True) - cert.should.have.key('certificateArn').which.should_not.be.none - 
cert.should.have.key('certificateId').which.should_not.be.none - cert.should.have.key('certificatePem').which.should_not.be.none - cert.should.have.key('keyPair') - cert['keyPair'].should.have.key('PublicKey').which.should_not.be.none - cert['keyPair'].should.have.key('PrivateKey').which.should_not.be.none - cert_id = cert['certificateId'] - - cert = client.describe_certificate(certificateId=cert_id) - cert.should.have.key('certificateDescription') - cert_desc = cert['certificateDescription'] - cert_desc.should.have.key('certificateArn').which.should_not.be.none - cert_desc.should.have.key('certificateId').which.should_not.be.none - cert_desc.should.have.key('certificatePem').which.should_not.be.none - cert_desc.should.have.key('status').which.should.equal('ACTIVE') - cert_pem = cert_desc['certificatePem'] - - res = client.list_certificates() - for cert in res['certificates']: - cert.should.have.key('certificateArn').which.should_not.be.none - cert.should.have.key('certificateId').which.should_not.be.none - cert.should.have.key('status').which.should_not.be.none - cert.should.have.key('creationDate').which.should_not.be.none - - client.update_certificate(certificateId=cert_id, newStatus='REVOKED') - cert = client.describe_certificate(certificateId=cert_id) - cert_desc = cert['certificateDescription'] - cert_desc.should.have.key('status').which.should.equal('REVOKED') - - client.delete_certificate(certificateId=cert_id) - res = client.list_certificates() - res.should.have.key('certificates') - - # Test register_certificate flow - cert = client.register_certificate(certificatePem=cert_pem, setAsActive=True) - cert.should.have.key('certificateId').which.should_not.be.none - cert.should.have.key('certificateArn').which.should_not.be.none - cert_id = cert['certificateId'] - - res = client.list_certificates() - res.should.have.key('certificates').which.should.have.length_of(1) - for cert in res['certificates']: - cert.should.have.key('certificateArn').which.should_not.be.none - cert.should.have.key('certificateId').which.should_not.be.none - cert.should.have.key('status').which.should_not.be.none - cert.should.have.key('creationDate').which.should_not.be.none - - client.update_certificate(certificateId=cert_id, newStatus='REVOKED') - cert = client.describe_certificate(certificateId=cert_id) - cert_desc = cert['certificateDescription'] - cert_desc.should.have.key('status').which.should.equal('REVOKED') - - client.delete_certificate(certificateId=cert_id) - res = client.list_certificates() - res.should.have.key('certificates') - - -@mock_iot -def test_delete_policy_validation(): - doc = """{ - "Version": "2012-10-17", - "Statement":[ - { - "Effect":"Allow", - "Action":[ - "iot: *" - ], - "Resource":"*" - } - ] - } - """ - client = boto3.client('iot', region_name='ap-northeast-1') - cert = client.create_keys_and_certificate(setAsActive=True) - cert_arn = cert['certificateArn'] - policy_name = 'my-policy' - client.create_policy(policyName=policy_name, policyDocument=doc) - client.attach_principal_policy(policyName=policy_name, principal=cert_arn) - - with assert_raises(ClientError) as e: - client.delete_policy(policyName=policy_name) - e.exception.response['Error']['Message'].should.contain( - 'The policy cannot be deleted as the policy is attached to one or more principals (name=%s)' % policy_name) - res = client.list_policies() - res.should.have.key('policies').which.should.have.length_of(1) - - client.detach_principal_policy(policyName=policy_name, principal=cert_arn) - 
client.delete_policy(policyName=policy_name) - res = client.list_policies() - res.should.have.key('policies').which.should.have.length_of(0) - - -@mock_iot -def test_delete_certificate_validation(): - doc = """{ - "Version": "2012-10-17", - "Statement":[ - { - "Effect":"Allow", - "Action":[ - "iot: *" - ], - "Resource":"*" - } - ] - } - """ - client = boto3.client('iot', region_name='ap-northeast-1') - cert = client.create_keys_and_certificate(setAsActive=True) - cert_id = cert['certificateId'] - cert_arn = cert['certificateArn'] - policy_name = 'my-policy' - thing_name = 'thing-1' - client.create_policy(policyName=policy_name, policyDocument=doc) - client.attach_principal_policy(policyName=policy_name, principal=cert_arn) - client.create_thing(thingName=thing_name) - client.attach_thing_principal(thingName=thing_name, principal=cert_arn) - - with assert_raises(ClientError) as e: - client.delete_certificate(certificateId=cert_id) - e.exception.response['Error']['Message'].should.contain( - 'Certificate must be deactivated (not ACTIVE) before deletion.') - res = client.list_certificates() - res.should.have.key('certificates').which.should.have.length_of(1) - - client.update_certificate(certificateId=cert_id, newStatus='REVOKED') - with assert_raises(ClientError) as e: - client.delete_certificate(certificateId=cert_id) - e.exception.response['Error']['Message'].should.contain( - 'Things must be detached before deletion (arn: %s)' % cert_arn) - res = client.list_certificates() - res.should.have.key('certificates').which.should.have.length_of(1) - - client.detach_thing_principal(thingName=thing_name, principal=cert_arn) - with assert_raises(ClientError) as e: - client.delete_certificate(certificateId=cert_id) - e.exception.response['Error']['Message'].should.contain( - 'Certificate policies must be detached before deletion (arn: %s)' % cert_arn) - res = client.list_certificates() - res.should.have.key('certificates').which.should.have.length_of(1) - - client.detach_principal_policy(policyName=policy_name, principal=cert_arn) - client.delete_certificate(certificateId=cert_id) - res = client.list_certificates() - res.should.have.key('certificates').which.should.have.length_of(0) - - -@mock_iot -def test_certs_create_inactive(): - client = boto3.client('iot', region_name='ap-northeast-1') - cert = client.create_keys_and_certificate(setAsActive=False) - cert_id = cert['certificateId'] - - cert = client.describe_certificate(certificateId=cert_id) - cert.should.have.key('certificateDescription') - cert_desc = cert['certificateDescription'] - cert_desc.should.have.key('status').which.should.equal('INACTIVE') - - client.update_certificate(certificateId=cert_id, newStatus='ACTIVE') - cert = client.describe_certificate(certificateId=cert_id) - cert.should.have.key('certificateDescription') - cert_desc = cert['certificateDescription'] - cert_desc.should.have.key('status').which.should.equal('ACTIVE') - - -@mock_iot -def test_policy(): - client = boto3.client('iot', region_name='ap-northeast-1') - name = 'my-policy' - doc = '{}' - policy = client.create_policy(policyName=name, policyDocument=doc) - policy.should.have.key('policyName').which.should.equal(name) - policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(doc) - policy.should.have.key('policyVersionId').which.should.equal('1') - - policy = client.get_policy(policyName=name) - policy.should.have.key('policyName').which.should.equal(name) - 
policy.should.have.key('policyArn').which.should_not.be.none - policy.should.have.key('policyDocument').which.should.equal(doc) - policy.should.have.key('defaultVersionId').which.should.equal('1') - - res = client.list_policies() - res.should.have.key('policies').which.should.have.length_of(1) - for policy in res['policies']: - policy.should.have.key('policyName').which.should_not.be.none - policy.should.have.key('policyArn').which.should_not.be.none - - client.delete_policy(policyName=name) - res = client.list_policies() - res.should.have.key('policies').which.should.have.length_of(0) - - -@mock_iot -def test_principal_policy(): - client = boto3.client('iot', region_name='ap-northeast-1') - policy_name = 'my-policy' - doc = '{}' - client.create_policy(policyName=policy_name, policyDocument=doc) - cert = client.create_keys_and_certificate(setAsActive=True) - cert_arn = cert['certificateArn'] - - client.attach_policy(policyName=policy_name, target=cert_arn) - - res = client.list_principal_policies(principal=cert_arn) - res.should.have.key('policies').which.should.have.length_of(1) - for policy in res['policies']: - policy.should.have.key('policyName').which.should_not.be.none - policy.should.have.key('policyArn').which.should_not.be.none - - # do nothing if policy have already attached to certificate - client.attach_policy(policyName=policy_name, target=cert_arn) - - res = client.list_principal_policies(principal=cert_arn) - res.should.have.key('policies').which.should.have.length_of(1) - for policy in res['policies']: - policy.should.have.key('policyName').which.should_not.be.none - policy.should.have.key('policyArn').which.should_not.be.none - - res = client.list_policy_principals(policyName=policy_name) - res.should.have.key('principals').which.should.have.length_of(1) - for principal in res['principals']: - principal.should_not.be.none - - client.detach_policy(policyName=policy_name, target=cert_arn) - res = client.list_principal_policies(principal=cert_arn) - res.should.have.key('policies').which.should.have.length_of(0) - res = client.list_policy_principals(policyName=policy_name) - res.should.have.key('principals').which.should.have.length_of(0) - with assert_raises(ClientError) as e: - client.detach_policy(policyName=policy_name, target=cert_arn) - e.exception.response['Error']['Code'].should.equal('ResourceNotFoundException') - - -@mock_iot -def test_principal_policy_deprecated(): - client = boto3.client('iot', region_name='ap-northeast-1') - policy_name = 'my-policy' - doc = '{}' - policy = client.create_policy(policyName=policy_name, policyDocument=doc) - cert = client.create_keys_and_certificate(setAsActive=True) - cert_arn = cert['certificateArn'] - - client.attach_principal_policy(policyName=policy_name, principal=cert_arn) - - res = client.list_principal_policies(principal=cert_arn) - res.should.have.key('policies').which.should.have.length_of(1) - for policy in res['policies']: - policy.should.have.key('policyName').which.should_not.be.none - policy.should.have.key('policyArn').which.should_not.be.none - - res = client.list_policy_principals(policyName=policy_name) - res.should.have.key('principals').which.should.have.length_of(1) - for principal in res['principals']: - principal.should_not.be.none - - client.detach_principal_policy(policyName=policy_name, principal=cert_arn) - res = client.list_principal_policies(principal=cert_arn) - res.should.have.key('policies').which.should.have.length_of(0) - res = client.list_policy_principals(policyName=policy_name) - 
res.should.have.key('principals').which.should.have.length_of(0) - - -@mock_iot -def test_principal_thing(): - client = boto3.client('iot', region_name='ap-northeast-1') - thing_name = 'my-thing' - thing = client.create_thing(thingName=thing_name) - cert = client.create_keys_and_certificate(setAsActive=True) - cert_arn = cert['certificateArn'] - - client.attach_thing_principal(thingName=thing_name, principal=cert_arn) - - res = client.list_principal_things(principal=cert_arn) - res.should.have.key('things').which.should.have.length_of(1) - for thing in res['things']: - thing.should_not.be.none - res = client.list_thing_principals(thingName=thing_name) - res.should.have.key('principals').which.should.have.length_of(1) - for principal in res['principals']: - principal.should_not.be.none - - client.detach_thing_principal(thingName=thing_name, principal=cert_arn) - res = client.list_principal_things(principal=cert_arn) - res.should.have.key('things').which.should.have.length_of(0) - res = client.list_thing_principals(thingName=thing_name) - res.should.have.key('principals').which.should.have.length_of(0) - - -@mock_iot -def test_thing_groups(): - client = boto3.client('iot', region_name='ap-northeast-1') - group_name = 'my-group-name' - - # thing group - thing_group = client.create_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupName').which.should.equal(group_name) - thing_group.should.have.key('thingGroupArn') - - res = client.list_thing_groups() - res.should.have.key('thingGroups').which.should.have.length_of(1) - for thing_group in res['thingGroups']: - thing_group.should.have.key('groupName').which.should_not.be.none - thing_group.should.have.key('groupArn').which.should_not.be.none - - thing_group = client.describe_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupName').which.should.equal(group_name) - thing_group.should.have.key('thingGroupProperties') - thing_group.should.have.key('thingGroupMetadata') - thing_group.should.have.key('version') - - # delete thing group - client.delete_thing_group(thingGroupName=group_name) - res = client.list_thing_groups() - res.should.have.key('thingGroups').which.should.have.length_of(0) - - # props create test - props = { - 'thingGroupDescription': 'my first thing group', - 'attributePayload': { - 'attributes': { - 'key1': 'val01', - 'Key02': 'VAL2' - } - } - } - thing_group = client.create_thing_group(thingGroupName=group_name, thingGroupProperties=props) - thing_group.should.have.key('thingGroupName').which.should.equal(group_name) - thing_group.should.have.key('thingGroupArn') - - thing_group = client.describe_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupProperties') \ - .which.should.have.key('attributePayload') \ - .which.should.have.key('attributes') - res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] - res_props.should.have.key('key1').which.should.equal('val01') - res_props.should.have.key('Key02').which.should.equal('VAL2') - - # props update test with merge - new_props = { - 'attributePayload': { - 'attributes': { - 'k3': 'v3' - }, - 'merge': True - } - } - client.update_thing_group( - thingGroupName=group_name, - thingGroupProperties=new_props - ) - thing_group = client.describe_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupProperties') \ - .which.should.have.key('attributePayload') \ - .which.should.have.key('attributes') - res_props = 
thing_group['thingGroupProperties']['attributePayload']['attributes'] - res_props.should.have.key('key1').which.should.equal('val01') - res_props.should.have.key('Key02').which.should.equal('VAL2') - - res_props.should.have.key('k3').which.should.equal('v3') - - # props update test - new_props = { - 'attributePayload': { - 'attributes': { - 'k4': 'v4' - } - } - } - client.update_thing_group( - thingGroupName=group_name, - thingGroupProperties=new_props - ) - thing_group = client.describe_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupProperties') \ - .which.should.have.key('attributePayload') \ - .which.should.have.key('attributes') - res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] - res_props.should.have.key('k4').which.should.equal('v4') - res_props.should_not.have.key('key1') - - -@mock_iot -def test_thing_group_relations(): - client = boto3.client('iot', region_name='ap-northeast-1') - name = 'my-thing' - group_name = 'my-group-name' - - # thing group - thing_group = client.create_thing_group(thingGroupName=group_name) - thing_group.should.have.key('thingGroupName').which.should.equal(group_name) - thing_group.should.have.key('thingGroupArn') - - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # add in 4 way - client.add_thing_to_thing_group( - thingGroupName=group_name, - thingName=name - ) - client.add_thing_to_thing_group( - thingGroupArn=thing_group['thingGroupArn'], - thingArn=thing['thingArn'] - ) - client.add_thing_to_thing_group( - thingGroupName=group_name, - thingArn=thing['thingArn'] - ) - client.add_thing_to_thing_group( - thingGroupArn=thing_group['thingGroupArn'], - thingName=name - ) - - things = client.list_things_in_thing_group( - thingGroupName=group_name - ) - things.should.have.key('things') - things['things'].should.have.length_of(1) - - thing_groups = client.list_thing_groups_for_thing( - thingName=name - ) - thing_groups.should.have.key('thingGroups') - thing_groups['thingGroups'].should.have.length_of(1) - - # remove in 4 way - client.remove_thing_from_thing_group( - thingGroupName=group_name, - thingName=name - ) - client.remove_thing_from_thing_group( - thingGroupArn=thing_group['thingGroupArn'], - thingArn=thing['thingArn'] - ) - client.remove_thing_from_thing_group( - thingGroupName=group_name, - thingArn=thing['thingArn'] - ) - client.remove_thing_from_thing_group( - thingGroupArn=thing_group['thingGroupArn'], - thingName=name - ) - things = client.list_things_in_thing_group( - thingGroupName=group_name - ) - things.should.have.key('things') - things['things'].should.have.length_of(0) - - # update thing group for thing - client.update_thing_groups_for_thing( - thingName=name, - thingGroupsToAdd=[ - group_name - ] - ) - things = client.list_things_in_thing_group( - thingGroupName=group_name - ) - things.should.have.key('things') - things['things'].should.have.length_of(1) - - client.update_thing_groups_for_thing( - thingName=name, - thingGroupsToRemove=[ - group_name - ] - ) - things = client.list_things_in_thing_group( - thingGroupName=group_name - ) - things.should.have.key('things') - things['things'].should.have.length_of(0) - - -@mock_iot -def test_create_job(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing# job document - # job_document = { - # "field": "value" - # } - thing = client.create_thing(thingName=name) - 
thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - document=json.dumps(job_document), - description="Description", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - job.should.have.key('description') - - -@mock_iot -def test_list_jobs(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing# job document - # job_document = { - # "field": "value" - # } - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job1 = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - document=json.dumps(job_document), - description="Description", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job1.should.have.key('jobId').which.should.equal(job_id) - job1.should.have.key('jobArn') - job1.should.have.key('description') - - job2 = client.create_job( - jobId=job_id+"1", - targets=[thing["thingArn"]], - document=json.dumps(job_document), - description="Description", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job2.should.have.key('jobId').which.should.equal(job_id+"1") - job2.should.have.key('jobArn') - job2.should.have.key('description') - - jobs = client.list_jobs() - jobs.should.have.key('jobs') - jobs.should_not.have.key('nextToken') - jobs['jobs'][0].should.have.key('jobId').which.should.equal(job_id) - jobs['jobs'][1].should.have.key('jobId').which.should.equal(job_id+"1") - - -@mock_iot -def test_describe_job(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - - job = client.describe_job(jobId=job_id) - job.should.have.key('documentSource') - job.should.have.key('job') - job.should.have.key('job').which.should.have.key("jobArn") - job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) - job.should.have.key('job').which.should.have.key("targets") - job.should.have.key('job').which.should.have.key("jobProcessDetails") - job.should.have.key('job').which.should.have.key("lastUpdatedAt") - job.should.have.key('job').which.should.have.key("createdAt") - 
job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig") - job.should.have.key('job').which.should.have.key("targetSelection").which.should.equal("CONTINUOUS") - job.should.have.key('job').which.should.have.key("presignedUrlConfig") - job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( - "roleArn").which.should.equal('arn:aws:iam::1:role/service-role/iot_job_role') - job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( - "expiresInSec").which.should.equal(123) - job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key( - "maximumPerMinute").which.should.equal(10) - - -@mock_iot -def test_describe_job_1(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - document=json.dumps(job_document), - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - - job = client.describe_job(jobId=job_id) - job.should.have.key('job') - job.should.have.key('job').which.should.have.key("jobArn") - job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) - job.should.have.key('job').which.should.have.key("targets") - job.should.have.key('job').which.should.have.key("jobProcessDetails") - job.should.have.key('job').which.should.have.key("lastUpdatedAt") - job.should.have.key('job').which.should.have.key("createdAt") - job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig") - job.should.have.key('job').which.should.have.key("targetSelection").which.should.equal("CONTINUOUS") - job.should.have.key('job').which.should.have.key("presignedUrlConfig") - job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( - "roleArn").which.should.equal('arn:aws:iam::1:role/service-role/iot_job_role') - job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( - "expiresInSec").which.should.equal(123) - job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key( - "maximumPerMinute").which.should.equal(10) - - -@mock_iot -def test_delete_job(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - - job = client.describe_job(jobId=job_id) - job.should.have.key('job') - 
job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) - - client.delete_job(jobId=job_id) - - client.list_jobs()['jobs'].should.have.length_of(0) - - -@mock_iot -def test_cancel_job(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - - job = client.describe_job(jobId=job_id) - job.should.have.key('job') - job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) - - job = client.cancel_job(jobId=job_id, reasonCode='Because', comment='You are') - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - - job = client.describe_job(jobId=job_id) - job.should.have.key('job') - job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) - job.should.have.key('job').which.should.have.key("status").which.should.equal('CANCELED') - job.should.have.key('job').which.should.have.key("forceCanceled").which.should.equal(False) - job.should.have.key('job').which.should.have.key("reasonCode").which.should.equal('Because') - job.should.have.key('job').which.should.have.key("comment").which.should.equal('You are') - - -@mock_iot -def test_get_job_document_with_document_source(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - - job_document = client.get_job_document(jobId=job_id) - job_document.should.have.key('document').which.should.equal('') - - -@mock_iot -def test_get_job_document_with_document(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - document=json.dumps(job_document), - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - - job_document = client.get_job_document(jobId=job_id) - 
job_document.should.have.key('document').which.should.equal("{\"field\": \"value\"}") - - -@mock_iot -def test_describe_job_execution(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - document=json.dumps(job_document), - description="Description", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - job.should.have.key('description') - - job_execution = client.describe_job_execution(jobId=job_id, thingName=name) - job_execution.should.have.key('execution') - job_execution['execution'].should.have.key('jobId').which.should.equal(job_id) - job_execution['execution'].should.have.key('status').which.should.equal('QUEUED') - job_execution['execution'].should.have.key('forceCanceled').which.should.equal(False) - job_execution['execution'].should.have.key('statusDetails').which.should.equal({'detailsMap': {}}) - job_execution['execution'].should.have.key('thingArn').which.should.equal(thing["thingArn"]) - job_execution['execution'].should.have.key('queuedAt') - job_execution['execution'].should.have.key('startedAt') - job_execution['execution'].should.have.key('lastUpdatedAt') - job_execution['execution'].should.have.key('executionNumber').which.should.equal(123) - job_execution['execution'].should.have.key('versionNumber').which.should.equal(123) - job_execution['execution'].should.have.key('approximateSecondsBeforeTimedOut').which.should.equal(123) - - job_execution = client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=123) - job_execution.should.have.key('execution') - job_execution['execution'].should.have.key('jobId').which.should.equal(job_id) - job_execution['execution'].should.have.key('status').which.should.equal('QUEUED') - job_execution['execution'].should.have.key('forceCanceled').which.should.equal(False) - job_execution['execution'].should.have.key('statusDetails').which.should.equal({'detailsMap': {}}) - job_execution['execution'].should.have.key('thingArn').which.should.equal(thing["thingArn"]) - job_execution['execution'].should.have.key('queuedAt') - job_execution['execution'].should.have.key('startedAt') - job_execution['execution'].should.have.key('lastUpdatedAt') - job_execution['execution'].should.have.key('executionNumber').which.should.equal(123) - job_execution['execution'].should.have.key('versionNumber').which.should.equal(123) - job_execution['execution'].should.have.key('approximateSecondsBeforeTimedOut').which.should.equal(123) - - try: - client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=456) - except ClientError as exc: - error_code = exc.response['Error']['Code'] - error_code.should.equal('ResourceNotFoundException') - else: - raise Exception("Should have raised error") - - -@mock_iot -def test_cancel_job_execution(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - 
thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - document=json.dumps(job_document), - description="Description", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - job.should.have.key('description') - - client.cancel_job_execution(jobId=job_id, thingName=name) - job_execution = client.describe_job_execution(jobId=job_id, thingName=name) - job_execution.should.have.key('execution') - job_execution['execution'].should.have.key('status').which.should.equal('CANCELED') - - -@mock_iot -def test_delete_job_execution(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - document=json.dumps(job_document), - description="Description", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - job.should.have.key('description') - - client.delete_job_execution(jobId=job_id, thingName=name, executionNumber=123) - try: - client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=123) - except ClientError as exc: - error_code = exc.response['Error']['Code'] - error_code.should.equal('ResourceNotFoundException') - else: - raise Exception("Should have raised error") - - -@mock_iot -def test_list_job_executions_for_job(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job = client.create_job( - jobId=job_id, - targets=[thing["thingArn"]], - document=json.dumps(job_document), - description="Description", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - job.should.have.key('description') - - job_execution = client.list_job_executions_for_job(jobId=job_id) - job_execution.should.have.key('executionSummaries') - job_execution['executionSummaries'][0].should.have.key('thingArn').which.should.equal(thing["thingArn"]) - - -@mock_iot -def test_list_job_executions_for_thing(): - client = boto3.client('iot', region_name='eu-west-1') - name = "my-thing" - job_id = "TestJob" - # thing - thing = client.create_thing(thingName=name) - thing.should.have.key('thingName').which.should.equal(name) - thing.should.have.key('thingArn') - - # job document - job_document = { - "field": "value" - } - - job = client.create_job( - jobId=job_id, - 
targets=[thing["thingArn"]], - document=json.dumps(job_document), - description="Description", - presignedUrlConfig={ - 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', - 'expiresInSec': 123 - }, - targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - 'maximumPerMinute': 10 - } - ) - - job.should.have.key('jobId').which.should.equal(job_id) - job.should.have.key('jobArn') - job.should.have.key('description') - - job_execution = client.list_job_executions_for_thing(thingName=name) - job_execution.should.have.key('executionSummaries') - job_execution['executionSummaries'][0].should.have.key('jobId').which.should.equal(job_id) - +from __future__ import unicode_literals + +import json +import sure #noqa +import boto3 + +from moto import mock_iot +from botocore.exceptions import ClientError +from nose.tools import assert_raises + +@mock_iot +def test_attach_policy(): + client = boto3.client('iot', region_name='ap-northeast-1') + policy_name = 'my-policy' + doc = '{}' + + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + client.create_policy(policyName=policy_name, policyDocument=doc) + client.attach_policy(policyName=policy_name, target=cert_arn) + + res = client.list_attached_policies(target=cert_arn) + res.should.have.key('policies').which.should.have.length_of(1) + res['policies'][0]['policyName'].should.equal('my-policy') + + +@mock_iot +def test_detach_policy(): + client = boto3.client('iot', region_name='ap-northeast-1') + policy_name = 'my-policy' + doc = '{}' + + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + client.create_policy(policyName=policy_name, policyDocument=doc) + client.attach_policy(policyName=policy_name, target=cert_arn) + + res = client.list_attached_policies(target=cert_arn) + res.should.have.key('policies').which.should.have.length_of(1) + res['policies'][0]['policyName'].should.equal('my-policy') + + client.detach_policy(policyName=policy_name, target=cert_arn) + res = client.list_attached_policies(target=cert_arn) + res.should.have.key('policies').which.should.be.empty + + +@mock_iot +def test_list_attached_policies(): + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=True) + policies = client.list_attached_policies(target=cert['certificateArn']) + policies['policies'].should.be.empty + + +@mock_iot +def test_policy_versions(): + client = boto3.client('iot', region_name='ap-northeast-1') + policy_name = 'my-policy' + doc = '{}' + + policy = client.create_policy(policyName=policy_name, policyDocument=doc) + policy.should.have.key('policyName').which.should.equal(policy_name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(json.dumps({})) + policy.should.have.key('policyVersionId').which.should.equal('1') + + policy = client.get_policy(policyName=policy_name) + policy.should.have.key('policyName').which.should.equal(policy_name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(json.dumps({})) + policy.should.have.key('defaultVersionId').which.should.equal(policy['defaultVersionId']) + + policy1 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({'version': 'version_1'}), + setAsDefault=True) + policy1.should.have.key('policyArn').which.should_not.be.none + 
policy1.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) + policy1.should.have.key('policyVersionId').which.should.equal('2') + policy1.should.have.key('isDefaultVersion').which.should.equal(True) + + policy2 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({'version': 'version_2'}), + setAsDefault=False) + policy2.should.have.key('policyArn').which.should_not.be.none + policy2.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_2'})) + policy2.should.have.key('policyVersionId').which.should.equal('3') + policy2.should.have.key('isDefaultVersion').which.should.equal(False) + + policy = client.get_policy(policyName=policy_name) + policy.should.have.key('policyName').which.should.equal(policy_name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) + policy.should.have.key('defaultVersionId').which.should.equal(policy1['policyVersionId']) + + policy_versions = client.list_policy_versions(policyName=policy_name) + policy_versions.should.have.key('policyVersions').which.should.have.length_of(3) + list(map(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])).count(True).should.equal(1) + default_policy = list(filter(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])) + default_policy[0].should.have.key('versionId').should.equal(policy1['policyVersionId']) + + policy = client.get_policy(policyName=policy_name) + policy.should.have.key('policyName').which.should.equal(policy_name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_1'})) + policy.should.have.key('defaultVersionId').which.should.equal(policy1['policyVersionId']) + + client.set_default_policy_version(policyName=policy_name, policyVersionId=policy2['policyVersionId']) + policy_versions = client.list_policy_versions(policyName=policy_name) + policy_versions.should.have.key('policyVersions').which.should.have.length_of(3) + list(map(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])).count(True).should.equal(1) + default_policy = list(filter(lambda item: item['isDefaultVersion'], policy_versions['policyVersions'])) + default_policy[0].should.have.key('versionId').should.equal(policy2['policyVersionId']) + + policy = client.get_policy(policyName=policy_name) + policy.should.have.key('policyName').which.should.equal(policy_name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(json.dumps({'version': 'version_2'})) + policy.should.have.key('defaultVersionId').which.should.equal(policy2['policyVersionId']) + + client.delete_policy_version(policyName=policy_name, policyVersionId='1') + policy_versions = client.list_policy_versions(policyName=policy_name) + policy_versions.should.have.key('policyVersions').which.should.have.length_of(2) + + client.delete_policy_version(policyName=policy_name, policyVersionId=policy1['policyVersionId']) + policy_versions = client.list_policy_versions(policyName=policy_name) + policy_versions.should.have.key('policyVersions').which.should.have.length_of(1) + + # should fail as it's the default policy. 
Should use delete_policy instead + try: + client.delete_policy_version(policyName=policy_name, policyVersionId=policy2['policyVersionId']) + assert False, 'Should have failed in previous call' + except Exception as exception: + exception.response['Error']['Message'].should.equal('Cannot delete the default version of a policy') + + +@mock_iot +def test_things(): + client = boto3.client('iot', region_name='ap-northeast-1') + name = 'my-thing' + type_name = 'my-type-name' + + # thing type + thing_type = client.create_thing_type(thingTypeName=type_name) + thing_type.should.have.key('thingTypeName').which.should.equal(type_name) + thing_type.should.have.key('thingTypeArn') + + res = client.list_thing_types() + res.should.have.key('thingTypes').which.should.have.length_of(1) + for thing_type in res['thingTypes']: + thing_type.should.have.key('thingTypeName').which.should_not.be.none + + thing_type = client.describe_thing_type(thingTypeName=type_name) + thing_type.should.have.key('thingTypeName').which.should.equal(type_name) + thing_type.should.have.key('thingTypeProperties') + thing_type.should.have.key('thingTypeMetadata') + + # thing + thing = client.create_thing(thingName=name, thingTypeName=type_name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + res = client.list_things() + res.should.have.key('things').which.should.have.length_of(1) + for thing in res['things']: + thing.should.have.key('thingName').which.should_not.be.none + thing.should.have.key('thingArn').which.should_not.be.none + + thing = client.update_thing(thingName=name, attributePayload={'attributes': {'k1': 'v1'}}) + res = client.list_things() + res.should.have.key('things').which.should.have.length_of(1) + for thing in res['things']: + thing.should.have.key('thingName').which.should_not.be.none + thing.should.have.key('thingArn').which.should_not.be.none + res['things'][0]['attributes'].should.have.key('k1').which.should.equal('v1') + + thing = client.describe_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('defaultClientId') + thing.should.have.key('thingTypeName') + thing.should.have.key('attributes') + thing.should.have.key('version') + + # delete thing + client.delete_thing(thingName=name) + res = client.list_things() + res.should.have.key('things').which.should.have.length_of(0) + + # delete thing type + client.delete_thing_type(thingTypeName=type_name) + res = client.list_thing_types() + res.should.have.key('thingTypes').which.should.have.length_of(0) + + +@mock_iot +def test_list_thing_types(): + client = boto3.client('iot', region_name='ap-northeast-1') + + for i in range(0, 100): + client.create_thing_type(thingTypeName=str(i + 1)) + + thing_types = client.list_thing_types() + thing_types.should.have.key('nextToken') + thing_types.should.have.key('thingTypes').which.should.have.length_of(50) + thing_types['thingTypes'][0]['thingTypeName'].should.equal('1') + thing_types['thingTypes'][-1]['thingTypeName'].should.equal('50') + + thing_types = client.list_thing_types(nextToken=thing_types['nextToken']) + thing_types.should.have.key('thingTypes').which.should.have.length_of(50) + thing_types.should_not.have.key('nextToken') + thing_types['thingTypes'][0]['thingTypeName'].should.equal('51') + thing_types['thingTypes'][-1]['thingTypeName'].should.equal('100') + + +@mock_iot +def test_list_thing_types_with_typename_filter(): + client = boto3.client('iot', region_name='ap-northeast-1') + + 
client.create_thing_type(thingTypeName='thing') + client.create_thing_type(thingTypeName='thingType') + client.create_thing_type(thingTypeName='thingTypeName') + client.create_thing_type(thingTypeName='thingTypeNameGroup') + client.create_thing_type(thingTypeName='shouldNotFind') + client.create_thing_type(thingTypeName='find me it shall not') + + thing_types = client.list_thing_types(thingTypeName='thing') + thing_types.should_not.have.key('nextToken') + thing_types.should.have.key('thingTypes').which.should.have.length_of(4) + thing_types['thingTypes'][0]['thingTypeName'].should.equal('thing') + thing_types['thingTypes'][-1]['thingTypeName'].should.equal('thingTypeNameGroup') + + thing_types = client.list_thing_types(thingTypeName='thingTypeName') + thing_types.should_not.have.key('nextToken') + thing_types.should.have.key('thingTypes').which.should.have.length_of(2) + thing_types['thingTypes'][0]['thingTypeName'].should.equal('thingTypeName') + thing_types['thingTypes'][-1]['thingTypeName'].should.equal('thingTypeNameGroup') + + +@mock_iot +def test_list_things_with_next_token(): + client = boto3.client('iot', region_name='ap-northeast-1') + + for i in range(0, 200): + client.create_thing(thingName=str(i + 1)) + + things = client.list_things() + things.should.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('1') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/1') + things['things'][-1]['thingName'].should.equal('50') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/50') + + things = client.list_things(nextToken=things['nextToken']) + things.should.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('51') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/51') + things['things'][-1]['thingName'].should.equal('100') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/100') + + things = client.list_things(nextToken=things['nextToken']) + things.should.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('101') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/101') + things['things'][-1]['thingName'].should.equal('150') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/150') + + things = client.list_things(nextToken=things['nextToken']) + things.should_not.have.key('nextToken') + things.should.have.key('things').which.should.have.length_of(50) + things['things'][0]['thingName'].should.equal('151') + things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/151') + things['things'][-1]['thingName'].should.equal('200') + things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/200') + + +@mock_iot +def test_list_things_with_attribute_and_thing_type_filter_and_next_token(): + client = boto3.client('iot', region_name='ap-northeast-1') + client.create_thing_type(thingTypeName='my-thing-type') + + for i in range(0, 200): + if not (i + 1) % 3: + attribute_payload = { + 'attributes': { + 'foo': 'bar' + } + } + elif not (i + 1) % 5: + attribute_payload = { + 'attributes': { + 'bar': 'foo' + } + } + else: + attribute_payload = {} + + if not (i + 1) % 2: + thing_type_name = 'my-thing-type' + 
client.create_thing(thingName=str(i + 1), thingTypeName=thing_type_name, attributePayload=attribute_payload)
+        else:
+            client.create_thing(thingName=str(i + 1), attributePayload=attribute_payload)
+
+    # Test filter for thingTypeName
+    things = client.list_things(thingTypeName=thing_type_name)
+    things.should.have.key('nextToken')
+    things.should.have.key('things').which.should.have.length_of(50)
+    things['things'][0]['thingName'].should.equal('2')
+    things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/2')
+    things['things'][-1]['thingName'].should.equal('100')
+    things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/100')
+    assert all(item['thingTypeName'] == thing_type_name for item in things['things'])
+
+    things = client.list_things(nextToken=things['nextToken'], thingTypeName=thing_type_name)
+    things.should_not.have.key('nextToken')
+    things.should.have.key('things').which.should.have.length_of(50)
+    things['things'][0]['thingName'].should.equal('102')
+    things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/102')
+    things['things'][-1]['thingName'].should.equal('200')
+    things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/200')
+    assert all(item['thingTypeName'] == thing_type_name for item in things['things'])
+
+    # Test filter for attributes
+    things = client.list_things(attributeName='foo', attributeValue='bar')
+    things.should.have.key('nextToken')
+    things.should.have.key('things').which.should.have.length_of(50)
+    things['things'][0]['thingName'].should.equal('3')
+    things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/3')
+    things['things'][-1]['thingName'].should.equal('150')
+    things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/150')
+    assert all(item['attributes'] == {'foo': 'bar'} for item in things['things'])
+
+    things = client.list_things(nextToken=things['nextToken'], attributeName='foo', attributeValue='bar')
+    things.should_not.have.key('nextToken')
+    things.should.have.key('things').which.should.have.length_of(16)
+    things['things'][0]['thingName'].should.equal('153')
+    things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/153')
+    things['things'][-1]['thingName'].should.equal('198')
+    things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/198')
+    assert all(item['attributes'] == {'foo': 'bar'} for item in things['things'])
+
+    # Test filter for attributes and thingTypeName
+    things = client.list_things(thingTypeName=thing_type_name, attributeName='foo', attributeValue='bar')
+    things.should_not.have.key('nextToken')
+    things.should.have.key('things').which.should.have.length_of(33)
+    things['things'][0]['thingName'].should.equal('6')
+    things['things'][0]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/6')
+    things['things'][-1]['thingName'].should.equal('198')
+    things['things'][-1]['thingArn'].should.equal('arn:aws:iot:ap-northeast-1:1:thing/198')
+    assert all(item['attributes'] == {'foo': 'bar'} and item['thingTypeName'] == thing_type_name for item in things['things'])
+
+
+@mock_iot
+def test_certs():
+    client = boto3.client('iot', region_name='us-east-1')
+    cert = client.create_keys_and_certificate(setAsActive=True)
+    cert.should.have.key('certificateArn').which.should_not.be.none
+    cert.should.have.key('certificateId').which.should_not.be.none
+    cert.should.have.key('certificatePem').which.should_not.be.none
+    cert.should.have.key('keyPair')
+    
cert['keyPair'].should.have.key('PublicKey').which.should_not.be.none + cert['keyPair'].should.have.key('PrivateKey').which.should_not.be.none + cert_id = cert['certificateId'] + + cert = client.describe_certificate(certificateId=cert_id) + cert.should.have.key('certificateDescription') + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('certificateArn').which.should_not.be.none + cert_desc.should.have.key('certificateId').which.should_not.be.none + cert_desc.should.have.key('certificatePem').which.should_not.be.none + cert_desc.should.have.key('status').which.should.equal('ACTIVE') + cert_pem = cert_desc['certificatePem'] + + res = client.list_certificates() + for cert in res['certificates']: + cert.should.have.key('certificateArn').which.should_not.be.none + cert.should.have.key('certificateId').which.should_not.be.none + cert.should.have.key('status').which.should_not.be.none + cert.should.have.key('creationDate').which.should_not.be.none + + client.update_certificate(certificateId=cert_id, newStatus='REVOKED') + cert = client.describe_certificate(certificateId=cert_id) + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('status').which.should.equal('REVOKED') + + client.delete_certificate(certificateId=cert_id) + res = client.list_certificates() + res.should.have.key('certificates') + + # Test register_certificate flow + cert = client.register_certificate(certificatePem=cert_pem, setAsActive=True) + cert.should.have.key('certificateId').which.should_not.be.none + cert.should.have.key('certificateArn').which.should_not.be.none + cert_id = cert['certificateId'] + + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + for cert in res['certificates']: + cert.should.have.key('certificateArn').which.should_not.be.none + cert.should.have.key('certificateId').which.should_not.be.none + cert.should.have.key('status').which.should_not.be.none + cert.should.have.key('creationDate').which.should_not.be.none + + client.update_certificate(certificateId=cert_id, newStatus='REVOKED') + cert = client.describe_certificate(certificateId=cert_id) + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('status').which.should.equal('REVOKED') + + client.delete_certificate(certificateId=cert_id) + res = client.list_certificates() + res.should.have.key('certificates') + + +@mock_iot +def test_delete_policy_validation(): + doc = """{ + "Version": "2012-10-17", + "Statement":[ + { + "Effect":"Allow", + "Action":[ + "iot: *" + ], + "Resource":"*" + } + ] + } + """ + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + policy_name = 'my-policy' + client.create_policy(policyName=policy_name, policyDocument=doc) + client.attach_principal_policy(policyName=policy_name, principal=cert_arn) + + with assert_raises(ClientError) as e: + client.delete_policy(policyName=policy_name) + e.exception.response['Error']['Message'].should.contain( + 'The policy cannot be deleted as the policy is attached to one or more principals (name=%s)' % policy_name) + res = client.list_policies() + res.should.have.key('policies').which.should.have.length_of(1) + + client.detach_principal_policy(policyName=policy_name, principal=cert_arn) + client.delete_policy(policyName=policy_name) + res = client.list_policies() + res.should.have.key('policies').which.should.have.length_of(0) + + +@mock_iot +def 
test_delete_certificate_validation(): + doc = """{ + "Version": "2012-10-17", + "Statement":[ + { + "Effect":"Allow", + "Action":[ + "iot: *" + ], + "Resource":"*" + } + ] + } + """ + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=True) + cert_id = cert['certificateId'] + cert_arn = cert['certificateArn'] + policy_name = 'my-policy' + thing_name = 'thing-1' + client.create_policy(policyName=policy_name, policyDocument=doc) + client.attach_principal_policy(policyName=policy_name, principal=cert_arn) + client.create_thing(thingName=thing_name) + client.attach_thing_principal(thingName=thing_name, principal=cert_arn) + + with assert_raises(ClientError) as e: + client.delete_certificate(certificateId=cert_id) + e.exception.response['Error']['Message'].should.contain( + 'Certificate must be deactivated (not ACTIVE) before deletion.') + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + + client.update_certificate(certificateId=cert_id, newStatus='REVOKED') + with assert_raises(ClientError) as e: + client.delete_certificate(certificateId=cert_id) + e.exception.response['Error']['Message'].should.contain( + 'Things must be detached before deletion (arn: %s)' % cert_arn) + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + + client.detach_thing_principal(thingName=thing_name, principal=cert_arn) + with assert_raises(ClientError) as e: + client.delete_certificate(certificateId=cert_id) + e.exception.response['Error']['Message'].should.contain( + 'Certificate policies must be detached before deletion (arn: %s)' % cert_arn) + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + + client.detach_principal_policy(policyName=policy_name, principal=cert_arn) + client.delete_certificate(certificateId=cert_id) + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(0) + + +@mock_iot +def test_certs_create_inactive(): + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=False) + cert_id = cert['certificateId'] + + cert = client.describe_certificate(certificateId=cert_id) + cert.should.have.key('certificateDescription') + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('status').which.should.equal('INACTIVE') + + client.update_certificate(certificateId=cert_id, newStatus='ACTIVE') + cert = client.describe_certificate(certificateId=cert_id) + cert.should.have.key('certificateDescription') + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('status').which.should.equal('ACTIVE') + + +@mock_iot +def test_policy(): + client = boto3.client('iot', region_name='ap-northeast-1') + name = 'my-policy' + doc = '{}' + policy = client.create_policy(policyName=name, policyDocument=doc) + policy.should.have.key('policyName').which.should.equal(name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(doc) + policy.should.have.key('policyVersionId').which.should.equal('1') + + policy = client.get_policy(policyName=name) + policy.should.have.key('policyName').which.should.equal(name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(doc) + policy.should.have.key('defaultVersionId').which.should.equal('1') + + 
res = client.list_policies()
+    res.should.have.key('policies').which.should.have.length_of(1)
+    for policy in res['policies']:
+        policy.should.have.key('policyName').which.should_not.be.none
+        policy.should.have.key('policyArn').which.should_not.be.none
+
+    client.delete_policy(policyName=name)
+    res = client.list_policies()
+    res.should.have.key('policies').which.should.have.length_of(0)
+
+
+@mock_iot
+def test_principal_policy():
+    client = boto3.client('iot', region_name='ap-northeast-1')
+    policy_name = 'my-policy'
+    doc = '{}'
+    client.create_policy(policyName=policy_name, policyDocument=doc)
+    cert = client.create_keys_and_certificate(setAsActive=True)
+    cert_arn = cert['certificateArn']
+
+    client.attach_policy(policyName=policy_name, target=cert_arn)
+
+    res = client.list_principal_policies(principal=cert_arn)
+    res.should.have.key('policies').which.should.have.length_of(1)
+    for policy in res['policies']:
+        policy.should.have.key('policyName').which.should_not.be.none
+        policy.should.have.key('policyArn').which.should_not.be.none
+
+    # attaching the same policy to the certificate a second time should be a no-op
+    client.attach_policy(policyName=policy_name, target=cert_arn)
+
+    res = client.list_principal_policies(principal=cert_arn)
+    res.should.have.key('policies').which.should.have.length_of(1)
+    for policy in res['policies']:
+        policy.should.have.key('policyName').which.should_not.be.none
+        policy.should.have.key('policyArn').which.should_not.be.none
+
+    res = client.list_policy_principals(policyName=policy_name)
+    res.should.have.key('principals').which.should.have.length_of(1)
+    for principal in res['principals']:
+        principal.should_not.be.none
+
+    client.detach_policy(policyName=policy_name, target=cert_arn)
+    res = client.list_principal_policies(principal=cert_arn)
+    res.should.have.key('policies').which.should.have.length_of(0)
+    res = client.list_policy_principals(policyName=policy_name)
+    res.should.have.key('principals').which.should.have.length_of(0)
+    with assert_raises(ClientError) as e:
+        client.detach_policy(policyName=policy_name, target=cert_arn)
+    e.exception.response['Error']['Code'].should.equal('ResourceNotFoundException')
+
+
+@mock_iot
+def test_principal_policy_deprecated():
+    client = boto3.client('iot', region_name='ap-northeast-1')
+    policy_name = 'my-policy'
+    doc = '{}'
+    policy = client.create_policy(policyName=policy_name, policyDocument=doc)
+    cert = client.create_keys_and_certificate(setAsActive=True)
+    cert_arn = cert['certificateArn']
+
+    client.attach_principal_policy(policyName=policy_name, principal=cert_arn)
+
+    res = client.list_principal_policies(principal=cert_arn)
+    res.should.have.key('policies').which.should.have.length_of(1)
+    for policy in res['policies']:
+        policy.should.have.key('policyName').which.should_not.be.none
+        policy.should.have.key('policyArn').which.should_not.be.none
+
+    res = client.list_policy_principals(policyName=policy_name)
+    res.should.have.key('principals').which.should.have.length_of(1)
+    for principal in res['principals']:
+        principal.should_not.be.none
+
+    client.detach_principal_policy(policyName=policy_name, principal=cert_arn)
+    res = client.list_principal_policies(principal=cert_arn)
+    res.should.have.key('policies').which.should.have.length_of(0)
+    res = client.list_policy_principals(policyName=policy_name)
+    res.should.have.key('principals').which.should.have.length_of(0)
+
+
+@mock_iot
+def test_principal_thing():
+    client = boto3.client('iot', region_name='ap-northeast-1')
+    thing_name = 'my-thing'
+    thing = 
client.create_thing(thingName=thing_name) + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + + client.attach_thing_principal(thingName=thing_name, principal=cert_arn) + + res = client.list_principal_things(principal=cert_arn) + res.should.have.key('things').which.should.have.length_of(1) + for thing in res['things']: + thing.should_not.be.none + res = client.list_thing_principals(thingName=thing_name) + res.should.have.key('principals').which.should.have.length_of(1) + for principal in res['principals']: + principal.should_not.be.none + + client.detach_thing_principal(thingName=thing_name, principal=cert_arn) + res = client.list_principal_things(principal=cert_arn) + res.should.have.key('things').which.should.have.length_of(0) + res = client.list_thing_principals(thingName=thing_name) + res.should.have.key('principals').which.should.have.length_of(0) + + +@mock_iot +def test_thing_groups(): + client = boto3.client('iot', region_name='ap-northeast-1') + group_name = 'my-group-name' + + # thing group + thing_group = client.create_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + res = client.list_thing_groups() + res.should.have.key('thingGroups').which.should.have.length_of(1) + for thing_group in res['thingGroups']: + thing_group.should.have.key('groupName').which.should_not.be.none + thing_group.should.have.key('groupArn').which.should_not.be.none + + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupProperties') + thing_group.should.have.key('thingGroupMetadata') + thing_group.should.have.key('version') + + # delete thing group + client.delete_thing_group(thingGroupName=group_name) + res = client.list_thing_groups() + res.should.have.key('thingGroups').which.should.have.length_of(0) + + # props create test + props = { + 'thingGroupDescription': 'my first thing group', + 'attributePayload': { + 'attributes': { + 'key1': 'val01', + 'Key02': 'VAL2' + } + } + } + thing_group = client.create_thing_group(thingGroupName=group_name, thingGroupProperties=props) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties') \ + .which.should.have.key('attributePayload') \ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('key1').which.should.equal('val01') + res_props.should.have.key('Key02').which.should.equal('VAL2') + + # props update test with merge + new_props = { + 'attributePayload': { + 'attributes': { + 'k3': 'v3' + }, + 'merge': True + } + } + client.update_thing_group( + thingGroupName=group_name, + thingGroupProperties=new_props + ) + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties') \ + .which.should.have.key('attributePayload') \ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('key1').which.should.equal('val01') + res_props.should.have.key('Key02').which.should.equal('VAL2') + + 
res_props.should.have.key('k3').which.should.equal('v3')
+
+    # props update test
+    new_props = {
+        'attributePayload': {
+            'attributes': {
+                'k4': 'v4'
+            }
+        }
+    }
+    client.update_thing_group(
+        thingGroupName=group_name,
+        thingGroupProperties=new_props
+    )
+    thing_group = client.describe_thing_group(thingGroupName=group_name)
+    thing_group.should.have.key('thingGroupProperties') \
+        .which.should.have.key('attributePayload') \
+        .which.should.have.key('attributes')
+    res_props = thing_group['thingGroupProperties']['attributePayload']['attributes']
+    res_props.should.have.key('k4').which.should.equal('v4')
+    res_props.should_not.have.key('key1')
+
+
+@mock_iot
+def test_thing_group_relations():
+    client = boto3.client('iot', region_name='ap-northeast-1')
+    name = 'my-thing'
+    group_name = 'my-group-name'
+
+    # thing group
+    thing_group = client.create_thing_group(thingGroupName=group_name)
+    thing_group.should.have.key('thingGroupName').which.should.equal(group_name)
+    thing_group.should.have.key('thingGroupArn')
+
+    # thing
+    thing = client.create_thing(thingName=name)
+    thing.should.have.key('thingName').which.should.equal(name)
+    thing.should.have.key('thingArn')
+
+    # add thing to thing group in all four accepted parameter combinations
+    client.add_thing_to_thing_group(
+        thingGroupName=group_name,
+        thingName=name
+    )
+    client.add_thing_to_thing_group(
+        thingGroupArn=thing_group['thingGroupArn'],
+        thingArn=thing['thingArn']
+    )
+    client.add_thing_to_thing_group(
+        thingGroupName=group_name,
+        thingArn=thing['thingArn']
+    )
+    client.add_thing_to_thing_group(
+        thingGroupArn=thing_group['thingGroupArn'],
+        thingName=name
+    )
+
+    things = client.list_things_in_thing_group(
+        thingGroupName=group_name
+    )
+    things.should.have.key('things')
+    things['things'].should.have.length_of(1)
+
+    thing_groups = client.list_thing_groups_for_thing(
+        thingName=name
+    )
+    thing_groups.should.have.key('thingGroups')
+    thing_groups['thingGroups'].should.have.length_of(1)
+
+    # remove thing from thing group in all four accepted parameter combinations
+    client.remove_thing_from_thing_group(
+        thingGroupName=group_name,
+        thingName=name
+    )
+    client.remove_thing_from_thing_group(
+        thingGroupArn=thing_group['thingGroupArn'],
+        thingArn=thing['thingArn']
+    )
+    client.remove_thing_from_thing_group(
+        thingGroupName=group_name,
+        thingArn=thing['thingArn']
+    )
+    client.remove_thing_from_thing_group(
+        thingGroupArn=thing_group['thingGroupArn'],
+        thingName=name
+    )
+    things = client.list_things_in_thing_group(
+        thingGroupName=group_name
+    )
+    things.should.have.key('things')
+    things['things'].should.have.length_of(0)
+
+    # update thing group for thing
+    client.update_thing_groups_for_thing(
+        thingName=name,
+        thingGroupsToAdd=[
+            group_name
+        ]
+    )
+    things = client.list_things_in_thing_group(
+        thingGroupName=group_name
+    )
+    things.should.have.key('things')
+    things['things'].should.have.length_of(1)
+
+    client.update_thing_groups_for_thing(
+        thingName=name,
+        thingGroupsToRemove=[
+            group_name
+        ]
+    )
+    things = client.list_things_in_thing_group(
+        thingGroupName=group_name
+    )
+    things.should.have.key('things')
+    things['things'].should.have.length_of(0)
+
+
+@mock_iot
+def test_create_job():
+    client = boto3.client('iot', region_name='eu-west-1')
+    name = "my-thing"
+    job_id = "TestJob"
+    # thing
+    thing = client.create_thing(thingName=name)
+    thing.should.have.key('thingName').which.should.equal(name)
+    thing.should.have.key('thingArn')
+
+    # job document
+    job_document = {
+        "field": "value"
+    }
+
+    job = client.create_job(
+        jobId=job_id,
+        targets=[thing["thingArn"]],
+        document=json.dumps(job_document),
+        description="Description",
+        presignedUrlConfig={
+            'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
+            'expiresInSec': 123
+        },
+        targetSelection="CONTINUOUS",
+        jobExecutionsRolloutConfig={
+            'maximumPerMinute': 10
+        }
+    )
+
+    job.should.have.key('jobId').which.should.equal(job_id)
+    job.should.have.key('jobArn')
+    job.should.have.key('description')
+
+
+@mock_iot
+def test_list_jobs():
+    client = boto3.client('iot', region_name='eu-west-1')
+    name = "my-thing"
+    job_id = "TestJob"
+    # thing
+    thing = client.create_thing(thingName=name)
+    thing.should.have.key('thingName').which.should.equal(name)
+    thing.should.have.key('thingArn')
+
+    # job document
+    job_document = {
+        "field": "value"
+    }
+
+    job1 = client.create_job(
+        jobId=job_id,
+        targets=[thing["thingArn"]],
+        document=json.dumps(job_document),
+        description="Description",
+        presignedUrlConfig={
+            'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
+            'expiresInSec': 123
+        },
+        targetSelection="CONTINUOUS",
+        jobExecutionsRolloutConfig={
+            'maximumPerMinute': 10
+        }
+    )
+
+    job1.should.have.key('jobId').which.should.equal(job_id)
+    job1.should.have.key('jobArn')
+    job1.should.have.key('description')
+
+    job2 = client.create_job(
+        jobId=job_id+"1",
+        targets=[thing["thingArn"]],
+        document=json.dumps(job_document),
+        description="Description",
+        presignedUrlConfig={
+            'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
+            'expiresInSec': 123
+        },
+        targetSelection="CONTINUOUS",
+        jobExecutionsRolloutConfig={
+            'maximumPerMinute': 10
+        }
+    )
+
+    job2.should.have.key('jobId').which.should.equal(job_id+"1")
+    job2.should.have.key('jobArn')
+    job2.should.have.key('description')
+
+    jobs = client.list_jobs()
+    jobs.should.have.key('jobs')
+    jobs.should_not.have.key('nextToken')
+    jobs['jobs'][0].should.have.key('jobId').which.should.equal(job_id)
+    jobs['jobs'][1].should.have.key('jobId').which.should.equal(job_id+"1")
+
+
+@mock_iot
+def test_describe_job():
+    client = boto3.client('iot', region_name='eu-west-1')
+    name = "my-thing"
+    job_id = "TestJob"
+    # thing
+    thing = client.create_thing(thingName=name)
+    thing.should.have.key('thingName').which.should.equal(name)
+    thing.should.have.key('thingArn')
+
+    job = client.create_job(
+        jobId=job_id,
+        targets=[thing["thingArn"]],
+        documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json",
+        presignedUrlConfig={
+            'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role',
+            'expiresInSec': 123
+        },
+        targetSelection="CONTINUOUS",
+        jobExecutionsRolloutConfig={
+            'maximumPerMinute': 10
+        }
+    )
+
+    job.should.have.key('jobId').which.should.equal(job_id)
+    job.should.have.key('jobArn')
+
+    job = client.describe_job(jobId=job_id)
+    job.should.have.key('documentSource')
+    job.should.have.key('job')
+    job.should.have.key('job').which.should.have.key("jobArn")
+    job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id)
+    job.should.have.key('job').which.should.have.key("targets")
+    job.should.have.key('job').which.should.have.key("jobProcessDetails")
+    job.should.have.key('job').which.should.have.key("lastUpdatedAt")
+    job.should.have.key('job').which.should.have.key("createdAt")
+    job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig")
+    job.should.have.key('job').which.should.have.key("targetSelection").which.should.equal("CONTINUOUS")
+    
job.should.have.key('job').which.should.have.key("presignedUrlConfig") + job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( + "roleArn").which.should.equal('arn:aws:iam::1:role/service-role/iot_job_role') + job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( + "expiresInSec").which.should.equal(123) + job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key( + "maximumPerMinute").which.should.equal(10) + + +@mock_iot +def test_describe_job_1(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job = client.describe_job(jobId=job_id) + job.should.have.key('job') + job.should.have.key('job').which.should.have.key("jobArn") + job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) + job.should.have.key('job').which.should.have.key("targets") + job.should.have.key('job').which.should.have.key("jobProcessDetails") + job.should.have.key('job').which.should.have.key("lastUpdatedAt") + job.should.have.key('job').which.should.have.key("createdAt") + job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig") + job.should.have.key('job').which.should.have.key("targetSelection").which.should.equal("CONTINUOUS") + job.should.have.key('job').which.should.have.key("presignedUrlConfig") + job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( + "roleArn").which.should.equal('arn:aws:iam::1:role/service-role/iot_job_role') + job.should.have.key('job').which.should.have.key("presignedUrlConfig").which.should.have.key( + "expiresInSec").which.should.equal(123) + job.should.have.key('job').which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key( + "maximumPerMinute").which.should.equal(10) + + +@mock_iot +def test_delete_job(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job = client.describe_job(jobId=job_id) + job.should.have.key('job') + job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) + + client.delete_job(jobId=job_id) + + client.list_jobs()['jobs'].should.have.length_of(0) + + +@mock_iot +def test_cancel_job(): + 
client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job = client.describe_job(jobId=job_id) + job.should.have.key('job') + job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) + + job = client.cancel_job(jobId=job_id, reasonCode='Because', comment='You are') + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job = client.describe_job(jobId=job_id) + job.should.have.key('job') + job.should.have.key('job').which.should.have.key("jobId").which.should.equal(job_id) + job.should.have.key('job').which.should.have.key("status").which.should.equal('CANCELED') + job.should.have.key('job').which.should.have.key("forceCanceled").which.should.equal(False) + job.should.have.key('job').which.should.have.key("reasonCode").which.should.equal('Because') + job.should.have.key('job').which.should.have.key("comment").which.should.equal('You are') + + +@mock_iot +def test_get_job_document_with_document_source(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job_document = client.get_job_document(jobId=job_id) + job_document.should.have.key('document').which.should.equal('') + + +@mock_iot +def test_get_job_document_with_document(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + + job_document = client.get_job_document(jobId=job_id) + job_document.should.have.key('document').which.should.equal("{\"field\": \"value\"}") + + +@mock_iot +def test_describe_job_execution(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" 
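+    # NOTE: moto's fake job execution appears to be backed by fixed placeholder
+    # values rather than values derived from the request (executionNumber,
+    # versionNumber and approximateSecondsBeforeTimedOut all come back as 123);
+    # the assertions below rely on that behaviour.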
+ # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + job.should.have.key('description') + + job_execution = client.describe_job_execution(jobId=job_id, thingName=name) + job_execution.should.have.key('execution') + job_execution['execution'].should.have.key('jobId').which.should.equal(job_id) + job_execution['execution'].should.have.key('status').which.should.equal('QUEUED') + job_execution['execution'].should.have.key('forceCanceled').which.should.equal(False) + job_execution['execution'].should.have.key('statusDetails').which.should.equal({'detailsMap': {}}) + job_execution['execution'].should.have.key('thingArn').which.should.equal(thing["thingArn"]) + job_execution['execution'].should.have.key('queuedAt') + job_execution['execution'].should.have.key('startedAt') + job_execution['execution'].should.have.key('lastUpdatedAt') + job_execution['execution'].should.have.key('executionNumber').which.should.equal(123) + job_execution['execution'].should.have.key('versionNumber').which.should.equal(123) + job_execution['execution'].should.have.key('approximateSecondsBeforeTimedOut').which.should.equal(123) + + job_execution = client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=123) + job_execution.should.have.key('execution') + job_execution['execution'].should.have.key('jobId').which.should.equal(job_id) + job_execution['execution'].should.have.key('status').which.should.equal('QUEUED') + job_execution['execution'].should.have.key('forceCanceled').which.should.equal(False) + job_execution['execution'].should.have.key('statusDetails').which.should.equal({'detailsMap': {}}) + job_execution['execution'].should.have.key('thingArn').which.should.equal(thing["thingArn"]) + job_execution['execution'].should.have.key('queuedAt') + job_execution['execution'].should.have.key('startedAt') + job_execution['execution'].should.have.key('lastUpdatedAt') + job_execution['execution'].should.have.key('executionNumber').which.should.equal(123) + job_execution['execution'].should.have.key('versionNumber').which.should.equal(123) + job_execution['execution'].should.have.key('approximateSecondsBeforeTimedOut').which.should.equal(123) + + try: + client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=456) + except ClientError as exc: + error_code = exc.response['Error']['Code'] + error_code.should.equal('ResourceNotFoundException') + else: + raise Exception("Should have raised error") + + +@mock_iot +def test_cancel_job_execution(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + 
presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + job.should.have.key('description') + + client.cancel_job_execution(jobId=job_id, thingName=name) + job_execution = client.describe_job_execution(jobId=job_id, thingName=name) + job_execution.should.have.key('execution') + job_execution['execution'].should.have.key('status').which.should.equal('CANCELED') + + +@mock_iot +def test_delete_job_execution(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + job.should.have.key('description') + + client.delete_job_execution(jobId=job_id, thingName=name, executionNumber=123) + try: + client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=123) + except ClientError as exc: + error_code = exc.response['Error']['Code'] + error_code.should.equal('ResourceNotFoundException') + else: + raise Exception("Should have raised error") + + +@mock_iot +def test_list_job_executions_for_job(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + job.should.have.key('description') + + job_execution = client.list_job_executions_for_job(jobId=job_id) + job_execution.should.have.key('executionSummaries') + job_execution['executionSummaries'][0].should.have.key('thingArn').which.should.equal(thing["thingArn"]) + + +@mock_iot +def test_list_job_executions_for_thing(): + client = boto3.client('iot', region_name='eu-west-1') + name = "my-thing" + job_id = "TestJob" + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # job document + job_document = { + "field": "value" + } + + job = client.create_job( + jobId=job_id, + targets=[thing["thingArn"]], + document=json.dumps(job_document), + description="Description", + presignedUrlConfig={ + 'roleArn': 'arn:aws:iam::1:role/service-role/iot_job_role', + 'expiresInSec': 123 + }, + targetSelection="CONTINUOUS", + 
jobExecutionsRolloutConfig={ + 'maximumPerMinute': 10 + } + ) + + job.should.have.key('jobId').which.should.equal(job_id) + job.should.have.key('jobArn') + job.should.have.key('description') + + job_execution = client.list_job_executions_for_thing(thingName=name) + job_execution.should.have.key('executionSummaries') + job_execution['executionSummaries'][0].should.have.key('jobId').which.should.equal(job_id) + From b94147a1d57ccfec5b18ddd5b9b9aed503d01015 Mon Sep 17 00:00:00 2001 From: Stephan Huber Date: Fri, 30 Aug 2019 14:18:01 +0200 Subject: [PATCH 020/125] Merge remote-tracking branch 'upstream/master' --- moto/core/access_control.py | 365 +++++ moto/ec2/responses/launch_templates.py | 252 +++ moto/iam/policy_validation.py | 450 ++++++ moto/ses/feedback.py | 81 + moto/sts/exceptions.py | 15 + moto/sts/utils.py | 35 + other_langs/sqsSample.scala | 25 + tests/test_core/test_auth.py | 706 +++++++++ tests/test_core/test_context_manager.py | 12 + tests/test_core/test_socket.py | 48 + tests/test_ec2/test_launch_templates.py | 415 +++++ tests/test_iam/test_iam_policies.py | 1861 +++++++++++++++++++++++ tests/test_ses/test_ses_sns_boto3.py | 114 ++ update_version_from_git.py | 120 ++ 14 files changed, 4499 insertions(+) create mode 100644 moto/core/access_control.py create mode 100644 moto/ec2/responses/launch_templates.py create mode 100644 moto/iam/policy_validation.py create mode 100644 moto/ses/feedback.py create mode 100644 moto/sts/exceptions.py create mode 100644 moto/sts/utils.py create mode 100644 other_langs/sqsSample.scala create mode 100644 tests/test_core/test_auth.py create mode 100644 tests/test_core/test_context_manager.py create mode 100644 tests/test_core/test_socket.py create mode 100644 tests/test_ec2/test_launch_templates.py create mode 100644 tests/test_iam/test_iam_policies.py create mode 100644 tests/test_ses/test_ses_sns_boto3.py create mode 100644 update_version_from_git.py diff --git a/moto/core/access_control.py b/moto/core/access_control.py new file mode 100644 index 000000000..3fb11eebd --- /dev/null +++ b/moto/core/access_control.py @@ -0,0 +1,365 @@ +""" +This implementation is NOT complete, there are many things to improve. +The following is a list of the most important missing features and inaccuracies. 
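+
+In short, the flow implemented below is: wrap each incoming request in an
+IAMRequest (or S3IAMRequest), recompute its SigV4 signature from the stored
+credentials and compare it with the signature sent, then evaluate the caller's
+collected IAM policies statement by statement (an explicit Deny wins,
+otherwise any matching Allow permits the action).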
+ +TODO add support for more principals, apart from IAM users and assumed IAM roles +TODO add support for the Resource and Condition parts of IAM policies +TODO add support and create tests for all services in moto (for example, API Gateway is probably not supported currently) +TODO implement service specific error messages (currently, EC2 and S3 are supported separately, everything else defaults to the errors IAM returns) +TODO include information about the action's resource in error messages (once the Resource element in IAM policies is supported) +TODO check all other actions that are performed by the action called by the user (for example, autoscaling:CreateAutoScalingGroup requires permission for iam:CreateServiceLinkedRole too - see https://docs.aws.amazon.com/autoscaling/ec2/userguide/control-access-using-iam.html) +TODO add support for resource-based policies + +""" + +import json +import logging +import re +from abc import abstractmethod, ABCMeta +from enum import Enum + +import six +from botocore.auth import SigV4Auth, S3SigV4Auth +from botocore.awsrequest import AWSRequest +from botocore.credentials import Credentials +from six import string_types + +from moto.iam.models import ACCOUNT_ID, Policy +from moto.iam import iam_backend +from moto.core.exceptions import SignatureDoesNotMatchError, AccessDeniedError, InvalidClientTokenIdError, AuthFailureError +from moto.s3.exceptions import ( + BucketAccessDeniedError, + S3AccessDeniedError, + BucketInvalidTokenError, + S3InvalidTokenError, + S3InvalidAccessKeyIdError, + BucketInvalidAccessKeyIdError, + BucketSignatureDoesNotMatchError, + S3SignatureDoesNotMatchError +) +from moto.sts import sts_backend + +log = logging.getLogger(__name__) + + +def create_access_key(access_key_id, headers): + if access_key_id.startswith("AKIA") or "X-Amz-Security-Token" not in headers: + return IAMUserAccessKey(access_key_id, headers) + else: + return AssumedRoleAccessKey(access_key_id, headers) + + +class IAMUserAccessKey(object): + + def __init__(self, access_key_id, headers): + iam_users = iam_backend.list_users('/', None, None) + for iam_user in iam_users: + for access_key in iam_user.access_keys: + if access_key.access_key_id == access_key_id: + self._owner_user_name = iam_user.name + self._access_key_id = access_key_id + self._secret_access_key = access_key.secret_access_key + if "X-Amz-Security-Token" in headers: + raise CreateAccessKeyFailure(reason="InvalidToken") + return + raise CreateAccessKeyFailure(reason="InvalidId") + + @property + def arn(self): + return "arn:aws:iam::{account_id}:user/{iam_user_name}".format( + account_id=ACCOUNT_ID, + iam_user_name=self._owner_user_name + ) + + def create_credentials(self): + return Credentials(self._access_key_id, self._secret_access_key) + + def collect_policies(self): + user_policies = [] + + inline_policy_names = iam_backend.list_user_policies(self._owner_user_name) + for inline_policy_name in inline_policy_names: + inline_policy = iam_backend.get_user_policy(self._owner_user_name, inline_policy_name) + user_policies.append(inline_policy) + + attached_policies, _ = iam_backend.list_attached_user_policies(self._owner_user_name) + user_policies += attached_policies + + user_groups = iam_backend.get_groups_for_user(self._owner_user_name) + for user_group in user_groups: + inline_group_policy_names = iam_backend.list_group_policies(user_group.name) + for inline_group_policy_name in inline_group_policy_names: + inline_user_group_policy = iam_backend.get_group_policy(user_group.name, 
inline_group_policy_name) + user_policies.append(inline_user_group_policy) + + attached_group_policies, _ = iam_backend.list_attached_group_policies(user_group.name) + user_policies += attached_group_policies + + return user_policies + + +class AssumedRoleAccessKey(object): + + def __init__(self, access_key_id, headers): + for assumed_role in sts_backend.assumed_roles: + if assumed_role.access_key_id == access_key_id: + self._access_key_id = access_key_id + self._secret_access_key = assumed_role.secret_access_key + self._session_token = assumed_role.session_token + self._owner_role_name = assumed_role.role_arn.split("/")[-1] + self._session_name = assumed_role.session_name + if headers["X-Amz-Security-Token"] != self._session_token: + raise CreateAccessKeyFailure(reason="InvalidToken") + return + raise CreateAccessKeyFailure(reason="InvalidId") + + @property + def arn(self): + return "arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name}".format( + account_id=ACCOUNT_ID, + role_name=self._owner_role_name, + session_name=self._session_name + ) + + def create_credentials(self): + return Credentials(self._access_key_id, self._secret_access_key, self._session_token) + + def collect_policies(self): + role_policies = [] + + inline_policy_names = iam_backend.list_role_policies(self._owner_role_name) + for inline_policy_name in inline_policy_names: + _, inline_policy = iam_backend.get_role_policy(self._owner_role_name, inline_policy_name) + role_policies.append(inline_policy) + + attached_policies, _ = iam_backend.list_attached_role_policies(self._owner_role_name) + role_policies += attached_policies + + return role_policies + + +class CreateAccessKeyFailure(Exception): + + def __init__(self, reason, *args): + super(CreateAccessKeyFailure, self).__init__(*args) + self.reason = reason + + +@six.add_metaclass(ABCMeta) +class IAMRequestBase(object): + + def __init__(self, method, path, data, headers): + log.debug("Creating {class_name} with method={method}, path={path}, data={data}, headers={headers}".format( + class_name=self.__class__.__name__, method=method, path=path, data=data, headers=headers)) + self._method = method + self._path = path + self._data = data + self._headers = headers + credential_scope = self._get_string_between('Credential=', ',', self._headers['Authorization']) + credential_data = credential_scope.split('/') + self._region = credential_data[2] + self._service = credential_data[3] + self._action = self._service + ":" + (self._data["Action"][0] if isinstance(self._data["Action"], list) else self._data["Action"]) + try: + self._access_key = create_access_key(access_key_id=credential_data[0], headers=headers) + except CreateAccessKeyFailure as e: + self._raise_invalid_access_key(e.reason) + + def check_signature(self): + original_signature = self._get_string_between('Signature=', ',', self._headers['Authorization']) + calculated_signature = self._calculate_signature() + if original_signature != calculated_signature: + self._raise_signature_does_not_match() + + def check_action_permitted(self): + if self._action == 'sts:GetCallerIdentity': # always allowed, even if there's an explicit Deny for it + return True + policies = self._access_key.collect_policies() + + permitted = False + for policy in policies: + iam_policy = IAMPolicy(policy) + permission_result = iam_policy.is_action_permitted(self._action) + if permission_result == PermissionResult.DENIED: + self._raise_access_denied() + elif permission_result == PermissionResult.PERMITTED: + permitted = True + + if not 
permitted: + self._raise_access_denied() + + @abstractmethod + def _raise_signature_does_not_match(self): + raise NotImplementedError() + + @abstractmethod + def _raise_access_denied(self): + raise NotImplementedError() + + @abstractmethod + def _raise_invalid_access_key(self, reason): + raise NotImplementedError() + + @abstractmethod + def _create_auth(self, credentials): + raise NotImplementedError() + + @staticmethod + def _create_headers_for_aws_request(signed_headers, original_headers): + headers = {} + for key, value in original_headers.items(): + if key.lower() in signed_headers: + headers[key] = value + return headers + + def _create_aws_request(self): + signed_headers = self._get_string_between('SignedHeaders=', ',', self._headers['Authorization']).split(';') + headers = self._create_headers_for_aws_request(signed_headers, self._headers) + request = AWSRequest(method=self._method, url=self._path, data=self._data, headers=headers) + request.context['timestamp'] = headers['X-Amz-Date'] + + return request + + def _calculate_signature(self): + credentials = self._access_key.create_credentials() + auth = self._create_auth(credentials) + request = self._create_aws_request() + canonical_request = auth.canonical_request(request) + string_to_sign = auth.string_to_sign(request, canonical_request) + return auth.signature(string_to_sign, request) + + @staticmethod + def _get_string_between(first_separator, second_separator, string): + return string.partition(first_separator)[2].partition(second_separator)[0] + + +class IAMRequest(IAMRequestBase): + + def _raise_signature_does_not_match(self): + if self._service == "ec2": + raise AuthFailureError() + else: + raise SignatureDoesNotMatchError() + + def _raise_invalid_access_key(self, _): + if self._service == "ec2": + raise AuthFailureError() + else: + raise InvalidClientTokenIdError() + + def _create_auth(self, credentials): + return SigV4Auth(credentials, self._service, self._region) + + def _raise_access_denied(self): + raise AccessDeniedError( + user_arn=self._access_key.arn, + action=self._action + ) + + +class S3IAMRequest(IAMRequestBase): + + def _raise_signature_does_not_match(self): + if "BucketName" in self._data: + raise BucketSignatureDoesNotMatchError(bucket=self._data["BucketName"]) + else: + raise S3SignatureDoesNotMatchError() + + def _raise_invalid_access_key(self, reason): + if reason == "InvalidToken": + if "BucketName" in self._data: + raise BucketInvalidTokenError(bucket=self._data["BucketName"]) + else: + raise S3InvalidTokenError() + else: + if "BucketName" in self._data: + raise BucketInvalidAccessKeyIdError(bucket=self._data["BucketName"]) + else: + raise S3InvalidAccessKeyIdError() + + def _create_auth(self, credentials): + return S3SigV4Auth(credentials, self._service, self._region) + + def _raise_access_denied(self): + if "BucketName" in self._data: + raise BucketAccessDeniedError(bucket=self._data["BucketName"]) + else: + raise S3AccessDeniedError() + + +class IAMPolicy(object): + + def __init__(self, policy): + if isinstance(policy, Policy): + default_version = next(policy_version for policy_version in policy.versions if policy_version.is_default) + policy_document = default_version.document + elif isinstance(policy, string_types): + policy_document = policy + else: + policy_document = policy["policy_document"] + + self._policy_json = json.loads(policy_document) + + def is_action_permitted(self, action): + permitted = False + if isinstance(self._policy_json["Statement"], list): + for policy_statement in 
self._policy_json["Statement"]: + iam_policy_statement = IAMPolicyStatement(policy_statement) + permission_result = iam_policy_statement.is_action_permitted(action) + if permission_result == PermissionResult.DENIED: + return permission_result + elif permission_result == PermissionResult.PERMITTED: + permitted = True + else: # dict + iam_policy_statement = IAMPolicyStatement(self._policy_json["Statement"]) + return iam_policy_statement.is_action_permitted(action) + + if permitted: + return PermissionResult.PERMITTED + else: + return PermissionResult.NEUTRAL + + +class IAMPolicyStatement(object): + + def __init__(self, statement): + self._statement = statement + + def is_action_permitted(self, action): + is_action_concerned = False + + if "NotAction" in self._statement: + if not self._check_element_matches("NotAction", action): + is_action_concerned = True + else: # Action is present + if self._check_element_matches("Action", action): + is_action_concerned = True + + if is_action_concerned: + if self._statement["Effect"] == "Allow": + return PermissionResult.PERMITTED + else: # Deny + return PermissionResult.DENIED + else: + return PermissionResult.NEUTRAL + + def _check_element_matches(self, statement_element, value): + if isinstance(self._statement[statement_element], list): + for statement_element_value in self._statement[statement_element]: + if self._match(statement_element_value, value): + return True + return False + else: # string + return self._match(self._statement[statement_element], value) + + @staticmethod + def _match(pattern, string): + pattern = pattern.replace("*", ".*") + pattern = "^{pattern}$".format(pattern=pattern) + return re.match(pattern, string) + + +class PermissionResult(Enum): + PERMITTED = 1 + DENIED = 2 + NEUTRAL = 3 diff --git a/moto/ec2/responses/launch_templates.py b/moto/ec2/responses/launch_templates.py new file mode 100644 index 000000000..a8d92a928 --- /dev/null +++ b/moto/ec2/responses/launch_templates.py @@ -0,0 +1,252 @@ +import six +import uuid +from moto.core.responses import BaseResponse +from moto.ec2.models import OWNER_ID +from moto.ec2.exceptions import FilterNotImplementedError +from moto.ec2.utils import filters_from_querystring + +from xml.etree import ElementTree +from xml.dom import minidom + + +def xml_root(name): + root = ElementTree.Element(name, { + "xmlns": "http://ec2.amazonaws.com/doc/2016-11-15/" + }) + request_id = str(uuid.uuid4()) + "example" + ElementTree.SubElement(root, "requestId").text = request_id + + return root + + +def xml_serialize(tree, key, value): + name = key[0].lower() + key[1:] + if isinstance(value, list): + if name[-1] == 's': + name = name[:-1] + + name = name + 'Set' + + node = ElementTree.SubElement(tree, name) + + if isinstance(value, (str, int, float, six.text_type)): + node.text = str(value) + elif isinstance(value, dict): + for dictkey, dictvalue in six.iteritems(value): + xml_serialize(node, dictkey, dictvalue) + elif isinstance(value, list): + for item in value: + xml_serialize(node, 'item', item) + elif value is None: + pass + else: + raise NotImplementedError("Don't know how to serialize \"{}\" to xml".format(value.__class__)) + + +def pretty_xml(tree): + rough = ElementTree.tostring(tree, 'utf-8') + parsed = minidom.parseString(rough) + return parsed.toprettyxml(indent=' ') + + +def parse_object(raw_data): + out_data = {} + for key, value in six.iteritems(raw_data): + key_fix_splits = key.split("_") + key_len = len(key_fix_splits) + + new_key = "" + for i in range(0, key_len): + new_key += 
key_fix_splits[i][0].upper() + key_fix_splits[i][1:] + + data = out_data + splits = new_key.split(".") + for split in splits[:-1]: + if split not in data: + data[split] = {} + data = data[split] + + data[splits[-1]] = value + + out_data = parse_lists(out_data) + return out_data + + +def parse_lists(data): + for key, value in six.iteritems(data): + if isinstance(value, dict): + keys = data[key].keys() + is_list = all(map(lambda k: k.isnumeric(), keys)) + + if is_list: + new_value = [] + keys = sorted(list(keys)) + for k in keys: + lvalue = value[k] + if isinstance(lvalue, dict): + lvalue = parse_lists(lvalue) + new_value.append(lvalue) + data[key] = new_value + return data + + +class LaunchTemplates(BaseResponse): + def create_launch_template(self): + name = self._get_param('LaunchTemplateName') + version_description = self._get_param('VersionDescription') + tag_spec = self._parse_tag_specification("TagSpecification") + + raw_template_data = self._get_dict_param('LaunchTemplateData.') + parsed_template_data = parse_object(raw_template_data) + + if self.is_not_dryrun('CreateLaunchTemplate'): + if tag_spec: + if 'TagSpecifications' not in parsed_template_data: + parsed_template_data['TagSpecifications'] = [] + converted_tag_spec = [] + for resource_type, tags in six.iteritems(tag_spec): + converted_tag_spec.append({ + "ResourceType": resource_type, + "Tags": [{"Key": key, "Value": value} for key, value in six.iteritems(tags)], + }) + + parsed_template_data['TagSpecifications'].extend(converted_tag_spec) + + template = self.ec2_backend.create_launch_template(name, version_description, parsed_template_data) + version = template.default_version() + + tree = xml_root("CreateLaunchTemplateResponse") + xml_serialize(tree, "launchTemplate", { + "createTime": version.create_time, + "createdBy": "arn:aws:iam::{OWNER_ID}:root".format(OWNER_ID=OWNER_ID), + "defaultVersionNumber": template.default_version_number, + "latestVersionNumber": version.number, + "launchTemplateId": template.id, + "launchTemplateName": template.name + }) + + return pretty_xml(tree) + + def create_launch_template_version(self): + name = self._get_param('LaunchTemplateName') + tmpl_id = self._get_param('LaunchTemplateId') + if name: + template = self.ec2_backend.get_launch_template_by_name(name) + if tmpl_id: + template = self.ec2_backend.get_launch_template(tmpl_id) + + version_description = self._get_param('VersionDescription') + + raw_template_data = self._get_dict_param('LaunchTemplateData.') + template_data = parse_object(raw_template_data) + + if self.is_not_dryrun('CreateLaunchTemplate'): + version = template.create_version(template_data, version_description) + + tree = xml_root("CreateLaunchTemplateVersionResponse") + xml_serialize(tree, "launchTemplateVersion", { + "createTime": version.create_time, + "createdBy": "arn:aws:iam::{OWNER_ID}:root".format(OWNER_ID=OWNER_ID), + "defaultVersion": template.is_default(version), + "launchTemplateData": version.data, + "launchTemplateId": template.id, + "launchTemplateName": template.name, + "versionDescription": version.description, + "versionNumber": version.number, + }) + return pretty_xml(tree) + + # def delete_launch_template(self): + # pass + + # def delete_launch_template_versions(self): + # pass + + def describe_launch_template_versions(self): + name = self._get_param('LaunchTemplateName') + template_id = self._get_param('LaunchTemplateId') + if name: + template = self.ec2_backend.get_launch_template_by_name(name) + if template_id: + template = 
self.ec2_backend.get_launch_template(template_id) + + max_results = self._get_int_param("MaxResults", 15) + versions = self._get_multi_param("LaunchTemplateVersion") + min_version = self._get_int_param("MinVersion") + max_version = self._get_int_param("MaxVersion") + + filters = filters_from_querystring(self.querystring) + if filters: + raise FilterNotImplementedError("all filters", "DescribeLaunchTemplateVersions") + + if self.is_not_dryrun('DescribeLaunchTemplateVersions'): + tree = ElementTree.Element("DescribeLaunchTemplateVersionsResponse", { + "xmlns": "http://ec2.amazonaws.com/doc/2016-11-15/", + }) + request_id = ElementTree.SubElement(tree, "requestId") + request_id.text = "65cadec1-b364-4354-8ca8-4176dexample" + + versions_node = ElementTree.SubElement(tree, "launchTemplateVersionSet") + + ret_versions = [] + if versions: + for v in versions: + ret_versions.append(template.get_version(int(v))) + elif min_version: + if max_version: + vMax = max_version + else: + vMax = min_version + max_results + + vMin = min_version - 1 + ret_versions = template.versions[vMin:vMax] + elif max_version: + vMax = max_version + ret_versions = template.versions[:vMax] + else: + ret_versions = template.versions + + ret_versions = ret_versions[:max_results] + + for version in ret_versions: + xml_serialize(versions_node, "item", { + "createTime": version.create_time, + "createdBy": "arn:aws:iam::{OWNER_ID}:root".format(OWNER_ID=OWNER_ID), + "defaultVersion": True, + "launchTemplateData": version.data, + "launchTemplateId": template.id, + "launchTemplateName": template.name, + "versionDescription": version.description, + "versionNumber": version.number, + }) + + return pretty_xml(tree) + + def describe_launch_templates(self): + max_results = self._get_int_param("MaxResults", 15) + template_names = self._get_multi_param("LaunchTemplateName") + template_ids = self._get_multi_param("LaunchTemplateId") + filters = filters_from_querystring(self.querystring) + + if self.is_not_dryrun("DescribeLaunchTemplates"): + tree = ElementTree.Element("DescribeLaunchTemplatesResponse") + templates_node = ElementTree.SubElement(tree, "launchTemplates") + + templates = self.ec2_backend.get_launch_templates(template_names=template_names, template_ids=template_ids, filters=filters) + + templates = templates[:max_results] + + for template in templates: + xml_serialize(templates_node, "item", { + "createTime": template.create_time, + "createdBy": "arn:aws:iam::{OWNER_ID}:root".format(OWNER_ID=OWNER_ID), + "defaultVersionNumber": template.default_version_number, + "latestVersionNumber": template.latest_version_number, + "launchTemplateId": template.id, + "launchTemplateName": template.name, + }) + + return pretty_xml(tree) + + # def modify_launch_template(self): + # pass diff --git a/moto/iam/policy_validation.py b/moto/iam/policy_validation.py new file mode 100644 index 000000000..6ee286072 --- /dev/null +++ b/moto/iam/policy_validation.py @@ -0,0 +1,450 @@ +import json +import re + +from six import string_types + +from moto.iam.exceptions import MalformedPolicyDocument + + +VALID_TOP_ELEMENTS = [ + "Version", + "Id", + "Statement", + "Conditions" +] + +VALID_VERSIONS = [ + "2008-10-17", + "2012-10-17" +] + +VALID_STATEMENT_ELEMENTS = [ + "Sid", + "Action", + "NotAction", + "Resource", + "NotResource", + "Effect", + "Condition" +] + +VALID_EFFECTS = [ + "Allow", + "Deny" +] + +VALID_CONDITIONS = [ + "StringEquals", + "StringNotEquals", + "StringEqualsIgnoreCase", + "StringNotEqualsIgnoreCase", + "StringLike", + "StringNotLike", 
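+ # Numeric, date, Bool, binary, IP-address, ARN and Null operators follow;
+ # together these mirror the condition operators documented for IAM policies.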
+ "NumericEquals", + "NumericNotEquals", + "NumericLessThan", + "NumericLessThanEquals", + "NumericGreaterThan", + "NumericGreaterThanEquals", + "DateEquals", + "DateNotEquals", + "DateLessThan", + "DateLessThanEquals", + "DateGreaterThan", + "DateGreaterThanEquals", + "Bool", + "BinaryEquals", + "IpAddress", + "NotIpAddress", + "ArnEquals", + "ArnLike", + "ArnNotEquals", + "ArnNotLike", + "Null" +] + +VALID_CONDITION_PREFIXES = [ + "ForAnyValue:", + "ForAllValues:" +] + +VALID_CONDITION_POSTFIXES = [ + "IfExists" +] + +SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS = { + "iam": 'IAM resource {resource} cannot contain region information.', + "s3": 'Resource {resource} can not contain region information.' +} + +VALID_RESOURCE_PATH_STARTING_VALUES = { + "iam": { + "values": ["user/", "federated-user/", "role/", "group/", "instance-profile/", "mfa/", "server-certificate/", + "policy/", "sms-mfa/", "saml-provider/", "oidc-provider/", "report/", "access-report/"], + "error_message": 'IAM resource path must either be "*" or start with {values}.' + } +} + + +class IAMPolicyDocumentValidator: + + def __init__(self, policy_document): + self._policy_document = policy_document + self._policy_json = {} + self._statements = [] + self._resource_error = "" # the first resource error found that does not generate a legacy parsing error + + def validate(self): + try: + self._validate_syntax() + except Exception: + raise MalformedPolicyDocument("Syntax errors in policy.") + try: + self._validate_version() + except Exception: + raise MalformedPolicyDocument("Policy document must be version 2012-10-17 or greater.") + try: + self._perform_first_legacy_parsing() + self._validate_resources_for_formats() + self._validate_not_resources_for_formats() + except Exception: + raise MalformedPolicyDocument("The policy failed legacy parsing") + try: + self._validate_sid_uniqueness() + except Exception: + raise MalformedPolicyDocument("Statement IDs (SID) in a single policy must be unique.") + try: + self._validate_action_like_exist() + except Exception: + raise MalformedPolicyDocument("Policy statement must contain actions.") + try: + self._validate_resource_exist() + except Exception: + raise MalformedPolicyDocument("Policy statement must contain resources.") + + if self._resource_error != "": + raise MalformedPolicyDocument(self._resource_error) + + self._validate_actions_for_prefixes() + self._validate_not_actions_for_prefixes() + + def _validate_syntax(self): + self._policy_json = json.loads(self._policy_document) + assert isinstance(self._policy_json, dict) + self._validate_top_elements() + self._validate_version_syntax() + self._validate_id_syntax() + self._validate_statements_syntax() + + def _validate_top_elements(self): + top_elements = self._policy_json.keys() + for element in top_elements: + assert element in VALID_TOP_ELEMENTS + + def _validate_version_syntax(self): + if "Version" in self._policy_json: + assert self._policy_json["Version"] in VALID_VERSIONS + + def _validate_version(self): + assert self._policy_json["Version"] == "2012-10-17" + + def _validate_sid_uniqueness(self): + sids = [] + for statement in self._statements: + if "Sid" in statement: + assert statement["Sid"] not in sids + sids.append(statement["Sid"]) + + def _validate_statements_syntax(self): + assert "Statement" in self._policy_json + assert isinstance(self._policy_json["Statement"], (dict, list)) + + if isinstance(self._policy_json["Statement"], dict): + self._statements.append(self._policy_json["Statement"]) + else: + 
self._statements += self._policy_json["Statement"] + + assert self._statements + for statement in self._statements: + self._validate_statement_syntax(statement) + + @staticmethod + def _validate_statement_syntax(statement): + assert isinstance(statement, dict) + for statement_element in statement.keys(): + assert statement_element in VALID_STATEMENT_ELEMENTS + + assert ("Resource" not in statement or "NotResource" not in statement) + assert ("Action" not in statement or "NotAction" not in statement) + + IAMPolicyDocumentValidator._validate_effect_syntax(statement) + IAMPolicyDocumentValidator._validate_action_syntax(statement) + IAMPolicyDocumentValidator._validate_not_action_syntax(statement) + IAMPolicyDocumentValidator._validate_resource_syntax(statement) + IAMPolicyDocumentValidator._validate_not_resource_syntax(statement) + IAMPolicyDocumentValidator._validate_condition_syntax(statement) + IAMPolicyDocumentValidator._validate_sid_syntax(statement) + + @staticmethod + def _validate_effect_syntax(statement): + assert "Effect" in statement + assert isinstance(statement["Effect"], string_types) + assert statement["Effect"].lower() in [allowed_effect.lower() for allowed_effect in VALID_EFFECTS] + + @staticmethod + def _validate_action_syntax(statement): + IAMPolicyDocumentValidator._validate_string_or_list_of_strings_syntax(statement, "Action") + + @staticmethod + def _validate_not_action_syntax(statement): + IAMPolicyDocumentValidator._validate_string_or_list_of_strings_syntax(statement, "NotAction") + + @staticmethod + def _validate_resource_syntax(statement): + IAMPolicyDocumentValidator._validate_string_or_list_of_strings_syntax(statement, "Resource") + + @staticmethod + def _validate_not_resource_syntax(statement): + IAMPolicyDocumentValidator._validate_string_or_list_of_strings_syntax(statement, "NotResource") + + @staticmethod + def _validate_string_or_list_of_strings_syntax(statement, key): + if key in statement: + assert isinstance(statement[key], (string_types, list)) + if isinstance(statement[key], list): + for resource in statement[key]: + assert isinstance(resource, string_types) + + @staticmethod + def _validate_condition_syntax(statement): + if "Condition" in statement: + assert isinstance(statement["Condition"], dict) + for condition_key, condition_value in statement["Condition"].items(): + assert isinstance(condition_value, dict) + for condition_element_key, condition_element_value in condition_value.items(): + assert isinstance(condition_element_value, (list, string_types)) + + if IAMPolicyDocumentValidator._strip_condition_key(condition_key) not in VALID_CONDITIONS: + assert not condition_value # empty dict + + @staticmethod + def _strip_condition_key(condition_key): + for valid_prefix in VALID_CONDITION_PREFIXES: + if condition_key.startswith(valid_prefix): + condition_key = condition_key[len(valid_prefix):] + break # strip only the first match + + for valid_postfix in VALID_CONDITION_POSTFIXES: + if condition_key.endswith(valid_postfix): + condition_key = condition_key[:-len(valid_postfix)] + break # strip only the first match + + return condition_key + + @staticmethod + def _validate_sid_syntax(statement): + if "Sid" in statement: + assert isinstance(statement["Sid"], string_types) + + def _validate_id_syntax(self): + if "Id" in self._policy_json: + assert isinstance(self._policy_json["Id"], string_types) + + def _validate_resource_exist(self): + for statement in self._statements: + assert ("Resource" in statement or "NotResource" in statement) + if "Resource" in 
statement and isinstance(statement["Resource"], list): + assert statement["Resource"] + elif "NotResource" in statement and isinstance(statement["NotResource"], list): + assert statement["NotResource"] + + def _validate_action_like_exist(self): + for statement in self._statements: + assert ("Action" in statement or "NotAction" in statement) + if "Action" in statement and isinstance(statement["Action"], list): + assert statement["Action"] + elif "NotAction" in statement and isinstance(statement["NotAction"], list): + assert statement["NotAction"] + + def _validate_actions_for_prefixes(self): + self._validate_action_like_for_prefixes("Action") + + def _validate_not_actions_for_prefixes(self): + self._validate_action_like_for_prefixes("NotAction") + + def _validate_action_like_for_prefixes(self, key): + for statement in self._statements: + if key in statement: + if isinstance(statement[key], string_types): + self._validate_action_prefix(statement[key]) + else: + for action in statement[key]: + self._validate_action_prefix(action) + + @staticmethod + def _validate_action_prefix(action): + action_parts = action.split(":") + if len(action_parts) == 1 and action_parts[0] != "*": + raise MalformedPolicyDocument("Actions/Conditions must be prefaced by a vendor, e.g., iam, sdb, ec2, etc.") + elif len(action_parts) > 2: + raise MalformedPolicyDocument("Actions/Condition can contain only one colon.") + + vendor_pattern = re.compile(r'[^a-zA-Z0-9\-.]') + if action_parts[0] != "*" and vendor_pattern.search(action_parts[0]): + raise MalformedPolicyDocument("Vendor {vendor} is not valid".format(vendor=action_parts[0])) + + def _validate_resources_for_formats(self): + self._validate_resource_like_for_formats("Resource") + + def _validate_not_resources_for_formats(self): + self._validate_resource_like_for_formats("NotResource") + + def _validate_resource_like_for_formats(self, key): + for statement in self._statements: + if key in statement: + if isinstance(statement[key], string_types): + self._validate_resource_format(statement[key]) + else: + for resource in sorted(statement[key], reverse=True): + self._validate_resource_format(resource) + if self._resource_error == "": + IAMPolicyDocumentValidator._legacy_parse_resource_like(statement, key) + + def _validate_resource_format(self, resource): + if resource != "*": + resource_partitions = resource.partition(":") + + if resource_partitions[1] == "": + self._resource_error = 'Resource {resource} must be in ARN format or "*".'.format(resource=resource) + return + + resource_partitions = resource_partitions[2].partition(":") + if resource_partitions[0] != "aws": + remaining_resource_parts = resource_partitions[2].split(":") + + arn1 = remaining_resource_parts[0] if remaining_resource_parts[0] != "" or len(remaining_resource_parts) > 1 else "*" + arn2 = remaining_resource_parts[1] if len(remaining_resource_parts) > 1 else "*" + arn3 = remaining_resource_parts[2] if len(remaining_resource_parts) > 2 else "*" + arn4 = ":".join(remaining_resource_parts[3:]) if len(remaining_resource_parts) > 3 else "*" + self._resource_error = 'Partition "{partition}" is not valid for resource "arn:{partition}:{arn1}:{arn2}:{arn3}:{arn4}".'.format( + partition=resource_partitions[0], + arn1=arn1, + arn2=arn2, + arn3=arn3, + arn4=arn4 + ) + return + + if resource_partitions[1] != ":": + self._resource_error = "Resource vendor must be fully qualified and cannot contain regexes." 
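+ # A resource-format problem is only recorded in self._resource_error here;
+ # validate() raises it later, once the legacy-parsing checks have passed.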
+                return
+
+            resource_partitions = resource_partitions[2].partition(":")
+
+            service = resource_partitions[0]
+
+            if service in SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS.keys() and not resource_partitions[2].startswith(":"):
+                self._resource_error = SERVICE_TYPE_REGION_INFORMATION_ERROR_ASSOCIATIONS[service].format(resource=resource)
+                return
+
+            resource_partitions = resource_partitions[2].partition(":")
+            resource_partitions = resource_partitions[2].partition(":")
+
+            if service in VALID_RESOURCE_PATH_STARTING_VALUES.keys():
+                valid_start = False
+                for valid_starting_value in VALID_RESOURCE_PATH_STARTING_VALUES[service]["values"]:
+                    if resource_partitions[2].startswith(valid_starting_value):
+                        valid_start = True
+                        break
+                if not valid_start:
+                    self._resource_error = VALID_RESOURCE_PATH_STARTING_VALUES[service]["error_message"].format(
+                        values=", ".join(VALID_RESOURCE_PATH_STARTING_VALUES[service]["values"])
+                    )
+
+    def _perform_first_legacy_parsing(self):
+        """This method excludes legacy parsing of resources, since that has to be done later."""
+        for statement in self._statements:
+            self._legacy_parse_statement(statement)
+
+    @staticmethod
+    def _legacy_parse_statement(statement):
+        assert statement["Effect"] in VALID_EFFECTS  # case-sensitive matching
+        if "Condition" in statement:
+            for condition_key, condition_value in statement["Condition"].items():
+                IAMPolicyDocumentValidator._legacy_parse_condition(condition_key, condition_value)
+
+    @staticmethod
+    def _legacy_parse_resource_like(statement, key):
+        if isinstance(statement[key], string_types):
+            if statement[key] != "*":
+                assert statement[key].count(":") >= 5 or "::" not in statement[key]
+                assert statement[key].split(":")[2] != ""
+        else:  # list
+            for resource in statement[key]:
+                if resource != "*":
+                    assert resource.count(":") >= 5 or "::" not in resource
+                    assert resource.split(":")[2] != ""
+
+    @staticmethod
+    def _legacy_parse_condition(condition_key, condition_value):
+        stripped_condition_key = IAMPolicyDocumentValidator._strip_condition_key(condition_key)
+
+        if stripped_condition_key.startswith("Date"):
+            for condition_element_key, condition_element_value in condition_value.items():
+                if isinstance(condition_element_value, string_types):
+                    IAMPolicyDocumentValidator._legacy_parse_date_condition_value(condition_element_value)
+                else:  # it has to be a list
+                    for date_condition_value in condition_element_value:
+                        IAMPolicyDocumentValidator._legacy_parse_date_condition_value(date_condition_value)
+
+    @staticmethod
+    def _legacy_parse_date_condition_value(date_condition_value):
+        if "t" in date_condition_value.lower() or "-" in date_condition_value:
+            IAMPolicyDocumentValidator._validate_iso_8601_datetime(date_condition_value.lower())
+        else:  # timestamp
+            assert 0 <= int(date_condition_value) <= 9223372036854775807
+
+    @staticmethod
+    def _validate_iso_8601_datetime(datetime):
+        datetime_parts = datetime.partition("t")
+        negative_year = datetime_parts[0].startswith("-")
+        date_parts = datetime_parts[0][1:].split("-") if negative_year else datetime_parts[0].split("-")
+        year = "-" + date_parts[0] if negative_year else date_parts[0]
+        assert -292275054 <= int(year) <= 292278993
+        if len(date_parts) > 1:
+            month = date_parts[1]
+            assert 1 <= int(month) <= 12
+        if len(date_parts) > 2:
+            day = date_parts[2]
+            assert 1 <= int(day) <= 31
+        assert len(date_parts) < 4
+
+        time_parts = datetime_parts[2].split(":")
+        if time_parts[0] != "":
+            hours = time_parts[0]
+            assert 0 <= int(hours) <= 23
+        if len(time_parts) > 1:
+            minutes = time_parts[1]
+            assert 0 <= int(minutes) <= 59
+        if len(time_parts) > 2:
+            if "z" in time_parts[2]:
+                seconds_with_decimal_fraction = time_parts[2].partition("z")[0]
+                assert time_parts[2].partition("z")[2] == ""
+            elif "+" in time_parts[2]:
+                seconds_with_decimal_fraction = time_parts[2].partition("+")[0]
+                time_zone_data = time_parts[2].partition("+")[2].partition(":")
+                time_zone_hours = time_zone_data[0]
+                assert len(time_zone_hours) == 2
+                assert 0 <= int(time_zone_hours) <= 23
+                if time_zone_data[1] == ":":
+                    time_zone_minutes = time_zone_data[2]
+                    assert len(time_zone_minutes) == 2
+                    assert 0 <= int(time_zone_minutes) <= 59
+            else:
+                seconds_with_decimal_fraction = time_parts[2]
+            seconds_with_decimal_fraction_partition = seconds_with_decimal_fraction.partition(".")
+            seconds = seconds_with_decimal_fraction_partition[0]
+            assert 0 <= int(seconds) <= 59
+            if seconds_with_decimal_fraction_partition[1] == ".":
+                decimal_seconds = seconds_with_decimal_fraction_partition[2]
+                assert 0 <= int(decimal_seconds) <= 999999999
diff --git a/moto/ses/feedback.py b/moto/ses/feedback.py
new file mode 100644
index 000000000..2d32f9ce0
--- /dev/null
+++ b/moto/ses/feedback.py
@@ -0,0 +1,81 @@
+"""
+SES Feedback messages
+Extracted from https://docs.aws.amazon.com/ses/latest/DeveloperGuide/notification-contents.html
+"""
+COMMON_MAIL = {
+    "notificationType": "Bounce, Complaint, or Delivery.",
+    "mail": {
+        "timestamp": "2018-10-08T14:05:45 +0000",
+        "messageId": "000001378603177f-7a5433e7-8edb-42ae-af10-f0181f34d6ee-000000",
+        "source": "sender@example.com",
+        "sourceArn": "arn:aws:ses:us-west-2:888888888888:identity/example.com",
+        "sourceIp": "127.0.3.0",
+        "sendingAccountId": "123456789012",
+        "destination": [
+            "recipient@example.com"
+        ],
+        "headersTruncated": False,
+        "headers": [
+            {
+                "name": "From",
+                "value": "\"Sender Name\" <sender@example.com>"
+            },
+            {
+                "name": "To",
+                "value": "\"Recipient Name\" <recipient@example.com>"
+            }
+        ],
+        "commonHeaders": {
+            "from": [
+                "Sender Name <sender@example.com>"
+            ],
+            "date": "Mon, 08 Oct 2018 14:05:45 +0000",
+            "to": [
+                "Recipient Name <recipient@example.com>"
+            ],
+            "messageId": "<custom-message-ID>",
+            "subject": "Message sent using Amazon SES"
+        }
+    }
+}
+BOUNCE = {
+    "bounceType": "Permanent",
+    "bounceSubType": "General",
+    "bouncedRecipients": [
+        {
+            "status": "5.0.0",
+            "action": "failed",
+            "diagnosticCode": "smtp; 550 user unknown",
+            "emailAddress": "recipient1@example.com"
+        },
+        {
+            "status": "4.0.0",
+            "action": "delayed",
+            "emailAddress": "recipient2@example.com"
+        }
+    ],
+    "reportingMTA": "example.com",
+    "timestamp": "2012-05-25T14:59:38.605Z",
+    "feedbackId": "000001378603176d-5a4b5ad9-6f30-4198-a8c3-b1eb0c270a1d-000000",
+    "remoteMtaIp": "127.0.2.0"
+}
+COMPLAINT = {
+    "userAgent": "AnyCompany Feedback Loop (V0.01)",
+    "complainedRecipients": [
+        {
+            "emailAddress": "recipient1@example.com"
+        }
+    ],
+    "complaintFeedbackType": "abuse",
+    "arrivalDate": "2009-12-03T04:24:21.000-05:00",
+    "timestamp": "2012-05-25T14:59:38.623Z",
+    "feedbackId": "000001378603177f-18c07c78-fa81-4a58-9dd1-fedc3cb8f49a-000000"
+}
+DELIVERY = {
+    "timestamp": "2014-05-28T22:41:01.184Z",
+    "processingTimeMillis": 546,
+    "recipients": ["success@simulator.amazonses.com"],
+    "smtpResponse": "250 ok: Message 64111812 accepted",
+    "reportingMTA": "a8-70.smtp-out.amazonses.com",
+    "remoteMtaIp": "127.0.2.0"
+}
diff --git a/moto/sts/exceptions.py b/moto/sts/exceptions.py
new file mode 100644
index 000000000..bddb56e3f
--- /dev/null
+++ b/moto/sts/exceptions.py
@@ -0,0 +1,15 @@
+from __future__ import unicode_literals
+from
moto.core.exceptions import RESTError + + +class STSClientError(RESTError): + code = 400 + + +class STSValidationError(STSClientError): + + def __init__(self, *args, **kwargs): + super(STSValidationError, self).__init__( + "ValidationError", + *args, **kwargs + ) diff --git a/moto/sts/utils.py b/moto/sts/utils.py new file mode 100644 index 000000000..50767729f --- /dev/null +++ b/moto/sts/utils.py @@ -0,0 +1,35 @@ +import base64 +import os +import random +import string + +import six + +ACCOUNT_SPECIFIC_ACCESS_KEY_PREFIX = "8NWMTLYQ" +ACCOUNT_SPECIFIC_ASSUMED_ROLE_ID_PREFIX = "3X42LBCD" +SESSION_TOKEN_PREFIX = "FQoGZXIvYXdzEBYaD" + + +def random_access_key_id(): + return ACCOUNT_SPECIFIC_ACCESS_KEY_PREFIX + _random_uppercase_or_digit_sequence(8) + + +def random_secret_access_key(): + return base64.b64encode(os.urandom(30)).decode() + + +def random_session_token(): + return SESSION_TOKEN_PREFIX + base64.b64encode(os.urandom(266))[len(SESSION_TOKEN_PREFIX):].decode() + + +def random_assumed_role_id(): + return ACCOUNT_SPECIFIC_ASSUMED_ROLE_ID_PREFIX + _random_uppercase_or_digit_sequence(9) + + +def _random_uppercase_or_digit_sequence(length): + return ''.join( + six.text_type( + random.choice( + string.ascii_uppercase + string.digits + )) for _ in range(length) + ) diff --git a/other_langs/sqsSample.scala b/other_langs/sqsSample.scala new file mode 100644 index 000000000..f83daaa22 --- /dev/null +++ b/other_langs/sqsSample.scala @@ -0,0 +1,25 @@ +package com.amazonaws.examples + +import com.amazonaws.client.builder.AwsClientBuilder +import com.amazonaws.regions.{Region, Regions} +import com.amazonaws.services.sqs.AmazonSQSClientBuilder + +import scala.jdk.CollectionConverters._ + +object QueueTest extends App { + val region = Region.getRegion(Regions.US_WEST_2).getName + val serviceEndpoint = "http://localhost:5000" + + val amazonSqs = AmazonSQSClientBuilder.standard() + .withEndpointConfiguration( + new AwsClientBuilder.EndpointConfiguration(serviceEndpoint, region)) + .build + + val queueName = "my-first-queue" + amazonSqs.createQueue(queueName) + + val urls = amazonSqs.listQueues().getQueueUrls.asScala + println("Listing queues") + println(urls.map(url => s" - $url").mkString(System.lineSeparator)) + println() +} diff --git a/tests/test_core/test_auth.py b/tests/test_core/test_auth.py new file mode 100644 index 000000000..00229f808 --- /dev/null +++ b/tests/test_core/test_auth.py @@ -0,0 +1,706 @@ +import json + +import boto3 +import sure # noqa +from botocore.exceptions import ClientError +# Ensure 'assert_raises' context manager support for Python 2.6 +import tests.backport_assert_raises +from nose.tools import assert_raises + +from moto import mock_iam, mock_ec2, mock_s3, mock_sts, mock_elbv2, mock_rds2 +from moto.core import set_initial_no_auth_action_count +from moto.iam.models import ACCOUNT_ID + + +@mock_iam +def create_user_with_access_key(user_name='test-user'): + client = boto3.client('iam', region_name='us-east-1') + client.create_user(UserName=user_name) + return client.create_access_key(UserName=user_name)['AccessKey'] + + +@mock_iam +def create_user_with_access_key_and_inline_policy(user_name, policy_document, policy_name='policy1'): + client = boto3.client('iam', region_name='us-east-1') + client.create_user(UserName=user_name) + client.put_user_policy(UserName=user_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)) + return client.create_access_key(UserName=user_name)['AccessKey'] + + +@mock_iam +def 
create_user_with_access_key_and_attached_policy(user_name, policy_document, policy_name='policy1'): + client = boto3.client('iam', region_name='us-east-1') + client.create_user(UserName=user_name) + policy_arn = client.create_policy( + PolicyName=policy_name, + PolicyDocument=json.dumps(policy_document) + )['Policy']['Arn'] + client.attach_user_policy(UserName=user_name, PolicyArn=policy_arn) + return client.create_access_key(UserName=user_name)['AccessKey'] + + +@mock_iam +def create_user_with_access_key_and_multiple_policies(user_name, inline_policy_document, + attached_policy_document, inline_policy_name='policy1', attached_policy_name='policy1'): + client = boto3.client('iam', region_name='us-east-1') + client.create_user(UserName=user_name) + policy_arn = client.create_policy( + PolicyName=attached_policy_name, + PolicyDocument=json.dumps(attached_policy_document) + )['Policy']['Arn'] + client.attach_user_policy(UserName=user_name, PolicyArn=policy_arn) + client.put_user_policy(UserName=user_name, PolicyName=inline_policy_name, PolicyDocument=json.dumps(inline_policy_document)) + return client.create_access_key(UserName=user_name)['AccessKey'] + + +def create_group_with_attached_policy_and_add_user(user_name, policy_document, + group_name='test-group', policy_name='policy1'): + client = boto3.client('iam', region_name='us-east-1') + client.create_group(GroupName=group_name) + policy_arn = client.create_policy( + PolicyName=policy_name, + PolicyDocument=json.dumps(policy_document) + )['Policy']['Arn'] + client.attach_group_policy(GroupName=group_name, PolicyArn=policy_arn) + client.add_user_to_group(GroupName=group_name, UserName=user_name) + + +def create_group_with_inline_policy_and_add_user(user_name, policy_document, + group_name='test-group', policy_name='policy1'): + client = boto3.client('iam', region_name='us-east-1') + client.create_group(GroupName=group_name) + client.put_group_policy( + GroupName=group_name, + PolicyName=policy_name, + PolicyDocument=json.dumps(policy_document) + ) + client.add_user_to_group(GroupName=group_name, UserName=user_name) + + +def create_group_with_multiple_policies_and_add_user(user_name, inline_policy_document, + attached_policy_document, group_name='test-group', + inline_policy_name='policy1', attached_policy_name='policy1'): + client = boto3.client('iam', region_name='us-east-1') + client.create_group(GroupName=group_name) + client.put_group_policy( + GroupName=group_name, + PolicyName=inline_policy_name, + PolicyDocument=json.dumps(inline_policy_document) + ) + policy_arn = client.create_policy( + PolicyName=attached_policy_name, + PolicyDocument=json.dumps(attached_policy_document) + )['Policy']['Arn'] + client.attach_group_policy(GroupName=group_name, PolicyArn=policy_arn) + client.add_user_to_group(GroupName=group_name, UserName=user_name) + + +@mock_iam +@mock_sts +def create_role_with_attached_policy_and_assume_it(role_name, trust_policy_document, + policy_document, session_name='session1', policy_name='policy1'): + iam_client = boto3.client('iam', region_name='us-east-1') + sts_client = boto3.client('sts', region_name='us-east-1') + role_arn = iam_client.create_role( + RoleName=role_name, + AssumeRolePolicyDocument=json.dumps(trust_policy_document) + )['Role']['Arn'] + policy_arn = iam_client.create_policy( + PolicyName=policy_name, + PolicyDocument=json.dumps(policy_document) + )['Policy']['Arn'] + iam_client.attach_role_policy(RoleName=role_name, PolicyArn=policy_arn) + return sts_client.assume_role(RoleArn=role_arn, 
RoleSessionName=session_name)['Credentials'] + + +@mock_iam +@mock_sts +def create_role_with_inline_policy_and_assume_it(role_name, trust_policy_document, + policy_document, session_name='session1', policy_name='policy1'): + iam_client = boto3.client('iam', region_name='us-east-1') + sts_client = boto3.client('sts', region_name='us-east-1') + role_arn = iam_client.create_role( + RoleName=role_name, + AssumeRolePolicyDocument=json.dumps(trust_policy_document) + )['Role']['Arn'] + iam_client.put_role_policy( + RoleName=role_name, + PolicyName=policy_name, + PolicyDocument=json.dumps(policy_document) + ) + return sts_client.assume_role(RoleArn=role_arn, RoleSessionName=session_name)['Credentials'] + + +@set_initial_no_auth_action_count(0) +@mock_iam +def test_invalid_client_token_id(): + client = boto3.client('iam', region_name='us-east-1', aws_access_key_id='invalid', aws_secret_access_key='invalid') + with assert_raises(ClientError) as ex: + client.get_user() + ex.exception.response['Error']['Code'].should.equal('InvalidClientTokenId') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403) + ex.exception.response['Error']['Message'].should.equal('The security token included in the request is invalid.') + + +@set_initial_no_auth_action_count(0) +@mock_ec2 +def test_auth_failure(): + client = boto3.client('ec2', region_name='us-east-1', aws_access_key_id='invalid', aws_secret_access_key='invalid') + with assert_raises(ClientError) as ex: + client.describe_instances() + ex.exception.response['Error']['Code'].should.equal('AuthFailure') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(401) + ex.exception.response['Error']['Message'].should.equal('AWS was not able to validate the provided access credentials') + + +@set_initial_no_auth_action_count(2) +@mock_iam +def test_signature_does_not_match(): + access_key = create_user_with_access_key() + client = boto3.client('iam', region_name='us-east-1', + aws_access_key_id=access_key['AccessKeyId'], + aws_secret_access_key='invalid') + with assert_raises(ClientError) as ex: + client.get_user() + ex.exception.response['Error']['Code'].should.equal('SignatureDoesNotMatch') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403) + ex.exception.response['Error']['Message'].should.equal('The request signature we calculated does not match the signature you provided. Check your AWS Secret Access Key and signing method. 
Consult the service documentation for details.') + + +@set_initial_no_auth_action_count(2) +@mock_ec2 +def test_auth_failure_with_valid_access_key_id(): + access_key = create_user_with_access_key() + client = boto3.client('ec2', region_name='us-east-1', + aws_access_key_id=access_key['AccessKeyId'], + aws_secret_access_key='invalid') + with assert_raises(ClientError) as ex: + client.describe_instances() + ex.exception.response['Error']['Code'].should.equal('AuthFailure') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(401) + ex.exception.response['Error']['Message'].should.equal('AWS was not able to validate the provided access credentials') + + +@set_initial_no_auth_action_count(2) +@mock_ec2 +def test_access_denied_with_no_policy(): + user_name = 'test-user' + access_key = create_user_with_access_key(user_name) + client = boto3.client('ec2', region_name='us-east-1', + aws_access_key_id=access_key['AccessKeyId'], + aws_secret_access_key=access_key['SecretAccessKey']) + with assert_raises(ClientError) as ex: + client.describe_instances() + ex.exception.response['Error']['Code'].should.equal('AccessDenied') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403) + ex.exception.response['Error']['Message'].should.equal( + 'User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}'.format( + account_id=ACCOUNT_ID, + user_name=user_name, + operation="ec2:DescribeInstances" + ) + ) + + +@set_initial_no_auth_action_count(3) +@mock_ec2 +def test_access_denied_with_not_allowing_policy(): + user_name = 'test-user' + inline_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:Describe*" + ], + "Resource": "*" + } + ] + } + access_key = create_user_with_access_key_and_inline_policy(user_name, inline_policy_document) + client = boto3.client('ec2', region_name='us-east-1', + aws_access_key_id=access_key['AccessKeyId'], + aws_secret_access_key=access_key['SecretAccessKey']) + with assert_raises(ClientError) as ex: + client.run_instances(MaxCount=1, MinCount=1) + ex.exception.response['Error']['Code'].should.equal('AccessDenied') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403) + ex.exception.response['Error']['Message'].should.equal( + 'User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}'.format( + account_id=ACCOUNT_ID, + user_name=user_name, + operation="ec2:RunInstances" + ) + ) + + +@set_initial_no_auth_action_count(3) +@mock_ec2 +def test_access_denied_with_denying_policy(): + user_name = 'test-user' + inline_policy_document = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:*", + ], + "Resource": "*" + }, + { + "Effect": "Deny", + "Action": "ec2:CreateVpc", + "Resource": "*" + } + ] + } + access_key = create_user_with_access_key_and_inline_policy(user_name, inline_policy_document) + client = boto3.client('ec2', region_name='us-east-1', + aws_access_key_id=access_key['AccessKeyId'], + aws_secret_access_key=access_key['SecretAccessKey']) + with assert_raises(ClientError) as ex: + client.create_vpc(CidrBlock="10.0.0.0/16") + ex.exception.response['Error']['Code'].should.equal('AccessDenied') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403) + ex.exception.response['Error']['Message'].should.equal( + 'User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}'.format( + 
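# the message shape matches AWS's own AccessDenied wording:
+ # "User: <arn> is not authorized to perform: <action>"
+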
+@set_initial_no_auth_action_count(2)
+@mock_ec2
+def test_auth_failure_with_valid_access_key_id():
+    access_key = create_user_with_access_key()
+    client = boto3.client('ec2', region_name='us-east-1',
+                          aws_access_key_id=access_key['AccessKeyId'],
+                          aws_secret_access_key='invalid')
+    with assert_raises(ClientError) as ex:
+        client.describe_instances()
+    ex.exception.response['Error']['Code'].should.equal('AuthFailure')
+    ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(401)
+    ex.exception.response['Error']['Message'].should.equal('AWS was not able to validate the provided access credentials')
+
+
+@set_initial_no_auth_action_count(2)
+@mock_ec2
+def test_access_denied_with_no_policy():
+    user_name = 'test-user'
+    access_key = create_user_with_access_key(user_name)
+    client = boto3.client('ec2', region_name='us-east-1',
+                          aws_access_key_id=access_key['AccessKeyId'],
+                          aws_secret_access_key=access_key['SecretAccessKey'])
+    with assert_raises(ClientError) as ex:
+        client.describe_instances()
+    ex.exception.response['Error']['Code'].should.equal('AccessDenied')
+    ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403)
+    ex.exception.response['Error']['Message'].should.equal(
+        'User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}'.format(
+            account_id=ACCOUNT_ID,
+            user_name=user_name,
+            operation="ec2:DescribeInstances"
+        )
+    )
+
+
+@set_initial_no_auth_action_count(3)
+@mock_ec2
+def test_access_denied_with_not_allowing_policy():
+    user_name = 'test-user'
+    inline_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Effect": "Allow",
+                "Action": [
+                    "ec2:Describe*"
+                ],
+                "Resource": "*"
+            }
+        ]
+    }
+    access_key = create_user_with_access_key_and_inline_policy(user_name, inline_policy_document)
+    client = boto3.client('ec2', region_name='us-east-1',
+                          aws_access_key_id=access_key['AccessKeyId'],
+                          aws_secret_access_key=access_key['SecretAccessKey'])
+    with assert_raises(ClientError) as ex:
+        client.run_instances(MaxCount=1, MinCount=1)
+    ex.exception.response['Error']['Code'].should.equal('AccessDenied')
+    ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403)
+    ex.exception.response['Error']['Message'].should.equal(
+        'User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}'.format(
+            account_id=ACCOUNT_ID,
+            user_name=user_name,
+            operation="ec2:RunInstances"
+        )
+    )
+
+
+@set_initial_no_auth_action_count(3)
+@mock_ec2
+def test_access_denied_with_denying_policy():
+    user_name = 'test-user'
+    inline_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Effect": "Allow",
+                "Action": [
+                    "ec2:*",
+                ],
+                "Resource": "*"
+            },
+            {
+                "Effect": "Deny",
+                "Action": "ec2:CreateVpc",
+                "Resource": "*"
+            }
+        ]
+    }
+    access_key = create_user_with_access_key_and_inline_policy(user_name, inline_policy_document)
+    client = boto3.client('ec2', region_name='us-east-1',
+                          aws_access_key_id=access_key['AccessKeyId'],
+                          aws_secret_access_key=access_key['SecretAccessKey'])
+    with assert_raises(ClientError) as ex:
+        client.create_vpc(CidrBlock="10.0.0.0/16")
+    ex.exception.response['Error']['Code'].should.equal('AccessDenied')
+    ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403)
+    ex.exception.response['Error']['Message'].should.equal(
+        'User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}'.format(
+            account_id=ACCOUNT_ID,
+            user_name=user_name,
+            operation="ec2:CreateVpc"
+        )
+    )
+
+
+@set_initial_no_auth_action_count(3)
+@mock_sts
+def test_get_caller_identity_allowed_with_denying_policy():
+    user_name = 'test-user'
+    inline_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Effect": "Deny",
+                "Action": "sts:GetCallerIdentity",
+                "Resource": "*"
+            }
+        ]
+    }
+    access_key = create_user_with_access_key_and_inline_policy(user_name, inline_policy_document)
+    client = boto3.client('sts', region_name='us-east-1',
+                          aws_access_key_id=access_key['AccessKeyId'],
+                          aws_secret_access_key=access_key['SecretAccessKey'])
+    client.get_caller_identity().should.be.a(dict)
+
+
+@set_initial_no_auth_action_count(3)
+@mock_ec2
+def test_allowed_with_wildcard_action():
+    user_name = 'test-user'
+    inline_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Effect": "Allow",
+                "Action": "ec2:Describe*",
+                "Resource": "*"
+            }
+        ]
+    }
+    access_key = create_user_with_access_key_and_inline_policy(user_name, inline_policy_document)
+    client = boto3.client('ec2', region_name='us-east-1',
+                          aws_access_key_id=access_key['AccessKeyId'],
+                          aws_secret_access_key=access_key['SecretAccessKey'])
+    client.describe_tags()['Tags'].should.be.empty
+
+
+@set_initial_no_auth_action_count(4)
+@mock_iam
+def test_allowed_with_explicit_action_in_attached_policy():
+    user_name = 'test-user'
+    attached_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Effect": "Allow",
+                "Action": "iam:ListGroups",
+                "Resource": "*"
+            }
+        ]
+    }
+    access_key = create_user_with_access_key_and_attached_policy(user_name, attached_policy_document)
+    client = boto3.client('iam', region_name='us-east-1',
+                          aws_access_key_id=access_key['AccessKeyId'],
+                          aws_secret_access_key=access_key['SecretAccessKey'])
+    client.list_groups()['Groups'].should.be.empty
+
+
+@set_initial_no_auth_action_count(8)
+@mock_s3
+@mock_iam
+def test_s3_access_denied_with_denying_attached_group_policy():
+    user_name = 'test-user'
+    attached_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Effect": "Allow",
+                "Action": "s3:ListAllMyBuckets",
+                "Resource": "*"
+            }
+        ]
+    }
+    group_attached_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Effect": "Deny",
+                "Action": "s3:List*",
+                "Resource": "*"
+            }
+        ]
+    }
+    access_key = create_user_with_access_key_and_attached_policy(user_name, attached_policy_document)
+    create_group_with_attached_policy_and_add_user(user_name, group_attached_policy_document)
+    client = boto3.client('s3', region_name='us-east-1',
+                          aws_access_key_id=access_key['AccessKeyId'],
+                          aws_secret_access_key=access_key['SecretAccessKey'])
+    with assert_raises(ClientError) as ex:
+        client.list_buckets()
+    ex.exception.response['Error']['Code'].should.equal('AccessDenied')
+    ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403)
+    ex.exception.response['Error']['Message'].should.equal('Access Denied')
+
+
+@set_initial_no_auth_action_count(6)
+@mock_s3
+@mock_iam
+def test_s3_access_denied_with_denying_inline_group_policy():
+    user_name = 'test-user'
+    bucket_name = 'test-bucket'
+    inline_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Effect": "Allow",
+                "Action": "*",
+                "Resource": "*"
+            }
+        ]
+    }
+    group_inline_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Effect": "Deny",
+                "Action": "s3:GetObject",
+                "Resource": "*"
+            }
+        ]
+    }
+    access_key = create_user_with_access_key_and_inline_policy(user_name, inline_policy_document)
+    create_group_with_inline_policy_and_add_user(user_name, group_inline_policy_document)
+    client = boto3.client('s3', region_name='us-east-1',
+                          aws_access_key_id=access_key['AccessKeyId'],
+                          aws_secret_access_key=access_key['SecretAccessKey'])
+    client.create_bucket(Bucket=bucket_name)
+    with assert_raises(ClientError) as ex:
+        client.get_object(Bucket=bucket_name, Key='sdfsdf')
+    ex.exception.response['Error']['Code'].should.equal('AccessDenied')
+    ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403)
+    ex.exception.response['Error']['Message'].should.equal('Access Denied')
+
+
+@set_initial_no_auth_action_count(10)
+@mock_iam
+@mock_ec2
+def test_access_denied_with_many_irrelevant_policies():
+    user_name = 'test-user'
+    inline_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Effect": "Allow",
+                "Action": "ec2:Describe*",
+                "Resource": "*"
+            }
+        ]
+    }
+    attached_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Effect": "Allow",
+                "Action": "s3:*",
+                "Resource": "*"
+            }
+        ]
+    }
+    group_inline_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Effect": "Deny",
+                "Action": "iam:List*",
+                "Resource": "*"
+            }
+        ]
+    }
+    group_attached_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Effect": "Deny",
+                "Action": "lambda:*",
+                "Resource": "*"
+            }
+        ]
+    }
+    access_key = create_user_with_access_key_and_multiple_policies(user_name, inline_policy_document,
+                                                                   attached_policy_document)
+    create_group_with_multiple_policies_and_add_user(user_name, group_inline_policy_document,
+                                                     group_attached_policy_document)
+    client = boto3.client('ec2', region_name='us-east-1',
+                          aws_access_key_id=access_key['AccessKeyId'],
+                          aws_secret_access_key=access_key['SecretAccessKey'])
+    with assert_raises(ClientError) as ex:
+        client.create_key_pair(KeyName="TestKey")
+    ex.exception.response['Error']['Code'].should.equal('AccessDenied')
+    ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403)
+    ex.exception.response['Error']['Message'].should.equal(
+        'User: arn:aws:iam::{account_id}:user/{user_name} is not authorized to perform: {operation}'.format(
+            account_id=ACCOUNT_ID,
+            user_name=user_name,
+            operation="ec2:CreateKeyPair"
+        )
+    )
+
+
+@set_initial_no_auth_action_count(4)
+@mock_iam
+@mock_sts
+@mock_ec2
+@mock_elbv2
+def test_allowed_with_temporary_credentials():
+    role_name = 'test-role'
+    trust_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": {
+            "Effect": "Allow",
+            "Principal": {"AWS": "arn:aws:iam::{account_id}:root".format(account_id=ACCOUNT_ID)},
+            "Action": "sts:AssumeRole"
+        }
+    }
+    attached_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Effect": "Allow",
+                "Action": [
+                    "elasticloadbalancing:CreateLoadBalancer",
+                    "ec2:DescribeSubnets"
+                ],
+                "Resource": "*"
+            }
+        ]
+    }
+    credentials = create_role_with_attached_policy_and_assume_it(role_name, trust_policy_document, attached_policy_document)
+    elbv2_client = boto3.client('elbv2', region_name='us-east-1',
+                                aws_access_key_id=credentials['AccessKeyId'],
+                                aws_secret_access_key=credentials['SecretAccessKey'],
+                                aws_session_token=credentials['SessionToken'])
+    ec2_client = boto3.client('ec2', region_name='us-east-1',
+                              aws_access_key_id=credentials['AccessKeyId'],
+                              aws_secret_access_key=credentials['SecretAccessKey'],
+                              aws_session_token=credentials['SessionToken'])
+    subnets = ec2_client.describe_subnets()['Subnets']
+    len(subnets).should.be.greater_than(1)
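+    # The subnets above come from moto's default VPC; spanning two of them
+    # exercises the allowed elasticloadbalancing:CreateLoadBalancer action.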
+    elbv2_client.create_load_balancer(
+        Name='test-load-balancer',
+        Subnets=[
+            subnets[0]['SubnetId'],
+            subnets[1]['SubnetId']
+        ]
+    )['LoadBalancers'].should.have.length_of(1)
+
+
+@set_initial_no_auth_action_count(3)
+@mock_iam
+@mock_sts
+@mock_rds2
+def test_access_denied_with_temporary_credentials():
+    role_name = 'test-role'
+    session_name = 'test-session'
+    trust_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": {
+            "Effect": "Allow",
+            "Principal": {"AWS": "arn:aws:iam::{account_id}:root".format(account_id=ACCOUNT_ID)},
+            "Action": "sts:AssumeRole"
+        }
+    }
+    inline_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Effect": "Allow",
+                "Action": [
+                    'rds:Describe*'
+                ],
+                "Resource": "*"
+            }
+        ]
+    }
+    credentials = create_role_with_inline_policy_and_assume_it(role_name, trust_policy_document,
+                                                               inline_policy_document, session_name)
+    client = boto3.client('rds', region_name='us-east-1',
+                          aws_access_key_id=credentials['AccessKeyId'],
+                          aws_secret_access_key=credentials['SecretAccessKey'],
+                          aws_session_token=credentials['SessionToken'])
+    with assert_raises(ClientError) as ex:
+        client.create_db_instance(
+            DBInstanceIdentifier='test-db-instance',
+            DBInstanceClass='db.t3',
+            Engine='aurora-postgresql'
+        )
+    ex.exception.response['Error']['Code'].should.equal('AccessDenied')
+    ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403)
+    ex.exception.response['Error']['Message'].should.equal(
+        'User: arn:aws:sts::{account_id}:assumed-role/{role_name}/{session_name} is not authorized to perform: {operation}'.format(
+            account_id=ACCOUNT_ID,
+            role_name=role_name,
+            session_name=session_name,
+            operation="rds:CreateDBInstance"
+        )
+    )
+
+
+@set_initial_no_auth_action_count(3)
+@mock_iam
+def test_get_user_from_credentials():
+    user_name = 'new-test-user'
+    inline_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Effect": "Allow",
+                "Action": "iam:*",
+                "Resource": "*"
+            }
+        ]
+    }
+    access_key = create_user_with_access_key_and_inline_policy(user_name, inline_policy_document)
+    client = boto3.client('iam', region_name='us-east-1',
+                          aws_access_key_id=access_key['AccessKeyId'],
+                          aws_secret_access_key=access_key['SecretAccessKey'])
+    client.get_user()['User']['UserName'].should.equal(user_name)
+
+
+@set_initial_no_auth_action_count(0)
+@mock_s3
+def test_s3_invalid_access_key_id():
+    client = boto3.client('s3', region_name='us-east-1', aws_access_key_id='invalid', aws_secret_access_key='invalid')
+    with assert_raises(ClientError) as ex:
+        client.list_buckets()
+    ex.exception.response['Error']['Code'].should.equal('InvalidAccessKeyId')
+    ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403)
+    ex.exception.response['Error']['Message'].should.equal('The AWS Access Key Id you provided does not exist in our records.')
+
+
+@set_initial_no_auth_action_count(3)
+@mock_s3
+@mock_iam
+def test_s3_signature_does_not_match():
+    bucket_name = 'test-bucket'
+    access_key = create_user_with_access_key()
+    client = boto3.client('s3', region_name='us-east-1',
+                          aws_access_key_id=access_key['AccessKeyId'],
+                          aws_secret_access_key='invalid')
+    client.create_bucket(Bucket=bucket_name)
+    with assert_raises(ClientError) as ex:
+        client.put_object(Bucket=bucket_name, Key="abc")
+    ex.exception.response['Error']['Code'].should.equal('SignatureDoesNotMatch')
+    ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403)
+    ex.exception.response['Error']['Message'].should.equal('The request signature we calculated does not match the signature you provided. Check your key and signing method.')
+
+
+@set_initial_no_auth_action_count(7)
+@mock_s3
+@mock_iam
+def test_s3_access_denied_not_action():
+    user_name = 'test-user'
+    bucket_name = 'test-bucket'
+    inline_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Effect": "Allow",
+                "Action": "*",
+                "Resource": "*"
+            }
+        ]
+    }
+    group_inline_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Effect": "Deny",
+                "NotAction": "iam:GetUser",
+                "Resource": "*"
+            }
+        ]
+    }
+    access_key = create_user_with_access_key_and_inline_policy(user_name, inline_policy_document)
+    create_group_with_inline_policy_and_add_user(user_name, group_inline_policy_document)
+    client = boto3.client('s3', region_name='us-east-1',
+                          aws_access_key_id=access_key['AccessKeyId'],
+                          aws_secret_access_key=access_key['SecretAccessKey'])
+    client.create_bucket(Bucket=bucket_name)
+    with assert_raises(ClientError) as ex:
+        client.delete_object(Bucket=bucket_name, Key='sdfsdf')
+    ex.exception.response['Error']['Code'].should.equal('AccessDenied')
+    ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(403)
+    ex.exception.response['Error']['Message'].should.equal('Access Denied')
+
+
+@set_initial_no_auth_action_count(4)
+@mock_iam
+@mock_sts
+@mock_s3
+def test_s3_invalid_token_with_temporary_credentials():
+    role_name = 'test-role'
+    session_name = 'test-session'
+    bucket_name = 'test-bucket-888'
+    trust_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": {
+            "Effect": "Allow",
+            "Principal": {"AWS": "arn:aws:iam::{account_id}:root".format(account_id=ACCOUNT_ID)},
+            "Action": "sts:AssumeRole"
+        }
+    }
+    inline_policy_document = {
+        "Version": "2012-10-17",
+        "Statement": [
+            {
+                "Effect": "Allow",
+                "Action": [
+                    '*'
+                ],
+                "Resource": "*"
+            }
+        ]
+    }
+    credentials = create_role_with_inline_policy_and_assume_it(role_name, trust_policy_document,
+                                                               inline_policy_document, session_name)
+    client = boto3.client('s3', region_name='us-east-1',
+                          aws_access_key_id=credentials['AccessKeyId'],
+                          aws_secret_access_key=credentials['SecretAccessKey'],
+                          aws_session_token='invalid')
+    client.create_bucket(Bucket=bucket_name)
+    with assert_raises(ClientError) as ex:
+        client.list_bucket_metrics_configurations(Bucket=bucket_name)
+    ex.exception.response['Error']['Code'].should.equal('InvalidToken')
+    ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400)
+    ex.exception.response['Error']['Message'].should.equal('The provided token is malformed or otherwise invalid.')
diff --git a/tests/test_core/test_context_manager.py b/tests/test_core/test_context_manager.py
new file mode 100644
index 000000000..4824e021f
--- /dev/null
+++ b/tests/test_core/test_context_manager.py
@@ -0,0 +1,12 @@
+import sure  # noqa
+import boto3
+from moto import mock_sqs, settings
+
+
+def test_context_manager_returns_mock():
+    with mock_sqs() as sqs_mock:
+        conn = boto3.client("sqs", region_name='us-west-1')
+        conn.create_queue(QueueName="queue1")
+
+    if not settings.TEST_SERVER_MODE:
+        list(sqs_mock.backends['us-west-1'].queues.keys()).should.equal(['queue1'])
diff --git a/tests/test_core/test_socket.py b/tests/test_core/test_socket.py
new file mode 100644
index 000000000..2e73d7b5f
--- /dev/null
+++ b/tests/test_core/test_socket.py
@@ -0,0 +1,48 @@
+import unittest
+from moto import mock_dynamodb2_deprecated, mock_dynamodb2
+import socket
+
+from six import PY3
+
+
+class TestSocketPair(unittest.TestCase):
+
+    
@mock_dynamodb2_deprecated + def test_asyncio_deprecated(self): + if PY3: + self.assertIn( + 'moto.packages.httpretty.core.fakesock.socket', + str(socket.socket), + 'Our mock should be present' + ) + import asyncio + self.assertIsNotNone(asyncio.get_event_loop()) + + @mock_dynamodb2_deprecated + def test_socket_pair_deprecated(self): + + # In Python2, the fakesocket is not set, for some reason. + if PY3: + self.assertIn( + 'moto.packages.httpretty.core.fakesock.socket', + str(socket.socket), + 'Our mock should be present' + ) + a, b = socket.socketpair() + self.assertIsNotNone(a) + self.assertIsNotNone(b) + if a: + a.close() + if b: + b.close() + + + @mock_dynamodb2 + def test_socket_pair(self): + a, b = socket.socketpair() + self.assertIsNotNone(a) + self.assertIsNotNone(b) + if a: + a.close() + if b: + b.close() diff --git a/tests/test_ec2/test_launch_templates.py b/tests/test_ec2/test_launch_templates.py new file mode 100644 index 000000000..87e1d3986 --- /dev/null +++ b/tests/test_ec2/test_launch_templates.py @@ -0,0 +1,415 @@ +import boto3 +import sure # noqa + +from nose.tools import assert_raises +from botocore.client import ClientError + +from moto import mock_ec2 + + +@mock_ec2 +def test_launch_template_create(): + cli = boto3.client("ec2", region_name="us-east-1") + + resp = cli.create_launch_template( + LaunchTemplateName="test-template", + + # the absolute minimum needed to create a template without other resources + LaunchTemplateData={ + "TagSpecifications": [{ + "ResourceType": "instance", + "Tags": [{ + "Key": "test", + "Value": "value", + }], + }], + }, + ) + + resp.should.have.key("LaunchTemplate") + lt = resp["LaunchTemplate"] + lt["LaunchTemplateName"].should.equal("test-template") + lt["DefaultVersionNumber"].should.equal(1) + lt["LatestVersionNumber"].should.equal(1) + + with assert_raises(ClientError) as ex: + cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "TagSpecifications": [{ + "ResourceType": "instance", + "Tags": [{ + "Key": "test", + "Value": "value", + }], + }], + }, + ) + + str(ex.exception).should.equal( + 'An error occurred (InvalidLaunchTemplateName.AlreadyExistsException) when calling the CreateLaunchTemplate operation: Launch template name already in use.') + + +@mock_ec2 +def test_describe_launch_template_versions(): + template_data = { + "ImageId": "ami-abc123", + "DisableApiTermination": False, + "TagSpecifications": [{ + "ResourceType": "instance", + "Tags": [{ + "Key": "test", + "Value": "value", + }], + }], + "SecurityGroupIds": [ + "sg-1234", + "sg-ab5678", + ], + } + + cli = boto3.client("ec2", region_name="us-east-1") + + create_resp = cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData=template_data) + + # test using name + resp = cli.describe_launch_template_versions( + LaunchTemplateName="test-template", + Versions=['1']) + + templ = resp["LaunchTemplateVersions"][0]["LaunchTemplateData"] + templ.should.equal(template_data) + + # test using id + resp = cli.describe_launch_template_versions( + LaunchTemplateId=create_resp["LaunchTemplate"]["LaunchTemplateId"], + Versions=['1']) + + templ = resp["LaunchTemplateVersions"][0]["LaunchTemplateData"] + templ.should.equal(template_data) + + +@mock_ec2 +def test_create_launch_template_version(): + cli = boto3.client("ec2", region_name="us-east-1") + + create_resp = cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + + version_resp = 
cli.create_launch_template_version( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-def456" + }, + VersionDescription="new ami") + + version_resp.should.have.key("LaunchTemplateVersion") + version = version_resp["LaunchTemplateVersion"] + version["DefaultVersion"].should.equal(False) + version["LaunchTemplateId"].should.equal(create_resp["LaunchTemplate"]["LaunchTemplateId"]) + version["VersionDescription"].should.equal("new ami") + version["VersionNumber"].should.equal(2) + + +@mock_ec2 +def test_create_launch_template_version_by_id(): + cli = boto3.client("ec2", region_name="us-east-1") + + create_resp = cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + + version_resp = cli.create_launch_template_version( + LaunchTemplateId=create_resp["LaunchTemplate"]["LaunchTemplateId"], + LaunchTemplateData={ + "ImageId": "ami-def456" + }, + VersionDescription="new ami") + + version_resp.should.have.key("LaunchTemplateVersion") + version = version_resp["LaunchTemplateVersion"] + version["DefaultVersion"].should.equal(False) + version["LaunchTemplateId"].should.equal(create_resp["LaunchTemplate"]["LaunchTemplateId"]) + version["VersionDescription"].should.equal("new ami") + version["VersionNumber"].should.equal(2) + + +@mock_ec2 +def test_describe_launch_template_versions_with_multiple_versions(): + cli = boto3.client("ec2", region_name="us-east-1") + + cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + + cli.create_launch_template_version( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-def456" + }, + VersionDescription="new ami") + + resp = cli.describe_launch_template_versions( + LaunchTemplateName="test-template") + + resp["LaunchTemplateVersions"].should.have.length_of(2) + resp["LaunchTemplateVersions"][0]["LaunchTemplateData"]["ImageId"].should.equal("ami-abc123") + resp["LaunchTemplateVersions"][1]["LaunchTemplateData"]["ImageId"].should.equal("ami-def456") + + +@mock_ec2 +def test_describe_launch_template_versions_with_versions_option(): + cli = boto3.client("ec2", region_name="us-east-1") + + cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + + cli.create_launch_template_version( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-def456" + }, + VersionDescription="new ami") + + cli.create_launch_template_version( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-hij789" + }, + VersionDescription="new ami, again") + + resp = cli.describe_launch_template_versions( + LaunchTemplateName="test-template", + Versions=["2", "3"]) + + resp["LaunchTemplateVersions"].should.have.length_of(2) + resp["LaunchTemplateVersions"][0]["LaunchTemplateData"]["ImageId"].should.equal("ami-def456") + resp["LaunchTemplateVersions"][1]["LaunchTemplateData"]["ImageId"].should.equal("ami-hij789") + + +@mock_ec2 +def test_describe_launch_template_versions_with_min(): + cli = boto3.client("ec2", region_name="us-east-1") + + cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + + cli.create_launch_template_version( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-def456" + }, + VersionDescription="new ami") + + cli.create_launch_template_version( + LaunchTemplateName="test-template", + 
LaunchTemplateData={ + "ImageId": "ami-hij789" + }, + VersionDescription="new ami, again") + + resp = cli.describe_launch_template_versions( + LaunchTemplateName="test-template", + MinVersion="2") + + resp["LaunchTemplateVersions"].should.have.length_of(2) + resp["LaunchTemplateVersions"][0]["LaunchTemplateData"]["ImageId"].should.equal("ami-def456") + resp["LaunchTemplateVersions"][1]["LaunchTemplateData"]["ImageId"].should.equal("ami-hij789") + + +@mock_ec2 +def test_describe_launch_template_versions_with_max(): + cli = boto3.client("ec2", region_name="us-east-1") + + cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + + cli.create_launch_template_version( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-def456" + }, + VersionDescription="new ami") + + cli.create_launch_template_version( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-hij789" + }, + VersionDescription="new ami, again") + + resp = cli.describe_launch_template_versions( + LaunchTemplateName="test-template", + MaxVersion="2") + + resp["LaunchTemplateVersions"].should.have.length_of(2) + resp["LaunchTemplateVersions"][0]["LaunchTemplateData"]["ImageId"].should.equal("ami-abc123") + resp["LaunchTemplateVersions"][1]["LaunchTemplateData"]["ImageId"].should.equal("ami-def456") + + +@mock_ec2 +def test_describe_launch_template_versions_with_min_and_max(): + cli = boto3.client("ec2", region_name="us-east-1") + + cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + + cli.create_launch_template_version( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-def456" + }, + VersionDescription="new ami") + + cli.create_launch_template_version( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-hij789" + }, + VersionDescription="new ami, again") + + cli.create_launch_template_version( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-345abc" + }, + VersionDescription="new ami, because why not") + + resp = cli.describe_launch_template_versions( + LaunchTemplateName="test-template", + MinVersion="2", + MaxVersion="3") + + resp["LaunchTemplateVersions"].should.have.length_of(2) + resp["LaunchTemplateVersions"][0]["LaunchTemplateData"]["ImageId"].should.equal("ami-def456") + resp["LaunchTemplateVersions"][1]["LaunchTemplateData"]["ImageId"].should.equal("ami-hij789") + + +@mock_ec2 +def test_describe_launch_templates(): + cli = boto3.client("ec2", region_name="us-east-1") + + lt_ids = [] + r = cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + lt_ids.append(r["LaunchTemplate"]["LaunchTemplateId"]) + + r = cli.create_launch_template( + LaunchTemplateName="test-template2", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + lt_ids.append(r["LaunchTemplate"]["LaunchTemplateId"]) + + # general call, all templates + resp = cli.describe_launch_templates() + resp.should.have.key("LaunchTemplates") + resp["LaunchTemplates"].should.have.length_of(2) + resp["LaunchTemplates"][0]["LaunchTemplateName"].should.equal("test-template") + resp["LaunchTemplates"][1]["LaunchTemplateName"].should.equal("test-template2") + + # filter by names + resp = cli.describe_launch_templates( + LaunchTemplateNames=["test-template2", "test-template"]) + resp.should.have.key("LaunchTemplates") + 
resp["LaunchTemplates"].should.have.length_of(2) + resp["LaunchTemplates"][0]["LaunchTemplateName"].should.equal("test-template2") + resp["LaunchTemplates"][1]["LaunchTemplateName"].should.equal("test-template") + + # filter by ids + resp = cli.describe_launch_templates(LaunchTemplateIds=lt_ids) + resp.should.have.key("LaunchTemplates") + resp["LaunchTemplates"].should.have.length_of(2) + resp["LaunchTemplates"][0]["LaunchTemplateName"].should.equal("test-template") + resp["LaunchTemplates"][1]["LaunchTemplateName"].should.equal("test-template2") + + +@mock_ec2 +def test_describe_launch_templates_with_filters(): + cli = boto3.client("ec2", region_name="us-east-1") + + r = cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + + cli.create_tags( + Resources=[r["LaunchTemplate"]["LaunchTemplateId"]], + Tags=[ + {"Key": "tag1", "Value": "a value"}, + {"Key": "another-key", "Value": "this value"}, + ]) + + cli.create_launch_template( + LaunchTemplateName="no-tags", + LaunchTemplateData={ + "ImageId": "ami-abc123" + }) + + resp = cli.describe_launch_templates(Filters=[{ + "Name": "tag:tag1", "Values": ["a value"] + }]) + + resp["LaunchTemplates"].should.have.length_of(1) + resp["LaunchTemplates"][0]["LaunchTemplateName"].should.equal("test-template") + + resp = cli.describe_launch_templates(Filters=[{ + "Name": "launch-template-name", "Values": ["no-tags"] + }]) + resp["LaunchTemplates"].should.have.length_of(1) + resp["LaunchTemplates"][0]["LaunchTemplateName"].should.equal("no-tags") + + +@mock_ec2 +def test_create_launch_template_with_tag_spec(): + cli = boto3.client("ec2", region_name="us-east-1") + + cli.create_launch_template( + LaunchTemplateName="test-template", + LaunchTemplateData={"ImageId": "ami-abc123"}, + TagSpecifications=[{ + "ResourceType": "instance", + "Tags": [ + {"Key": "key", "Value": "value"} + ] + }], + ) + + resp = cli.describe_launch_template_versions( + LaunchTemplateName="test-template", + Versions=["1"]) + version = resp["LaunchTemplateVersions"][0] + + version["LaunchTemplateData"].should.have.key("TagSpecifications") + version["LaunchTemplateData"]["TagSpecifications"].should.have.length_of(1) + version["LaunchTemplateData"]["TagSpecifications"][0].should.equal({ + "ResourceType": "instance", + "Tags": [ + {"Key": "key", "Value": "value"} + ] + }) diff --git a/tests/test_iam/test_iam_policies.py b/tests/test_iam/test_iam_policies.py new file mode 100644 index 000000000..e1924a559 --- /dev/null +++ b/tests/test_iam/test_iam_policies.py @@ -0,0 +1,1861 @@ +import json + +import boto3 +from botocore.exceptions import ClientError +from nose.tools import assert_raises + +from moto import mock_iam + +invalid_policy_document_test_cases = [ + { + "document": "This is not a json document", + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Policy document must be version 2012-10-17 or greater.' + }, + { + "document": { + "Version": "2008-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Policy document must be version 2012-10-17 or greater.' 
+ }, + { + "document": { + "Version": "2013-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17" + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": ["afd"] + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + "Extra field": "value" + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Extra field": "value" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Id": ["cd3a324d2343d942772346-34234234423404-4c2242343242349d1642ee"], + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Id": {}, + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "invalid", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "invalid", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Actions/Conditions must be prefaced by a vendor, e.g., iam, sdb, ec2, etc.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Actions/Conditions must be prefaced by a vendor, e.g., iam, sdb, ec2, etc.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "a a:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Vendor a a is not valid' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:List:Bucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Actions/Condition can contain only one colon.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "s3s:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + { + "Effect": "Allow", + "Action": "s:3s:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + ] + }, + "error_message": 'Actions/Condition can contain only one colon.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "invalid resource" + } + }, + "error_message": 'Resource invalid resource must be in ARN format or "*".' 
+ }, + { + "document": { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "EnableDisableHongKong", + "Effect": "Allow", + "Action": [ + "account:EnableRegion", + "account:DisableRegion" + ], + "Resource": "", + "Condition": { + "StringEquals": {"account:TargetRegion": "ap-east-1"} + } + }, + { + "Sid": "ViewConsole", + "Effect": "Allow", + "Action": [ + "aws-portal:ViewAccount", + "account:ListRegions" + ], + "Resource": "" + } + ] + }, + "error_message": 'Resource must be in ARN format or "*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s:3:ListBucket", + "Resource": "sdfsadf" + } + }, + "error_message": 'Resource sdfsadf must be in ARN format or "*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": ["adf"] + } + }, + "error_message": 'Resource adf must be in ARN format or "*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "" + } + }, + "error_message": 'Resource must be in ARN format or "*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3s:ListBucket", + "Resource": "a:bsdfdsafsad" + } + }, + "error_message": 'Partition "bsdfdsafsad" is not valid for resource "arn:bsdfdsafsad:*:*:*:*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3s:ListBucket", + "Resource": "a:b:cadfsdf" + } + }, + "error_message": 'Partition "b" is not valid for resource "arn:b:cadfsdf:*:*:*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3s:ListBucket", + "Resource": "a:b:c:d:e:f:g:h" + } + }, + "error_message": 'Partition "b" is not valid for resource "arn:b:c:d:e:f:g:h".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "aws:s3:::example_bucket" + } + }, + "error_message": 'Partition "s3" is not valid for resource "arn:s3:::example_bucket:*".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": [ + "arn:error:s3:::example_bucket", + "arn:error:s3::example_bucket" + ] + } + }, + "error_message": 'Partition "error" is not valid for resource "arn:error:s3:::example_bucket".' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": [] + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket" + } + }, + "error_message": 'Policy statement must contain resources.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": [] + } + }, + "error_message": 'Policy statement must contain resources.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "invalid" + } + }, + "error_message": 'Policy statement must contain resources.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Policy statement must contain actions.' 
+ }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow" + } + }, + "error_message": 'Policy statement must contain actions.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": [], + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Policy statement must contain actions.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Deny" + }, + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + ] + }, + "error_message": 'Policy statement must contain actions.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:iam:::example_bucket" + } + }, + "error_message": 'IAM resource path must either be "*" or start with user/, federated-user/, role/, group/, instance-profile/, mfa/, server-certificate/, policy/, sms-mfa/, saml-provider/, oidc-provider/, report/, access-report/.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Resource": "arn:aws:s3::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws" + } + }, + "error_message": 'Resource vendor must be fully qualified and cannot contain regexes.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": { + "a": "arn:aws:s3:::example_bucket" + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Deny", + "Action": "s3:ListBucket", + "Resource": ["adfdf", {}] + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "NotResource": [] + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Deny", + "Action": [[]], + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3s:ListBucket", + "Action": [], + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": {}, + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": [] + } + }, + "error_message": 'Syntax errors in policy.' 
+ }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": "a" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "a": "b" + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": "b" + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": [] + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"a": {}} + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"a": {}} + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "x": { + "a": "1" + } + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "ForAnyValue::StringEqualsIfExists": { + "a": "asf" + } + } + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": [ + {"ForAllValues:StringEquals": {"aws:TagKeys": "Department"}} + ] + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:iam:us-east-1::example_bucket" + } + }, + "error_message": 'IAM resource arn:aws:iam:us-east-1::example_bucket cannot contain region information.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:us-east-1::example_bucket" + } + }, + "error_message": 'Resource arn:aws:s3:us-east-1::example_bucket can not contain region information.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Sid": {}, + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Sid": [], + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' 
+ }, + { + "document": { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "sdf", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + { + "Sid": "sdf", + "Effect": "Allow" + } + ] + }, + "error_message": 'Statement IDs (SID) in a single policy must be unique.' + }, + { + "document": { + "Statement": [ + { + "Sid": "sdf", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + { + "Sid": "sdf", + "Effect": "Allow" + } + ] + }, + "error_message": 'Policy document must be version 2012-10-17 or greater.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "Action": "iam:dsf", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "NotResource": "*" + } + }, + "error_message": 'Syntax errors in policy.' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "denY", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"a": "sdfdsf"} + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"a": "sdfdsf"} + } + } + }, + "error_message": 'Policy document must be version 2012-10-17 or greater.' + }, + { + "document": { + "Statement": { + "Effect": "denY", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + "error_message": 'Policy document must be version 2012-10-17 or greater.' 
+ }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Condition": { + "DateGreaterThan": {"a": "sdfdsf"} + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "Resource": "arn:aws::::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": + { + "Effect": "allow", + "Resource": "arn:aws:s3:us-east-1::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "sdf", + "Effect": "aLLow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + { + "Sid": "sdf", + "Effect": "Allow" + } + ] + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "NotResource": "arn:aws:s3::example_bucket" + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "234-13" + } + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "2016-12-13t2:00:00.593194+1" + } + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "2016-12-13t2:00:00.1999999999+10:59" + } + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThan": { + "a": "9223372036854775808" + } + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:error:s3:::example_bucket", + "Condition": { + "DateGreaterThan": { + "a": "sdfdsf" + } + } + } + }, + "error_message": 'The policy failed legacy parsing' + }, + { + "document": { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws::fdsasf" + } + }, + "error_message": 'The policy failed legacy parsing' + } +] + +valid_policy_documents = [ + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": [ + "arn:aws:s3:::example_bucket" + ] + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "iam: asdf safdsf af ", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": [ + "arn:aws:s3:::example_bucket", + "*" + ] + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "*", + "Resource": 
"arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + ] + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "service-prefix:action-name", + "Resource": "*", + "Condition": { + "DateGreaterThan": {"aws:CurrentTime": "2017-07-01T00:00:00Z"}, + "DateLessThan": {"aws:CurrentTime": "2017-12-31T23:59:59Z"} + } + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "fsx:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:iam:::user/example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s33:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:fdsasf" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": {} + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": {"ForAllValues:StringEquals": {"aws:TagKeys": "Department"}} + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:cloudwatch:us-east-1::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:ec2:us-east-1::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:invalid-service:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:invalid-service:us-east-1::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"aws:CurrentTime": "2017-07-01T00:00:00Z"}, + "DateLessThan": {"aws:CurrentTime": "2017-12-31T23:59:59Z"} + } + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {} + } + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": {"a": []} + } + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "a": {} + } + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Sid": "dsfsdfsdfsdfsdfsadfsd", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ConsoleDisplay", + "Effect": "Allow", + "Action": [ + "iam:GetRole", + "iam:GetUser", + "iam:ListRoles", + "iam:ListRoleTags", + "iam:ListUsers", + "iam:ListUserTags" + ], + "Resource": "*" + }, + { + "Sid": "AddTag", + "Effect": "Allow", + 
"Action": [ + "iam:TagUser", + "iam:TagRole" + ], + "Resource": "*", + "Condition": { + "StringEquals": { + "aws:RequestTag/CostCenter": [ + "A-123", + "B-456" + ] + }, + "ForAllValues:StringEquals": {"aws:TagKeys": "CostCenter"} + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Deny", + "Action": "s3:*", + "NotResource": [ + "arn:aws:s3:::HRBucket/Payroll", + "arn:aws:s3:::HRBucket/Payroll/*" + ] + } + }, + { + "Version": "2012-10-17", + "Id": "sdfsdfsdf", + "Statement": { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "aaaaaadsfdsafsadfsadfaaaaa:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3-s:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "Action": "s3.s:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + }, + { + "Version": "2012-10-17", + "Statement": + { + "Effect": "Allow", + "NotAction": "s3:ListBucket", + "NotResource": "*" + } + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "sdf", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + }, + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } + ] + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateGreaterThan": { + "a": "01T" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "x": { + }, + "y": {} + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "StringEqualsIfExists": { + "a": "asf" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "ForAnyValue:StringEqualsIfExists": { + "a": "asf" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "2019-07-01T13:20:15Z" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "2016-12-13T21:20:37.593194+00:00" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThanEquals": { + "a": "2016-12-13t2:00:00.593194+23" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket", + "Condition": { + "DateLessThan": { + "a": "-292275054" + } + } + } + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowViewAccountInfo", + 
"Effect": "Allow", + "Action": [ + "iam:GetAccountPasswordPolicy", + "iam:GetAccountSummary", + "iam:ListVirtualMFADevices" + ], + "Resource": "*" + }, + { + "Sid": "AllowManageOwnPasswords", + "Effect": "Allow", + "Action": [ + "iam:ChangePassword", + "iam:GetUser" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnAccessKeys", + "Effect": "Allow", + "Action": [ + "iam:CreateAccessKey", + "iam:DeleteAccessKey", + "iam:ListAccessKeys", + "iam:UpdateAccessKey" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnSigningCertificates", + "Effect": "Allow", + "Action": [ + "iam:DeleteSigningCertificate", + "iam:ListSigningCertificates", + "iam:UpdateSigningCertificate", + "iam:UploadSigningCertificate" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnSSHPublicKeys", + "Effect": "Allow", + "Action": [ + "iam:DeleteSSHPublicKey", + "iam:GetSSHPublicKey", + "iam:ListSSHPublicKeys", + "iam:UpdateSSHPublicKey", + "iam:UploadSSHPublicKey" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnGitCredentials", + "Effect": "Allow", + "Action": [ + "iam:CreateServiceSpecificCredential", + "iam:DeleteServiceSpecificCredential", + "iam:ListServiceSpecificCredentials", + "iam:ResetServiceSpecificCredential", + "iam:UpdateServiceSpecificCredential" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnVirtualMFADevice", + "Effect": "Allow", + "Action": [ + "iam:CreateVirtualMFADevice", + "iam:DeleteVirtualMFADevice" + ], + "Resource": "arn:aws:iam::*:mfa/${aws:username}" + }, + { + "Sid": "AllowManageOwnUserMFA", + "Effect": "Allow", + "Action": [ + "iam:DeactivateMFADevice", + "iam:EnableMFADevice", + "iam:ListMFADevices", + "iam:ResyncMFADevice" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "DenyAllExceptListedIfNoMFA", + "Effect": "Deny", + "NotAction": [ + "iam:CreateVirtualMFADevice", + "iam:EnableMFADevice", + "iam:GetUser", + "iam:ListMFADevices", + "iam:ListVirtualMFADevices", + "iam:ResyncMFADevice", + "sts:GetSessionToken" + ], + "Resource": "*", + "Condition": { + "BoolIfExists": { + "aws:MultiFactorAuthPresent": "false" + } + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ListAndDescribe", + "Effect": "Allow", + "Action": [ + "dynamodb:List*", + "dynamodb:DescribeReservedCapacity*", + "dynamodb:DescribeLimits", + "dynamodb:DescribeTimeToLive" + ], + "Resource": "*" + }, + { + "Sid": "SpecificTable", + "Effect": "Allow", + "Action": [ + "dynamodb:BatchGet*", + "dynamodb:DescribeStream", + "dynamodb:DescribeTable", + "dynamodb:Get*", + "dynamodb:Query", + "dynamodb:Scan", + "dynamodb:BatchWrite*", + "dynamodb:CreateTable", + "dynamodb:Delete*", + "dynamodb:Update*", + "dynamodb:PutItem" + ], + "Resource": "arn:aws:dynamodb:*:*:table/MyTable" + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:DetachVolume" + ], + "Resource": [ + "arn:aws:ec2:*:*:volume/*", + "arn:aws:ec2:*:*:instance/*" + ], + "Condition": { + "ArnEquals": {"ec2:SourceInstanceARN": "arn:aws:ec2:*:*:instance/instance-id"} + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:DetachVolume" + ], + "Resource": "arn:aws:ec2:*:*:instance/*", + "Condition": { + "StringEquals": {"ec2:ResourceTag/Department": "Development"} + } + }, + { + 
"Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:DetachVolume" + ], + "Resource": "arn:aws:ec2:*:*:volume/*", + "Condition": { + "StringEquals": {"ec2:ResourceTag/VolumeUser": "${aws:username}"} + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "StartStopIfTags", + "Effect": "Allow", + "Action": [ + "ec2:StartInstances", + "ec2:StopInstances", + "ec2:DescribeTags" + ], + "Resource": "arn:aws:ec2:region:account-id:instance/*", + "Condition": { + "StringEquals": { + "ec2:ResourceTag/Project": "DataAnalytics", + "aws:PrincipalTag/Department": "Data" + } + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ListYourObjects", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": ["arn:aws:s3:::bucket-name"], + "Condition": { + "StringLike": { + "s3:prefix": ["cognito/application-name/${cognito-identity.amazonaws.com:sub}"] + } + } + }, + { + "Sid": "ReadWriteDeleteYourObjects", + "Effect": "Allow", + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject" + ], + "Resource": [ + "arn:aws:s3:::bucket-name/cognito/application-name/${cognito-identity.amazonaws.com:sub}", + "arn:aws:s3:::bucket-name/cognito/application-name/${cognito-identity.amazonaws.com:sub}/*" + ] + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:ListAllMyBuckets", + "s3:GetBucketLocation" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::bucket-name", + "Condition": { + "StringLike": { + "s3:prefix": [ + "", + "home/", + "home/${aws:userid}/*" + ] + } + } + }, + { + "Effect": "Allow", + "Action": "s3:*", + "Resource": [ + "arn:aws:s3:::bucket-name/home/${aws:userid}", + "arn:aws:s3:::bucket-name/home/${aws:userid}/*" + ] + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ConsoleAccess", + "Effect": "Allow", + "Action": [ + "s3:GetAccountPublicAccessBlock", + "s3:GetBucketAcl", + "s3:GetBucketLocation", + "s3:GetBucketPolicyStatus", + "s3:GetBucketPublicAccessBlock", + "s3:ListAllMyBuckets" + ], + "Resource": "*" + }, + { + "Sid": "ListObjectsInBucket", + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": ["arn:aws:s3:::bucket-name"] + }, + { + "Sid": "AllObjectActions", + "Effect": "Allow", + "Action": "s3:*Object", + "Resource": ["arn:aws:s3:::bucket-name/*"] + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowViewAccountInfo", + "Effect": "Allow", + "Action": [ + "iam:GetAccountPasswordPolicy", + "iam:GetAccountSummary" + ], + "Resource": "*" + }, + { + "Sid": "AllowManageOwnPasswords", + "Effect": "Allow", + "Action": [ + "iam:ChangePassword", + "iam:GetUser" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnAccessKeys", + "Effect": "Allow", + "Action": [ + "iam:CreateAccessKey", + "iam:DeleteAccessKey", + "iam:ListAccessKeys", + "iam:UpdateAccessKey" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnSigningCertificates", + "Effect": "Allow", + "Action": [ + "iam:DeleteSigningCertificate", + "iam:ListSigningCertificates", + "iam:UpdateSigningCertificate", + "iam:UploadSigningCertificate" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnSSHPublicKeys", + "Effect": "Allow", + "Action": [ + "iam:DeleteSSHPublicKey", + "iam:GetSSHPublicKey", + "iam:ListSSHPublicKeys", + "iam:UpdateSSHPublicKey", + "iam:UploadSSHPublicKey" + ], + "Resource": 
"arn:aws:iam::*:user/${aws:username}" + }, + { + "Sid": "AllowManageOwnGitCredentials", + "Effect": "Allow", + "Action": [ + "iam:CreateServiceSpecificCredential", + "iam:DeleteServiceSpecificCredential", + "iam:ListServiceSpecificCredentials", + "iam:ResetServiceSpecificCredential", + "iam:UpdateServiceSpecificCredential" + ], + "Resource": "arn:aws:iam::*:user/${aws:username}" + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Action": "ec2:*", + "Resource": "*", + "Effect": "Allow", + "Condition": { + "StringEquals": { + "ec2:Region": "region" + } + } + } + ] + }, + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "rds:*", + "Resource": ["arn:aws:rds:region:*:*"] + }, + { + "Effect": "Allow", + "Action": ["rds:Describe*"], + "Resource": ["*"] + } + ] + } +] + + +def test_create_policy_with_invalid_policy_documents(): + for test_case in invalid_policy_document_test_cases: + yield check_create_policy_with_invalid_policy_document, test_case + + +def test_create_policy_with_valid_policy_documents(): + for valid_policy_document in valid_policy_documents: + yield check_create_policy_with_valid_policy_document, valid_policy_document + + +@mock_iam +def check_create_policy_with_invalid_policy_document(test_case): + conn = boto3.client('iam', region_name='us-east-1') + with assert_raises(ClientError) as ex: + conn.create_policy( + PolicyName="TestCreatePolicy", + PolicyDocument=json.dumps(test_case["document"])) + ex.exception.response['Error']['Code'].should.equal('MalformedPolicyDocument') + ex.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + ex.exception.response['Error']['Message'].should.equal(test_case["error_message"]) + + +@mock_iam +def check_create_policy_with_valid_policy_document(valid_policy_document): + conn = boto3.client('iam', region_name='us-east-1') + conn.create_policy( + PolicyName="TestCreatePolicy", + PolicyDocument=json.dumps(valid_policy_document)) diff --git a/tests/test_ses/test_ses_sns_boto3.py b/tests/test_ses/test_ses_sns_boto3.py new file mode 100644 index 000000000..37f79a8b0 --- /dev/null +++ b/tests/test_ses/test_ses_sns_boto3.py @@ -0,0 +1,114 @@ +from __future__ import unicode_literals + +import boto3 +import json +from botocore.exceptions import ClientError +from six.moves.email_mime_multipart import MIMEMultipart +from six.moves.email_mime_text import MIMEText + +import sure # noqa +from nose import tools +from moto import mock_ses, mock_sns, mock_sqs +from moto.ses.models import SESFeedback + + +@mock_ses +def test_enable_disable_ses_sns_communication(): + conn = boto3.client('ses', region_name='us-east-1') + conn.set_identity_notification_topic( + Identity='test.com', + NotificationType='Bounce', + SnsTopic='the-arn' + ) + conn.set_identity_notification_topic( + Identity='test.com', + NotificationType='Bounce' + ) + + +def __setup_feedback_env__(ses_conn, sns_conn, sqs_conn, domain, topic, queue, region, expected_msg): + """Setup the AWS environment to test the SES SNS Feedback""" + # Environment setup + # Create SQS queue + sqs_conn.create_queue(QueueName=queue) + # Create SNS topic + create_topic_response = sns_conn.create_topic(Name=topic) + topic_arn = create_topic_response["TopicArn"] + # Subscribe the SNS topic to the SQS queue + sns_conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:%s:123456789012:%s" % (region, queue)) + # Verify SES domain + ses_conn.verify_domain_identity(Domain=domain) + # Setup SES notification topic + if expected_msg is 
diff --git a/tests/test_ses/test_ses_sns_boto3.py b/tests/test_ses/test_ses_sns_boto3.py
new file mode 100644
index 000000000..37f79a8b0
--- /dev/null
+++ b/tests/test_ses/test_ses_sns_boto3.py
@@ -0,0 +1,114 @@
+from __future__ import unicode_literals
+
+import boto3
+import json
+from botocore.exceptions import ClientError
+from six.moves.email_mime_multipart import MIMEMultipart
+from six.moves.email_mime_text import MIMEText
+
+import sure # noqa
+from nose import tools
+from moto import mock_ses, mock_sns, mock_sqs
+from moto.ses.models import SESFeedback
+
+
+@mock_ses
+def test_enable_disable_ses_sns_communication():
+    conn = boto3.client('ses', region_name='us-east-1')
+    conn.set_identity_notification_topic(
+        Identity='test.com',
+        NotificationType='Bounce',
+        SnsTopic='the-arn'
+    )
+    conn.set_identity_notification_topic(
+        Identity='test.com',
+        NotificationType='Bounce'
+    )
+
+
+def __setup_feedback_env__(ses_conn, sns_conn, sqs_conn, domain, topic, queue, region, expected_msg):
+    """Setup the AWS environment to test the SES SNS Feedback"""
+    # Environment setup
+    # Create SQS queue
+    sqs_conn.create_queue(QueueName=queue)
+    # Create SNS topic
+    create_topic_response = sns_conn.create_topic(Name=topic)
+    topic_arn = create_topic_response["TopicArn"]
+    # Subscribe the SNS topic to the SQS queue
+    sns_conn.subscribe(TopicArn=topic_arn,
+                       Protocol="sqs",
+                       Endpoint="arn:aws:sqs:%s:123456789012:%s" % (region, queue))
+    # Verify SES domain
+    ses_conn.verify_domain_identity(Domain=domain)
+    # Setup SES notification topic
+    if expected_msg is not None:
+        ses_conn.set_identity_notification_topic(
+            Identity=domain,
+            NotificationType=expected_msg,
+            SnsTopic=topic_arn
+        )
+
+
+def __test_sns_feedback__(addr, expected_msg):
+    region_name = "us-east-1"
+    ses_conn = boto3.client('ses', region_name=region_name)
+    sns_conn = boto3.client('sns', region_name=region_name)
+    sqs_conn = boto3.resource('sqs', region_name=region_name)
+    domain = "example.com"
+    topic = "bounce-arn-feedback"
+    queue = "feedback-test-queue"
+
+    __setup_feedback_env__(ses_conn, sns_conn, sqs_conn, domain, topic, queue, region_name, expected_msg)
+
+    # Send the message
+    kwargs = dict(
+        Source="test@" + domain,
+        Destination={
+            "ToAddresses": [addr + "@" + domain],
+            "CcAddresses": ["test_cc@" + domain],
+            "BccAddresses": ["test_bcc@" + domain],
+        },
+        Message={
+            "Subject": {"Data": "test subject"},
+            "Body": {"Text": {"Data": "test body"}}
+        }
+    )
+    ses_conn.send_email(**kwargs)
+
+    # Wait for messages in the queues
+    queue = sqs_conn.get_queue_by_name(QueueName=queue)
+    messages = queue.receive_messages(MaxNumberOfMessages=1)
+    if expected_msg is not None:
+        msg = messages[0].body
+        msg = json.loads(msg)
+        assert msg["Message"] == SESFeedback.generate_message(expected_msg)
+    else:
+        assert len(messages) == 0
+
+
+@mock_sqs
+@mock_sns
+@mock_ses
+def test_no_sns_feedback():
+    __test_sns_feedback__("test", None)
+
+
+@mock_sqs
+@mock_sns
+@mock_ses
+def test_sns_feedback_bounce():
+    __test_sns_feedback__(SESFeedback.BOUNCE_ADDR, SESFeedback.BOUNCE)
+
+
+@mock_sqs
+@mock_sns
+@mock_ses
+def test_sns_feedback_complaint():
+    __test_sns_feedback__(SESFeedback.COMPLAINT_ADDR, SESFeedback.COMPLAINT)
+
+
+@mock_sqs
+@mock_sns
+@mock_ses
+def test_sns_feedback_delivery():
+    __test_sns_feedback__(SESFeedback.SUCCESS_ADDR, SESFeedback.DELIVERY)
diff --git a/update_version_from_git.py b/update_version_from_git.py
new file mode 100644
index 000000000..d72dc4ae9
--- /dev/null
+++ b/update_version_from_git.py
@@ -0,0 +1,120 @@
+"""
+Adapted from https://github.com/pygame/pygameweb/blob/master/pygameweb/builds/update_version_from_git.py
+
+For updating the version from git.
+__init__.py contains a __version__ field.
+Update that.
+If we are on master, we want to update the version as a pre-release.
+git describe --tags
+With these:
+    __init__.py
+        __version__= '0.0.2'
+    git describe --tags
+        0.0.1-22-g729a5ae
+We want this:
+    __init__.py
+        __version__= '0.0.2.dev22.g729a5ae'
+Get the branch/tag name with this.
+    git symbolic-ref -q --short HEAD || git describe --tags --exact-match
+"""
+
+import io
+import os
+import re
+import subprocess
+
+
+def migrate_source_attribute(attr, to_this, target_file, regex):
+    """Updates __magic__ attributes in the source file"""
+    change_this = re.compile(regex, re.S)
+    new_file = []
+    found = False
+
+    with open(target_file, 'r') as fp:
+        lines = fp.readlines()
+
+    for line in lines:
+        if line.startswith(attr):
+            found = True
+            line = re.sub(change_this, to_this, line)
+        new_file.append(line)
+
+    if found:
+        with open(target_file, 'w') as fp:
+            fp.writelines(new_file)
+
+def migrate_version(target_file, new_version):
+    """Updates __version__ in the source file"""
+    regex = r"['\"](.*)['\"]"
+    migrate_source_attribute('__version__', "'{new_version}'".format(new_version=new_version), target_file, regex)
+
+
+def is_master_branch():
+    cmd = ('git rev-parse --abbrev-ref HEAD')
+    tag_branch = subprocess.check_output(cmd, shell=True)
+    return tag_branch in [b'master\n']
+
+def git_tag_name():
+    cmd = ('git describe --tags')
+    tag_branch = subprocess.check_output(cmd, shell=True)
+    tag_branch = tag_branch.decode().strip()
+    return tag_branch
+
+def get_git_version_info():
+    cmd = 'git describe --tags'
+    ver_str = subprocess.check_output(cmd, shell=True)
+    ver, commits_since, githash = ver_str.decode().strip().split('-')
+    return ver, commits_since, githash
+
+def prerelease_version():
+    """ return what the prerelease version should be.
+    https://packaging.python.org/tutorials/distributing-packages/#pre-release-versioning
+    0.0.2.dev22
+    """
+    ver, commits_since, githash = get_git_version_info()
+    initpy_ver = get_version()
+
+    assert len(initpy_ver.split('.')) in [3, 4], 'moto/__init__.py version should be like 0.0.2.dev'
+    assert initpy_ver > ver, 'the moto/__init__.py version should be newer than the last tagged release.'
+    return '{initpy_ver}.{commits_since}'.format(initpy_ver=initpy_ver, commits_since=commits_since)
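To make the scheme above concrete: git describe --tags prints "<last-tag>-<commits-since-tag>-g<short-hash>", and prerelease_version() appends the commit count to whatever version moto/__init__.py already carries. A worked sketch using the hypothetical values from the module docstring (illustration only, not part of the patch):

    describe_output = "0.0.1-22-g729a5ae"  # assumed output of `git describe --tags`
    ver, commits_since, githash = describe_output.split("-")
    initpy_ver = "0.0.2.dev"               # assumed __version__ in moto/__init__.py
    new_version = "{0}.{1}".format(initpy_ver, commits_since)
    assert (ver, commits_since, githash) == ("0.0.1", "22", "g729a5ae")
    assert new_version == "0.0.2.dev.22"   # what the format string above produces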
+
+def read(*parts):
+    """ Reads in file from *parts.
+    """
+    try:
+        return io.open(os.path.join(*parts), 'r', encoding='utf-8').read()
+    except IOError:
+        return ''
+
+def get_version():
+    """ Returns version from moto/__init__.py
+    """
+    version_file = read('moto', '__init__.py')
+    version_match = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]',
+                              version_file, re.MULTILINE)
+    if version_match:
+        return version_match.group(1)
+    raise RuntimeError('Unable to find version string.')
+
+
+def release_version_correct():
+    """Makes sure the:
+    - prerelease version for master is correct.
+    - release version is correct for tags.
+    """
+    if is_master_branch():
+        # update for a pre-release version.
+        initpy = os.path.abspath("moto/__init__.py")
+
+        new_version = prerelease_version()
+        print('updating version in __init__.py to {new_version}'.format(new_version=new_version))
+        assert len(new_version.split('.')) >= 4, 'moto/__init__.py version should be like 0.0.2.dev'
+        migrate_version(initpy, new_version)
+    else:
+        assert False, "No non-master deployments yet"
+        # check that we are a tag with the same version as in __init__.py
+        assert get_version() == git_tag_name(), 'git tag/branch name not the same as moto/__init__.py __version__'
+
+
+if __name__ == '__main__':
+    release_version_correct()
From b51d5ad65f27384233c21a0974cff64bd7476cd6 Mon Sep 17 00:00:00 2001
From: Niklas Janlert
Date: Thu, 28 Mar 2019 15:10:57 +0100
Subject: [PATCH 021/125] Support x-amz-tagging-directive in s3 copy_object

---
 moto/s3/responses.py     |  4 ++++
 tests/test_s3/test_s3.py | 28 ++++++++++++++++++++++++++++
 2 files changed, 32 insertions(+)

diff --git a/moto/s3/responses.py b/moto/s3/responses.py
index fd3a7b2db..5c985f7a3 100644
--- a/moto/s3/responses.py
+++ b/moto/s3/responses.py
@@ -1051,6 +1051,10 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin):
         if mdirective is not None and mdirective == "REPLACE":
             metadata = metadata_from_headers(request.headers)
             new_key.set_metadata(metadata, replace=True)
+        tdirective = request.headers.get("x-amz-tagging-directive")
+        if tdirective == "REPLACE":
+            tagging = self._tagging_from_headers(request.headers)
+            new_key.set_tagging(tagging)
         template = self.response_template(S3_OBJECT_COPY_RESPONSE)
         response_headers.update(new_key.response_dict)
         return 200, response_headers, template.render(key=new_key)
diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py
index 8f3c3538c..cf3ae71c8 100644
--- a/tests/test_s3/test_s3.py
+++ b/tests/test_s3/test_s3.py
@@ -1727,6 +1727,34 @@ def test_boto3_copy_object_from_unversioned_to_versioned_bucket():
     obj2_version_new.should_not.equal(None)
 
 
+@mock_s3
+def test_boto3_copy_object_with_replacement_tagging():
+    client = boto3.client("s3", region_name="eu-north-1")
+    client.create_bucket(Bucket="mybucket")
+    client.put_object(
+        Bucket="mybucket", Key="original", Body=b"test", Tagging="tag=old"
+    )
+
+    client.copy_object(
+        CopySource={"Bucket": "mybucket", "Key": "original"},
+        Bucket="mybucket",
+        Key="copy1",
+        TaggingDirective="REPLACE",
+        Tagging="tag=new",
+    )
+    client.copy_object(
+        CopySource={"Bucket": "mybucket", "Key": "original"},
+        Bucket="mybucket",
+        Key="copy2",
+        TaggingDirective="COPY",
+    )
+
+    tags1 = client.get_object_tagging(Bucket="mybucket", Key="copy1")["TagSet"]
+    tags1.should.equal([{"Key": "tag", "Value": "new"}])
+    tags2 = client.get_object_tagging(Bucket="mybucket", Key="copy2")["TagSet"]
+    tags2.should.equal([{"Key": "tag", "Value": "old"}])
+
+
 @mock_s3
 def test_boto3_deleted_versionings_list():
     client = boto3.client("s3", region_name="us-east-1")
From a6aa0f6dbf0b02b0d9e2b644ae83bd6d6f263612 Mon Sep 17 00:00:00 2001
From: Stephan Huber
Date: Mon, 23 Dec 2019 08:46:37 +0100
Subject: [PATCH 022/125] Update models.py

---
 moto/iot/models.py | 170 ++++++++++++++++++++++-----------------------
 1 file changed, 84 insertions(+), 86 deletions(-)

diff --git a/moto/iot/models.py b/moto/iot/models.py
index b2599de1d..3c3e0cfe2 100644
--- a/moto/iot/models.py
+++ b/moto/iot/models.py
@@ -30,7 +30,7 @@ class FakeThing(BaseModel):
         self.attributes = attributes
         self.arn = "arn:aws:iot:%s:1:thing/%s" % (self.region_name, thing_name)
         self.version = 1
-        # TODO: we need to handle 'version'?
+ # TODO: we need to handle "version"? # for iot-data self.thing_shadow = None @@ -97,7 +97,7 @@ class FakeThingGroup(BaseModel): break # if parent arn found (should always be found) if parent_thing_group_structure: - # copy parent's rootToParentThingGroups + # copy parent"s rootToParentThingGroups if "rootToParentThingGroups" in parent_thing_group_structure.metadata: self.metadata["rootToParentThingGroups"].extend( parent_thing_group_structure.metadata["rootToParentThingGroups"] @@ -175,27 +175,27 @@ class FakeCertificate(BaseModel): class FakePolicy(BaseModel): - def __init__(self, name, document, region_name, default_version_id='1'): + def __init__(self, name, document, region_name, default_version_id="1"): self.name = name self.document = document - self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, name) + self.arn = "arn:aws:iot:%s:1:policy/%s" % (region_name, name) self.default_version_id = default_version_id self.versions = [FakePolicyVersion(self.name, document, True, region_name)] def to_get_dict(self): return { - 'policyName': self.name, - 'policyArn': self.arn, - 'policyDocument': self.document, - 'defaultVersionId': self.default_version_id + "policyName": self.name, + "policyArn": self.arn, + "policyDocument": self.document, + "defaultVersionId": self.default_version_id } def to_dict_at_creation(self): return { - 'policyName': self.name, - 'policyArn': self.arn, - 'policyDocument': self.document, - 'policyVersionId': self.default_version_id + "policyName": self.name, + "policyArn": self.arn, + "policyDocument": self.document, + "policyVersionId": self.default_version_id } def to_dict(self): @@ -210,39 +210,39 @@ class FakePolicyVersion(object): is_default, region_name): self.name = policy_name - self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, policy_name) + self.arn = "arn:aws:iot:%s:1:policy/%s" % (region_name, policy_name) self.document = document or {} self.is_default = is_default - self.version_id = '1' + self.version_id = "1" self.create_datetime = time.mktime(datetime(2015, 1, 1).timetuple()) self.last_modified_datetime = time.mktime(datetime(2015, 1, 2).timetuple()) def to_get_dict(self): return { - 'policyName': self.name, - 'policyArn': self.arn, - 'policyDocument': self.document, - 'policyVersionId': self.version_id, - 'isDefaultVersion': self.is_default, - 'creationDate': self.create_datetime, - 'lastModifiedDate': self.last_modified_datetime, - 'generationId': self.version_id + "policyName": self.name, + "policyArn": self.arn, + "policyDocument": self.document, + "policyVersionId": self.version_id, + "isDefaultVersion": self.is_default, + "creationDate": self.create_datetime, + "lastModifiedDate": self.last_modified_datetime, + "generationId": self.version_id } def to_dict_at_creation(self): return { - 'policyArn': self.arn, - 'policyDocument': self.document, - 'policyVersionId': self.version_id, - 'isDefaultVersion': self.is_default + "policyArn": self.arn, + "policyDocument": self.document, + "policyVersionId": self.version_id, + "isDefaultVersion": self.is_default } def to_dict(self): return { - 'versionId': self.version_id, - 'isDefaultVersion': self.is_default, - 'createDate': self.create_datetime, + "versionId": self.version_id, + "isDefaultVersion": self.is_default, + "createDate": self.create_datetime, } @@ -277,7 +277,7 @@ class FakeJob(BaseModel): self.presigned_url_config = presigned_url_config self.target_selection = target_selection self.job_executions_rollout_config = job_executions_rollout_config - self.status = 'QUEUED' # IN_PROGRESS | 
CANCELED | COMPLETED + self.status = "QUEUED" # IN_PROGRESS | CANCELED | COMPLETED self.comment = None self.reason_code = None self.created_at = time.mktime(datetime(2015, 1, 1).timetuple()) @@ -297,24 +297,24 @@ class FakeJob(BaseModel): def to_dict(self): obj = { - 'jobArn': self.job_arn, - 'jobId': self.job_id, - 'targets': self.targets, - 'description': self.description, - 'presignedUrlConfig': self.presigned_url_config, - 'targetSelection': self.target_selection, - 'jobExecutionsRolloutConfig': self.job_executions_rollout_config, - 'status': self.status, - 'comment': self.comment, - 'forceCanceled': self.force, - 'reasonCode': self.reason_code, - 'createdAt': self.created_at, - 'lastUpdatedAt': self.last_updated_at, - 'completedAt': self.completed_at, - 'jobProcessDetails': self.job_process_details, - 'documentParameters': self.document_parameters, - 'document': self.document, - 'documentSource': self.document_source + "jobArn": self.job_arn, + "jobId": self.job_id, + "targets": self.targets, + "description": self.description, + "presignedUrlConfig": self.presigned_url_config, + "targetSelection": self.target_selection, + "jobExecutionsRolloutConfig": self.job_executions_rollout_config, + "status": self.status, + "comment": self.comment, + "forceCanceled": self.force, + "reasonCode": self.reason_code, + "createdAt": self.created_at, + "lastUpdatedAt": self.last_updated_at, + "completedAt": self.completed_at, + "jobProcessDetails": self.job_process_details, + "documentParameters": self.document_parameters, + "document": self.document, + "documentSource": self.document_source } return obj @@ -327,7 +327,7 @@ class FakeJob(BaseModel): class FakeJobExecution(BaseModel): - def __init__(self, job_id, thing_arn, status='QUEUED', force_canceled=False, status_details_map={}): + def __init__(self, job_id, thing_arn, status="QUEUED", force_canceled=False, status_details_map={}): self.job_id = job_id self.status = status # IN_PROGRESS | CANCELED | COMPLETED self.force_canceled = force_canceled @@ -342,31 +342,31 @@ class FakeJobExecution(BaseModel): def to_get_dict(self): obj = { - 'jobId': self.job_id, - 'status': self.status, - 'forceCanceled': self.force_canceled, - 'statusDetails': {'detailsMap': self.status_details_map}, - 'thingArn': self.thing_arn, - 'queuedAt': self.queued_at, - 'startedAt': self.started_at, - 'lastUpdatedAt': self.last_updated_at, - 'executionNumber': self.execution_number, - 'versionNumber': self.version_number, - 'approximateSecondsBeforeTimedOut': self.approximate_seconds_before_time_out + "jobId": self.job_id, + "status": self.status, + "forceCanceled": self.force_canceled, + "statusDetails": {"detailsMap": self.status_details_map}, + "thingArn": self.thing_arn, + "queuedAt": self.queued_at, + "startedAt": self.started_at, + "lastUpdatedAt": self.last_updated_at, + "executionNumber": self.execution_number, + "versionNumber": self.version_number, + "approximateSecondsBeforeTimedOut": self.approximate_seconds_before_time_out } return obj def to_dict(self): obj = { - 'jobId': self.job_id, - 'thingArn': self.thing_arn, - 'jobExecutionSummary': { - 'status': self.status, - 'queuedAt': self.queued_at, - 'startedAt': self.started_at, - 'lastUpdatedAt': self.last_updated_at, - 'executionNumber': self.execution_number, + "jobId": self.job_id, + "thingArn": self.thing_arn, + "jobExecutionSummary": { + "status": self.status, + "queuedAt": self.queued_at, + "startedAt": self.started_at, + "lastUpdatedAt": self.last_updated_at, + "executionNumber": self.execution_number, } } @@ 
-423,7 +423,7 @@ class IoTBackend(BaseBackend): def list_thing_types(self, thing_type_name=None): if thing_type_name: - # It's weird but thing_type_name is filtered by forward match, not complete match + # It"s weird but thing_type_name is filtered by forward match, not complete match return [ _ for _ in self.thing_types.values() @@ -686,7 +686,7 @@ class IoTBackend(BaseBackend): raise ResourceNotFoundException() version = FakePolicyVersion(policy_name, policy_document, set_as_default, self.region_name) policy.versions.append(version) - version.version_id = '{0}'.format(len(policy.versions)) + version.version_id = "{0}".format(len(policy.versions)) if set_as_default: self.set_default_policy_version(policy_name, version.version_id) return version @@ -976,7 +976,7 @@ class IoTBackend(BaseBackend): self.jobs[job_id] = job for thing_arn in targets: - thing_name = thing_arn.split(':')[-1].split('/')[-1] + thing_name = thing_arn.split(":")[-1].split("/")[-1] job_execution = FakeJobExecution(job_id, thing_arn) self.job_executions[(job_id, thing_name)] = job_execution return job.job_arn, job_id, description @@ -990,9 +990,9 @@ class IoTBackend(BaseBackend): def delete_job(self, job_id, force): job = self.jobs[job_id] - if job.status == 'IN_PROGRESS' and force: + if job.status == "IN_PROGRESS" and force: del self.jobs[job_id] - elif job.status != 'IN_PROGRESS': + elif job.status != "IN_PROGRESS": del self.jobs[job_id] else: raise InvalidStateTransitionException() @@ -1003,11 +1003,11 @@ class IoTBackend(BaseBackend): job.reason_code = reason_code if reason_code is not None else job.reason_code job.comment = comment if comment is not None else job.comment job.force = force if force is not None and force != job.force else job.force - job.status = 'CANCELED' + job.status = "CANCELED" - if job.status == 'IN_PROGRESS' and force: + if job.status == "IN_PROGRESS" and force: self.jobs[job_id] = job - elif job.status != 'IN_PROGRESS': + elif job.status != "IN_PROGRESS": self.jobs[job_id] = job else: raise InvalidStateTransitionException() @@ -1053,11 +1053,11 @@ class IoTBackend(BaseBackend): job_execution.force_canceled = force if force is not None else job_execution.force_canceled # TODO: implement expected_version and status_details (at most 10 can be specified) - if job_execution.status == 'IN_PROGRESS' and force: - job_execution.status = 'CANCELED' + if job_execution.status == "IN_PROGRESS" and force: + job_execution.status = "CANCELED" self.job_executions[(job_id, thing_name)] = job_execution - elif job_execution.status != 'IN_PROGRESS': - job_execution.status = 'CANCELED' + elif job_execution.status != "IN_PROGRESS": + job_execution.status = "CANCELED" self.job_executions[(job_id, thing_name)] = job_execution else: raise InvalidStateTransitionException() @@ -1068,9 +1068,9 @@ class IoTBackend(BaseBackend): if job_execution.execution_number != execution_number: raise ResourceNotFoundException() - if job_execution.status == 'IN_PROGRESS' and force: + if job_execution.status == "IN_PROGRESS" and force: del self.job_executions[(job_id, thing_name)] - elif job_execution.status != 'IN_PROGRESS': + elif job_execution.status != "IN_PROGRESS": del self.job_executions[(job_id, thing_name)] else: raise InvalidStateTransitionException() @@ -1080,8 +1080,7 @@ class IoTBackend(BaseBackend): if status is not None: job_executions = list(filter(lambda elem: - status in elem["status"] and - elem["status"] == status, job_executions)) + status in elem["status"] and elem["status"] == status, job_executions)) token = 
next_token if token is None: @@ -1099,8 +1098,7 @@ class IoTBackend(BaseBackend): if status is not None: job_executions = list(filter(lambda elem: - status in elem["status"] and - elem["status"] == status, job_executions)) + status in elem["status"] and elem["status"] == status, job_executions)) token = next_token if token is None: From ed8d5edb5070c6c815c83fbd547d62ad8181ea7d Mon Sep 17 00:00:00 2001 From: Stephan Huber Date: Mon, 23 Dec 2019 09:01:53 +0100 Subject: [PATCH 023/125] fix linting errors --- moto/iot/exceptions.py | 2 +- moto/iot/models.py | 120 +++++++++++----- moto/iot/responses.py | 172 ++++++++++++---------- tests/test_iot/test_iot.py | 285 ++++++++++++++++++++++--------------- 4 files changed, 347 insertions(+), 232 deletions(-) diff --git a/moto/iot/exceptions.py b/moto/iot/exceptions.py index 2854fbb17..d114a12ad 100644 --- a/moto/iot/exceptions.py +++ b/moto/iot/exceptions.py @@ -27,7 +27,7 @@ class InvalidStateTransitionException(IoTClientError): self.code = 409 super(InvalidStateTransitionException, self).__init__( "InvalidStateTransitionException", - msg or "An attempt was made to change to an invalid state." + msg or "An attempt was made to change to an invalid state.", ) diff --git a/moto/iot/models.py b/moto/iot/models.py index 3c3e0cfe2..37e9b4ef9 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -18,7 +18,7 @@ from .exceptions import ( ResourceNotFoundException, InvalidRequestException, InvalidStateTransitionException, - VersionConflictException + VersionConflictException, ) @@ -187,7 +187,7 @@ class FakePolicy(BaseModel): "policyName": self.name, "policyArn": self.arn, "policyDocument": self.document, - "defaultVersionId": self.default_version_id + "defaultVersionId": self.default_version_id, } def to_dict_at_creation(self): @@ -195,7 +195,7 @@ class FakePolicy(BaseModel): "policyName": self.name, "policyArn": self.arn, "policyDocument": self.document, - "policyVersionId": self.default_version_id + "policyVersionId": self.default_version_id, } def to_dict(self): @@ -203,12 +203,7 @@ class FakePolicy(BaseModel): class FakePolicyVersion(object): - - def __init__(self, - policy_name, - document, - is_default, - region_name): + def __init__(self, policy_name, document, is_default, region_name): self.name = policy_name self.arn = "arn:aws:iot:%s:1:policy/%s" % (region_name, policy_name) self.document = document or {} @@ -227,7 +222,7 @@ class FakePolicyVersion(object): "isDefaultVersion": self.is_default, "creationDate": self.create_datetime, "lastModifiedDate": self.last_modified_datetime, - "generationId": self.version_id + "generationId": self.version_id, } def to_dict_at_creation(self): @@ -235,7 +230,7 @@ class FakePolicyVersion(object): "policyArn": self.arn, "policyDocument": self.document, "policyVersionId": self.version_id, - "isDefaultVersion": self.is_default + "isDefaultVersion": self.is_default, } def to_dict(self): @@ -314,7 +309,7 @@ class FakeJob(BaseModel): "jobProcessDetails": self.job_process_details, "documentParameters": self.document_parameters, "document": self.document, - "documentSource": self.document_source + "documentSource": self.document_source, } return obj @@ -326,8 +321,14 @@ class FakeJob(BaseModel): class FakeJobExecution(BaseModel): - - def __init__(self, job_id, thing_arn, status="QUEUED", force_canceled=False, status_details_map={}): + def __init__( + self, + job_id, + thing_arn, + status="QUEUED", + force_canceled=False, + status_details_map={}, + ): self.job_id = job_id self.status = status # IN_PROGRESS | CANCELED 
| COMPLETED self.force_canceled = force_canceled @@ -352,7 +353,7 @@ class FakeJobExecution(BaseModel): "lastUpdatedAt": self.last_updated_at, "executionNumber": self.execution_number, "versionNumber": self.version_number, - "approximateSecondsBeforeTimedOut": self.approximate_seconds_before_time_out + "approximateSecondsBeforeTimedOut": self.approximate_seconds_before_time_out, } return obj @@ -367,7 +368,7 @@ class FakeJobExecution(BaseModel): "startedAt": self.started_at, "lastUpdatedAt": self.last_updated_at, "executionNumber": self.execution_number, - } + }, } return obj @@ -684,7 +685,9 @@ class IoTBackend(BaseBackend): policy = self.get_policy(policy_name) if not policy: raise ResourceNotFoundException() - version = FakePolicyVersion(policy_name, policy_document, set_as_default, self.region_name) + version = FakePolicyVersion( + policy_name, policy_document, set_as_default, self.region_name + ) policy.versions.append(version) version.version_id = "{0}".format(len(policy.versions)) if set_as_default: @@ -724,7 +727,8 @@ class IoTBackend(BaseBackend): raise ResourceNotFoundException() if version_id == policy.default_version_id: raise InvalidRequestException( - "Cannot delete the default version of a policy") + "Cannot delete the default version of a policy" + ) for i, v in enumerate(policy.versions): if v.version_id == version_id: del policy.versions[i] @@ -1017,7 +1021,15 @@ class IoTBackend(BaseBackend): def get_job_document(self, job_id): return self.jobs[job_id] - def list_jobs(self, status, target_selection, max_results, token, thing_group_name, thing_group_id): + def list_jobs( + self, + status, + target_selection, + max_results, + token, + thing_group_name, + thing_group_id, + ): # TODO: implement filters all_jobs = [_.to_dict() for _ in self.jobs.values()] filtered_jobs = all_jobs @@ -1027,8 +1039,12 @@ class IoTBackend(BaseBackend): next_token = str(max_results) if len(filtered_jobs) > max_results else None else: token = int(token) - jobs = filtered_jobs[token:token + max_results] - next_token = str(token + max_results) if len(filtered_jobs) > token + max_results else None + jobs = filtered_jobs[token : token + max_results] + next_token = ( + str(token + max_results) + if len(filtered_jobs) > token + max_results + else None + ) return jobs, next_token @@ -1038,19 +1054,25 @@ class IoTBackend(BaseBackend): except KeyError: raise ResourceNotFoundException() - if job_execution is None or \ - (execution_number is not None and job_execution.execution_number != execution_number): + if job_execution is None or ( + execution_number is not None + and job_execution.execution_number != execution_number + ): raise ResourceNotFoundException() return job_execution - def cancel_job_execution(self, job_id, thing_name, force, expected_version, status_details): + def cancel_job_execution( + self, job_id, thing_name, force, expected_version, status_details + ): job_execution = self.job_executions[(job_id, thing_name)] if job_execution is None: raise ResourceNotFoundException() - job_execution.force_canceled = force if force is not None else job_execution.force_canceled + job_execution.force_canceled = ( + force if force is not None else job_execution.force_canceled + ) # TODO: implement expected_version and status_details (at most 10 can be specified) if job_execution.status == "IN_PROGRESS" and force: @@ -1076,11 +1098,19 @@ class IoTBackend(BaseBackend): raise InvalidStateTransitionException() def list_job_executions_for_job(self, job_id, status, max_results, next_token): - job_executions = 
[self.job_executions[je].to_dict() for je in self.job_executions if je[0] == job_id] + job_executions = [ + self.job_executions[je].to_dict() + for je in self.job_executions + if je[0] == job_id + ] if status is not None: - job_executions = list(filter(lambda elem: - status in elem["status"] and elem["status"] == status, job_executions)) + job_executions = list( + filter( + lambda elem: status in elem["status"] and elem["status"] == status, + job_executions, + ) + ) token = next_token if token is None: @@ -1088,17 +1118,31 @@ class IoTBackend(BaseBackend): next_token = str(max_results) if len(job_executions) > max_results else None else: token = int(token) - job_executions = job_executions[token:token + max_results] - next_token = str(token + max_results) if len(job_executions) > token + max_results else None + job_executions = job_executions[token : token + max_results] + next_token = ( + str(token + max_results) + if len(job_executions) > token + max_results + else None + ) return job_executions, next_token - def list_job_executions_for_thing(self, thing_name, status, max_results, next_token): - job_executions = [self.job_executions[je].to_dict() for je in self.job_executions if je[1] == thing_name] + def list_job_executions_for_thing( + self, thing_name, status, max_results, next_token + ): + job_executions = [ + self.job_executions[je].to_dict() + for je in self.job_executions + if je[1] == thing_name + ] if status is not None: - job_executions = list(filter(lambda elem: - status in elem["status"] and elem["status"] == status, job_executions)) + job_executions = list( + filter( + lambda elem: status in elem["status"] and elem["status"] == status, + job_executions, + ) + ) token = next_token if token is None: @@ -1106,8 +1150,12 @@ class IoTBackend(BaseBackend): next_token = str(max_results) if len(job_executions) > max_results else None else: token = int(token) - job_executions = job_executions[token:token + max_results] - next_token = str(token + max_results) if len(job_executions) > token + max_results else None + job_executions = job_executions[token : token + max_results] + next_token = ( + str(token + max_results) + if len(job_executions) > token + max_results + else None + ) return job_executions, next_token diff --git a/moto/iot/responses.py b/moto/iot/responses.py index e88e9264a..c12d4b5c5 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -133,33 +133,35 @@ class IoTResponse(BaseResponse): def describe_job(self): job = self.iot_backend.describe_job(job_id=self._get_param("jobId")) - return json.dumps(dict( - documentSource=job.document_source, - job=dict( - comment=job.comment, - completedAt=job.completed_at, - createdAt=job.created_at, - description=job.description, - documentParameters=job.document_parameters, - forceCanceled=job.force, - reasonCode=job.reason_code, - jobArn=job.job_arn, - jobExecutionsRolloutConfig=job.job_executions_rollout_config, - jobId=job.job_id, - jobProcessDetails=job.job_process_details, - lastUpdatedAt=job.last_updated_at, - presignedUrlConfig=job.presigned_url_config, - status=job.status, - targets=job.targets, - targetSelection=job.target_selection - ))) + return json.dumps( + dict( + documentSource=job.document_source, + job=dict( + comment=job.comment, + completedAt=job.completed_at, + createdAt=job.created_at, + description=job.description, + documentParameters=job.document_parameters, + forceCanceled=job.force, + reasonCode=job.reason_code, + jobArn=job.job_arn, + jobExecutionsRolloutConfig=job.job_executions_rollout_config, 
+ jobId=job.job_id, + jobProcessDetails=job.job_process_details, + lastUpdatedAt=job.last_updated_at, + presignedUrlConfig=job.presigned_url_config, + status=job.status, + targets=job.targets, + targetSelection=job.target_selection, + ), + ) + ) def delete_job(self): job_id = self._get_param("jobId") force = self._get_bool_param("force") - self.iot_backend.delete_job(job_id=job_id, - force=force) + self.iot_backend.delete_job(job_id=job_id, force=force) return json.dumps(dict()) @@ -169,10 +171,9 @@ class IoTResponse(BaseResponse): comment = self._get_param("comment") force = self._get_bool_param("force") - job = self.iot_backend.cancel_job(job_id=job_id, - reason_code=reason_code, - comment=comment, - force=force) + job = self.iot_backend.cancel_job( + job_id=job_id, reason_code=reason_code, comment=comment, force=force + ) return json.dumps(job.to_dict()) @@ -180,25 +181,29 @@ class IoTResponse(BaseResponse): job = self.iot_backend.get_job_document(job_id=self._get_param("jobId")) if job.document is not None: - return json.dumps({'document': job.document}) + return json.dumps({"document": job.document}) else: # job.document_source is not None: # TODO: needs to be implemented to get document_source's content from S3 - return json.dumps({'document': ''}) + return json.dumps({"document": ""}) def list_jobs(self): - status = self._get_param("status"), - target_selection = self._get_param("targetSelection"), - max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier + status = (self._get_param("status"),) + target_selection = (self._get_param("targetSelection"),) + max_results = self._get_int_param( + "maxResults", 50 + ) # not the default, but makes testing easier previous_next_token = self._get_param("nextToken") - thing_group_name = self._get_param("thingGroupName"), + thing_group_name = (self._get_param("thingGroupName"),) thing_group_id = self._get_param("thingGroupId") - jobs, next_token = self.iot_backend.list_jobs(status=status, - target_selection=target_selection, - max_results=max_results, - token=previous_next_token, - thing_group_name=thing_group_name, - thing_group_id=thing_group_id) + jobs, next_token = self.iot_backend.list_jobs( + status=status, + target_selection=target_selection, + max_results=max_results, + token=previous_next_token, + thing_group_name=thing_group_name, + thing_group_id=thing_group_id, + ) return json.dumps(dict(jobs=jobs, nextToken=next_token)) @@ -206,9 +211,9 @@ class IoTResponse(BaseResponse): job_id = self._get_param("jobId") thing_name = self._get_param("thingName") execution_number = self._get_int_param("executionNumber") - job_execution = self.iot_backend.describe_job_execution(job_id=job_id, - thing_name=thing_name, - execution_number=execution_number) + job_execution = self.iot_backend.describe_job_execution( + job_id=job_id, thing_name=thing_name, execution_number=execution_number + ) return json.dumps(dict(execution=job_execution.to_get_dict())) @@ -219,11 +224,13 @@ class IoTResponse(BaseResponse): expected_version = self._get_int_param("expectedVersion") status_details = self._get_param("statusDetails") - self.iot_backend.cancel_job_execution(job_id=job_id, - thing_name=thing_name, - force=force, - expected_version=expected_version, - status_details=status_details) + self.iot_backend.cancel_job_execution( + job_id=job_id, + thing_name=thing_name, + force=force, + expected_version=expected_version, + status_details=status_details, + ) return json.dumps(dict()) @@ -233,34 +240,41 @@ class 
IoTResponse(BaseResponse): execution_number = self._get_int_param("executionNumber") force = self._get_bool_param("force") - self.iot_backend.delete_job_execution(job_id=job_id, - thing_name=thing_name, - execution_number=execution_number, - force=force) + self.iot_backend.delete_job_execution( + job_id=job_id, + thing_name=thing_name, + execution_number=execution_number, + force=force, + ) return json.dumps(dict()) def list_job_executions_for_job(self): job_id = self._get_param("jobId") status = self._get_param("status") - max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier + max_results = self._get_int_param( + "maxResults", 50 + ) # not the default, but makes testing easier next_token = self._get_param("nextToken") - job_executions, next_token = self.iot_backend.list_job_executions_for_job(job_id=job_id, - status=status, - max_results=max_results, - next_token=next_token) + job_executions, next_token = self.iot_backend.list_job_executions_for_job( + job_id=job_id, status=status, max_results=max_results, next_token=next_token + ) return json.dumps(dict(executionSummaries=job_executions, nextToken=next_token)) def list_job_executions_for_thing(self): thing_name = self._get_param("thingName") status = self._get_param("status") - max_results = self._get_int_param("maxResults", 50) # not the default, but makes testing easier + max_results = self._get_int_param( + "maxResults", 50 + ) # not the default, but makes testing easier next_token = self._get_param("nextToken") - job_executions, next_token = self.iot_backend.list_job_executions_for_thing(thing_name=thing_name, - status=status, - max_results=max_results, - next_token=next_token) + job_executions, next_token = self.iot_backend.list_job_executions_for_thing( + thing_name=thing_name, + status=status, + max_results=max_results, + next_token=next_token, + ) return json.dumps(dict(executionSummaries=job_executions, nextToken=next_token)) @@ -352,35 +366,39 @@ class IoTResponse(BaseResponse): return json.dumps(dict()) def create_policy_version(self): - policy_name = self._get_param('policyName') - policy_document = self._get_param('policyDocument') - set_as_default = self._get_bool_param('setAsDefault') - policy_version = self.iot_backend.create_policy_version(policy_name, policy_document, set_as_default) + policy_name = self._get_param("policyName") + policy_document = self._get_param("policyDocument") + set_as_default = self._get_bool_param("setAsDefault") + policy_version = self.iot_backend.create_policy_version( + policy_name, policy_document, set_as_default + ) return json.dumps(dict(policy_version.to_dict_at_creation())) def set_default_policy_version(self): - policy_name = self._get_param('policyName') - version_id = self._get_param('policyVersionId') + policy_name = self._get_param("policyName") + version_id = self._get_param("policyVersionId") self.iot_backend.set_default_policy_version(policy_name, version_id) return json.dumps(dict()) def get_policy_version(self): - policy_name = self._get_param('policyName') - version_id = self._get_param('policyVersionId') + policy_name = self._get_param("policyName") + version_id = self._get_param("policyVersionId") policy_version = self.iot_backend.get_policy_version(policy_name, version_id) return json.dumps(dict(policy_version.to_get_dict())) def list_policy_versions(self): - policy_name = self._get_param('policyName') - policiy_versions = self.iot_backend.list_policy_versions(policy_name=policy_name) + policy_name = self._get_param("policyName") + 
policiy_versions = self.iot_backend.list_policy_versions( + policy_name=policy_name + ) return json.dumps(dict(policyVersions=[_.to_dict() for _ in policiy_versions])) def delete_policy_version(self): - policy_name = self._get_param('policyName') - version_id = self._get_param('policyVersionId') + policy_name = self._get_param("policyName") + version_id = self._get_param("policyVersionId") self.iot_backend.delete_policy_version(policy_name, version_id) return json.dumps(dict()) @@ -392,15 +410,15 @@ class IoTResponse(BaseResponse): return json.dumps(dict()) def list_attached_policies(self): - principal = unquote(self._get_param('target')) + principal = unquote(self._get_param("target")) # marker = self._get_param("marker") # page_size = self._get_int_param("pageSize") - policies = self.iot_backend.list_attached_policies( - target=principal - ) + policies = self.iot_backend.list_attached_policies(target=principal) # TODO: implement pagination in the future next_marker = None - return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) + return json.dumps( + dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker) + ) def attach_principal_policy(self): policy_name = self._get_param("policyName") diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 49a0af974..f8c4f579c 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -1,13 +1,14 @@ from __future__ import unicode_literals import json -import sure #noqa +import sure # noqa import boto3 from moto import mock_iot from botocore.exceptions import ClientError from nose.tools import assert_raises + @mock_iot def test_attach_policy(): client = boto3.client("iot", region_name="ap-northeast-1") @@ -68,67 +69,111 @@ def test_policy_versions(): policy.should.have.key("policyName").which.should.equal(policy_name) policy.should.have.key("policyArn").which.should_not.be.none policy.should.have.key("policyDocument").which.should.equal(json.dumps({})) - policy.should.have.key("defaultVersionId").which.should.equal(policy["defaultVersionId"]) + policy.should.have.key("defaultVersionId").which.should.equal( + policy["defaultVersionId"] + ) - policy1 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({"version": "version_1"}), - setAsDefault=True) + policy1 = client.create_policy_version( + policyName=policy_name, + policyDocument=json.dumps({"version": "version_1"}), + setAsDefault=True, + ) policy1.should.have.key("policyArn").which.should_not.be.none - policy1.should.have.key("policyDocument").which.should.equal(json.dumps({"version": "version_1"})) + policy1.should.have.key("policyDocument").which.should.equal( + json.dumps({"version": "version_1"}) + ) policy1.should.have.key("policyVersionId").which.should.equal("2") policy1.should.have.key("isDefaultVersion").which.should.equal(True) - policy2 = client.create_policy_version(policyName=policy_name, policyDocument=json.dumps({"version": "version_2"}), - setAsDefault=False) + policy2 = client.create_policy_version( + policyName=policy_name, + policyDocument=json.dumps({"version": "version_2"}), + setAsDefault=False, + ) policy2.should.have.key("policyArn").which.should_not.be.none - policy2.should.have.key("policyDocument").which.should.equal(json.dumps({"version": "version_2"})) + policy2.should.have.key("policyDocument").which.should.equal( + json.dumps({"version": "version_2"}) + ) policy2.should.have.key("policyVersionId").which.should.equal("3") 
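(Illustrative aside, not part of the patch: the assertions in this test repeatedly pick the default entry out of a list_policy_versions response. A hypothetical helper capturing that pattern, written against the response shape moto returns here, would be:)

    def get_default_version_id(response):
        # exactly one entry in "policyVersions" should be flagged as default
        defaults = [
            version
            for version in response["policyVersions"]
            if version["isDefaultVersion"]
        ]
        assert len(defaults) == 1
        return defaults[0]["versionId"]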
policy2.should.have.key("isDefaultVersion").which.should.equal(False) policy = client.get_policy(policyName=policy_name) policy.should.have.key("policyName").which.should.equal(policy_name) policy.should.have.key("policyArn").which.should_not.be.none - policy.should.have.key("policyDocument").which.should.equal(json.dumps({"version": "version_1"})) - policy.should.have.key("defaultVersionId").which.should.equal(policy1["policyVersionId"]) + policy.should.have.key("policyDocument").which.should.equal( + json.dumps({"version": "version_1"}) + ) + policy.should.have.key("defaultVersionId").which.should.equal( + policy1["policyVersionId"] + ) policy_versions = client.list_policy_versions(policyName=policy_name) policy_versions.should.have.key("policyVersions").which.should.have.length_of(3) - list(map(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"])).count(True).should.equal(1) - default_policy = list(filter(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"])) - default_policy[0].should.have.key("versionId").should.equal(policy1["policyVersionId"]) + list( + map(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"]) + ).count(True).should.equal(1) + default_policy = list( + filter(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"]) + ) + default_policy[0].should.have.key("versionId").should.equal( + policy1["policyVersionId"] + ) policy = client.get_policy(policyName=policy_name) policy.should.have.key("policyName").which.should.equal(policy_name) policy.should.have.key("policyArn").which.should_not.be.none - policy.should.have.key("policyDocument").which.should.equal(json.dumps({"version": "version_1"})) - policy.should.have.key("defaultVersionId").which.should.equal(policy1["policyVersionId"]) + policy.should.have.key("policyDocument").which.should.equal( + json.dumps({"version": "version_1"}) + ) + policy.should.have.key("defaultVersionId").which.should.equal( + policy1["policyVersionId"] + ) - client.set_default_policy_version(policyName=policy_name, policyVersionId=policy2["policyVersionId"]) + client.set_default_policy_version( + policyName=policy_name, policyVersionId=policy2["policyVersionId"] + ) policy_versions = client.list_policy_versions(policyName=policy_name) policy_versions.should.have.key("policyVersions").which.should.have.length_of(3) - list(map(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"])).count(True).should.equal(1) - default_policy = list(filter(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"])) - default_policy[0].should.have.key("versionId").should.equal(policy2["policyVersionId"]) + list( + map(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"]) + ).count(True).should.equal(1) + default_policy = list( + filter(lambda item: item["isDefaultVersion"], policy_versions["policyVersions"]) + ) + default_policy[0].should.have.key("versionId").should.equal( + policy2["policyVersionId"] + ) policy = client.get_policy(policyName=policy_name) policy.should.have.key("policyName").which.should.equal(policy_name) policy.should.have.key("policyArn").which.should_not.be.none - policy.should.have.key("policyDocument").which.should.equal(json.dumps({"version": "version_2"})) - policy.should.have.key("defaultVersionId").which.should.equal(policy2["policyVersionId"]) + policy.should.have.key("policyDocument").which.should.equal( + json.dumps({"version": "version_2"}) + ) + 
policy.should.have.key("defaultVersionId").which.should.equal( + policy2["policyVersionId"] + ) client.delete_policy_version(policyName=policy_name, policyVersionId="1") policy_versions = client.list_policy_versions(policyName=policy_name) policy_versions.should.have.key("policyVersions").which.should.have.length_of(2) - client.delete_policy_version(policyName=policy_name, policyVersionId=policy1["policyVersionId"]) + client.delete_policy_version( + policyName=policy_name, policyVersionId=policy1["policyVersionId"] + ) policy_versions = client.list_policy_versions(policyName=policy_name) policy_versions.should.have.key("policyVersions").which.should.have.length_of(1) # should fail as it"s the default policy. Should use delete_policy instead try: - client.delete_policy_version(policyName=policy_name, policyVersionId=policy2["policyVersionId"]) + client.delete_policy_version( + policyName=policy_name, policyVersionId=policy2["policyVersionId"] + ) assert False, "Should have failed in previous call" except Exception as exception: - exception.response["Error"]["Message"].should.equal("Cannot delete the default version of a policy") + exception.response["Error"]["Message"].should.equal( + "Cannot delete the default version of a policy" + ) @mock_iot @@ -1159,9 +1204,7 @@ def test_list_jobs(): thing.should.have.key("thingArn") # job document - job_document = { - "field": "value" - } + job_document = {"field": "value"} job1 = client.create_job( jobId=job_id, @@ -1170,12 +1213,10 @@ def test_list_jobs(): description="Description", presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) job1.should.have.key("jobId").which.should.equal(job_id) @@ -1183,21 +1224,19 @@ def test_list_jobs(): job1.should.have.key("description") job2 = client.create_job( - jobId=job_id+"1", + jobId=job_id + "1", targets=[thing["thingArn"]], document=json.dumps(job_document), description="Description", presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) - job2.should.have.key("jobId").which.should.equal(job_id+"1") + job2.should.have.key("jobId").which.should.equal(job_id + "1") job2.should.have.key("jobArn") job2.should.have.key("description") @@ -1205,7 +1244,7 @@ def test_list_jobs(): jobs.should.have.key("jobs") jobs.should_not.have.key("nextToken") jobs["jobs"][0].should.have.key("jobId").which.should.equal(job_id) - jobs["jobs"][1].should.have.key("jobId").which.should.equal(job_id+"1") + jobs["jobs"][1].should.have.key("jobId").which.should.equal(job_id + "1") @mock_iot @@ -1297,14 +1336,21 @@ def test_describe_job_1(): job.should.have.key("job").which.should.have.key("lastUpdatedAt") job.should.have.key("job").which.should.have.key("createdAt") job.should.have.key("job").which.should.have.key("jobExecutionsRolloutConfig") - job.should.have.key("job").which.should.have.key("targetSelection").which.should.equal("CONTINUOUS") + job.should.have.key("job").which.should.have.key( + "targetSelection" + ).which.should.equal("CONTINUOUS") job.should.have.key("job").which.should.have.key("presignedUrlConfig") - 
job.should.have.key("job").which.should.have.key("presignedUrlConfig").which.should.have.key( - "roleArn").which.should.equal("arn:aws:iam::1:role/service-role/iot_job_role") - job.should.have.key("job").which.should.have.key("presignedUrlConfig").which.should.have.key( - "expiresInSec").which.should.equal(123) - job.should.have.key("job").which.should.have.key("jobExecutionsRolloutConfig").which.should.have.key( - "maximumPerMinute").which.should.equal(10) + job.should.have.key("job").which.should.have.key( + "presignedUrlConfig" + ).which.should.have.key("roleArn").which.should.equal( + "arn:aws:iam::1:role/service-role/iot_job_role" + ) + job.should.have.key("job").which.should.have.key( + "presignedUrlConfig" + ).which.should.have.key("expiresInSec").which.should.equal(123) + job.should.have.key("job").which.should.have.key( + "jobExecutionsRolloutConfig" + ).which.should.have.key("maximumPerMinute").which.should.equal(10) @mock_iot @@ -1323,12 +1369,10 @@ def test_delete_job(): documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) job.should.have.key("jobId").which.should.equal(job_id) @@ -1359,12 +1403,10 @@ def test_cancel_job(): documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) job.should.have.key("jobId").which.should.equal(job_id) @@ -1381,10 +1423,18 @@ def test_cancel_job(): job = client.describe_job(jobId=job_id) job.should.have.key("job") job.should.have.key("job").which.should.have.key("jobId").which.should.equal(job_id) - job.should.have.key("job").which.should.have.key("status").which.should.equal("CANCELED") - job.should.have.key("job").which.should.have.key("forceCanceled").which.should.equal(False) - job.should.have.key("job").which.should.have.key("reasonCode").which.should.equal("Because") - job.should.have.key("job").which.should.have.key("comment").which.should.equal("You are") + job.should.have.key("job").which.should.have.key("status").which.should.equal( + "CANCELED" + ) + job.should.have.key("job").which.should.have.key( + "forceCanceled" + ).which.should.equal(False) + job.should.have.key("job").which.should.have.key("reasonCode").which.should.equal( + "Because" + ) + job.should.have.key("job").which.should.have.key("comment").which.should.equal( + "You are" + ) @mock_iot @@ -1403,12 +1453,10 @@ def test_get_job_document_with_document_source(): documentSource="https://s3-eu-west-1.amazonaws.com/bucket-name/job_document.json", presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) job.should.have.key("jobId").which.should.equal(job_id) @@ -1429,9 +1477,7 @@ def test_get_job_document_with_document(): thing.should.have.key("thingArn") # job document - job_document = { - "field": "value" - } + job_document = {"field": "value"} job = 
client.create_job( jobId=job_id, @@ -1439,19 +1485,17 @@ def test_get_job_document_with_document(): document=json.dumps(job_document), presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) job.should.have.key("jobId").which.should.equal(job_id) job.should.have.key("jobArn") job_document = client.get_job_document(jobId=job_id) - job_document.should.have.key("document").which.should.equal("{\"field\": \"value\"}") + job_document.should.have.key("document").which.should.equal('{"field": "value"}') @mock_iot @@ -1465,9 +1509,7 @@ def test_describe_job_execution(): thing.should.have.key("thingArn") # job document - job_document = { - "field": "value" - } + job_document = {"field": "value"} job = client.create_job( jobId=job_id, @@ -1476,12 +1518,10 @@ def test_describe_job_execution(): description="Description", presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) job.should.have.key("jobId").which.should.equal(job_id) @@ -1492,29 +1532,51 @@ def test_describe_job_execution(): job_execution.should.have.key("execution") job_execution["execution"].should.have.key("jobId").which.should.equal(job_id) job_execution["execution"].should.have.key("status").which.should.equal("QUEUED") - job_execution["execution"].should.have.key("forceCanceled").which.should.equal(False) - job_execution["execution"].should.have.key("statusDetails").which.should.equal({"detailsMap": {}}) - job_execution["execution"].should.have.key("thingArn").which.should.equal(thing["thingArn"]) + job_execution["execution"].should.have.key("forceCanceled").which.should.equal( + False + ) + job_execution["execution"].should.have.key("statusDetails").which.should.equal( + {"detailsMap": {}} + ) + job_execution["execution"].should.have.key("thingArn").which.should.equal( + thing["thingArn"] + ) job_execution["execution"].should.have.key("queuedAt") job_execution["execution"].should.have.key("startedAt") job_execution["execution"].should.have.key("lastUpdatedAt") - job_execution["execution"].should.have.key("executionNumber").which.should.equal(123) + job_execution["execution"].should.have.key("executionNumber").which.should.equal( + 123 + ) job_execution["execution"].should.have.key("versionNumber").which.should.equal(123) - job_execution["execution"].should.have.key("approximateSecondsBeforeTimedOut").which.should.equal(123) + job_execution["execution"].should.have.key( + "approximateSecondsBeforeTimedOut" + ).which.should.equal(123) - job_execution = client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=123) + job_execution = client.describe_job_execution( + jobId=job_id, thingName=name, executionNumber=123 + ) job_execution.should.have.key("execution") job_execution["execution"].should.have.key("jobId").which.should.equal(job_id) job_execution["execution"].should.have.key("status").which.should.equal("QUEUED") - job_execution["execution"].should.have.key("forceCanceled").which.should.equal(False) - job_execution["execution"].should.have.key("statusDetails").which.should.equal({"detailsMap": {}}) - 
job_execution["execution"].should.have.key("thingArn").which.should.equal(thing["thingArn"]) + job_execution["execution"].should.have.key("forceCanceled").which.should.equal( + False + ) + job_execution["execution"].should.have.key("statusDetails").which.should.equal( + {"detailsMap": {}} + ) + job_execution["execution"].should.have.key("thingArn").which.should.equal( + thing["thingArn"] + ) job_execution["execution"].should.have.key("queuedAt") job_execution["execution"].should.have.key("startedAt") job_execution["execution"].should.have.key("lastUpdatedAt") - job_execution["execution"].should.have.key("executionNumber").which.should.equal(123) + job_execution["execution"].should.have.key("executionNumber").which.should.equal( + 123 + ) job_execution["execution"].should.have.key("versionNumber").which.should.equal(123) - job_execution["execution"].should.have.key("approximateSecondsBeforeTimedOut").which.should.equal(123) + job_execution["execution"].should.have.key( + "approximateSecondsBeforeTimedOut" + ).which.should.equal(123) try: client.describe_job_execution(jobId=job_id, thingName=name, executionNumber=456) @@ -1536,9 +1598,7 @@ def test_cancel_job_execution(): thing.should.have.key("thingArn") # job document - job_document = { - "field": "value" - } + job_document = {"field": "value"} job = client.create_job( jobId=job_id, @@ -1547,12 +1607,10 @@ def test_cancel_job_execution(): description="Description", presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) job.should.have.key("jobId").which.should.equal(job_id) @@ -1576,9 +1634,7 @@ def test_delete_job_execution(): thing.should.have.key("thingArn") # job document - job_document = { - "field": "value" - } + job_document = {"field": "value"} job = client.create_job( jobId=job_id, @@ -1587,12 +1643,10 @@ def test_delete_job_execution(): description="Description", presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) job.should.have.key("jobId").which.should.equal(job_id) @@ -1620,9 +1674,7 @@ def test_list_job_executions_for_job(): thing.should.have.key("thingArn") # job document - job_document = { - "field": "value" - } + job_document = {"field": "value"} job = client.create_job( jobId=job_id, @@ -1631,12 +1683,10 @@ def test_list_job_executions_for_job(): description="Description", presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) job.should.have.key("jobId").which.should.equal(job_id) @@ -1645,7 +1695,9 @@ def test_list_job_executions_for_job(): job_execution = client.list_job_executions_for_job(jobId=job_id) job_execution.should.have.key("executionSummaries") - job_execution["executionSummaries"][0].should.have.key("thingArn").which.should.equal(thing["thingArn"]) + job_execution["executionSummaries"][0].should.have.key( + "thingArn" + ).which.should.equal(thing["thingArn"]) @mock_iot @@ -1659,9 +1711,7 @@ def test_list_job_executions_for_thing(): 
thing.should.have.key("thingArn") # job document - job_document = { - "field": "value" - } + job_document = {"field": "value"} job = client.create_job( jobId=job_id, @@ -1670,12 +1720,10 @@ def test_list_job_executions_for_thing(): description="Description", presignedUrlConfig={ "roleArn": "arn:aws:iam::1:role/service-role/iot_job_role", - "expiresInSec": 123 + "expiresInSec": 123, }, targetSelection="CONTINUOUS", - jobExecutionsRolloutConfig={ - "maximumPerMinute": 10 - } + jobExecutionsRolloutConfig={"maximumPerMinute": 10}, ) job.should.have.key("jobId").which.should.equal(job_id) @@ -1684,5 +1732,6 @@ def test_list_job_executions_for_thing(): job_execution = client.list_job_executions_for_thing(thingName=name) job_execution.should.have.key("executionSummaries") - job_execution["executionSummaries"][0].should.have.key("jobId").which.should.equal(job_id) - + job_execution["executionSummaries"][0].should.have.key("jobId").which.should.equal( + job_id + ) From 4f86a9c3cd89ced8610153805a934eed05c3e659 Mon Sep 17 00:00:00 2001 From: Stephan Huber Date: Mon, 23 Dec 2019 10:24:56 +0100 Subject: [PATCH 024/125] replace CRLF line ending with LF --- file.tmp | 9 --------- travis_moto_server.sh | 8 ++++---- 2 files changed, 4 insertions(+), 13 deletions(-) delete mode 100644 file.tmp diff --git a/file.tmp b/file.tmp deleted file mode 100644 index 0b91630a9..000000000 --- a/file.tmp +++ /dev/null @@ -1,9 +0,0 @@ - - AWSTemplateFormatVersion: '2010-09-09' - Description: Simple CloudFormation Test Template - Resources: - S3Bucket: - Type: AWS::S3::Bucket - Properties: - AccessControl: PublicRead - BucketName: cf-test-bucket-1 diff --git a/travis_moto_server.sh b/travis_moto_server.sh index 3c6947fd9..902644b20 100755 --- a/travis_moto_server.sh +++ b/travis_moto_server.sh @@ -1,5 +1,5 @@ -#!/usr/bin/env bash -set -e -pip install flask -pip install /moto/dist/moto*.gz +#!/usr/bin/env bash +set -e +pip install flask +pip install /moto/dist/moto*.gz moto_server -H 0.0.0.0 -p 5000 \ No newline at end of file From 877f3b056aebf619ccb2fafa91ef334dc43bfaae Mon Sep 17 00:00:00 2001 From: Dejan Levec Date: Fri, 27 Dec 2019 18:53:14 +0100 Subject: [PATCH 025/125] Add IsTruncated to Route53.list_resource_record_sets --- moto/route53/responses.py | 1 + tests/test_route53/test_route53.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/moto/route53/responses.py b/moto/route53/responses.py index 3e688b65d..077c89a2c 100644 --- a/moto/route53/responses.py +++ b/moto/route53/responses.py @@ -271,6 +271,7 @@ LIST_RRSET_RESPONSE = """ diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py index 0e9a1e2c0..746c78719 100644 --- a/tests/test_route53/test_route53.py +++ b/tests/test_route53/test_route53.py @@ -862,6 +862,8 @@ def test_list_resource_record_sets_name_type_filters(): StartRecordName=all_records[start_with][1], ) + response["IsTruncated"].should.equal(False) + returned_records = [ (record["Type"], record["Name"]) for record in response["ResourceRecordSets"] ] From 000cb968a44277d5dcd696f7745775468e3d45e6 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 5 Jan 2020 11:36:51 +0000 Subject: [PATCH 026/125] #2623 - Only return response from lambda, skip log output --- moto/awslambda/models.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 95a5c4ad5..38ff81fb2 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -53,9 +53,6 @@ try: except ImportError: from 
backports.tempfile import TemporaryDirectory -# The lambci container is returning a special escape character for the "RequestID" fields. Unicode 033: -# _stderr_regex = re.compile(r"START|END|REPORT RequestId: .*") -_stderr_regex = re.compile(r"\033\[\d+.*") _orig_adapter_send = requests.adapters.HTTPAdapter.send docker_3 = docker.__version__[0] >= "3" @@ -385,7 +382,7 @@ class LambdaFunction(BaseModel): try: # TODO: I believe we can keep the container running and feed events as needed # also need to hook it up to the other services so it can make kws/s3 etc calls - # Should get invoke_id /RequestId from invovation + # Should get invoke_id /RequestId from invocation env_vars = { "AWS_LAMBDA_FUNCTION_TIMEOUT": self.timeout, "AWS_LAMBDA_FUNCTION_NAME": self.function_name, @@ -453,14 +450,9 @@ class LambdaFunction(BaseModel): if exit_code != 0: raise Exception("lambda invoke failed output: {}".format(output)) - # strip out RequestId lines (TODO: This will return an additional '\n' in the response) - output = os.linesep.join( - [ - line - for line in self.convert(output).splitlines() - if not _stderr_regex.match(line) - ] - ) + # We only care about the response from the lambda + # Which is the last line of the output, according to https://github.com/lambci/docker-lambda/issues/25 + output = output.splitlines()[-1] return output, False except BaseException as e: traceback.print_exc() From eab9e15bf08bf89ec8552c614a39457899b8427f Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 5 Jan 2020 15:01:31 +0000 Subject: [PATCH 027/125] #2623 - Fix and simplify test in ServerMode --- tests/test_awslambda/test_lambda.py | 27 +++++---------------------- 1 file changed, 5 insertions(+), 22 deletions(-) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 6fd97e325..e378f6ee2 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -58,8 +58,7 @@ def lambda_handler(event, context): volume_id = event.get('volume_id') vol = ec2.Volume(volume_id) - print('get volume details for %s\\nVolume - %s state=%s, size=%s' % (volume_id, volume_id, vol.state, vol.size)) - return event + return {'id': vol.id, 'state': vol.state, 'size': vol.size} """.format( base_url="motoserver:5000" if settings.TEST_SERVER_MODE @@ -181,27 +180,11 @@ if settings.TEST_SERVER_MODE: Payload=json.dumps(in_data), ) result["StatusCode"].should.equal(202) - msg = "get volume details for %s\nVolume - %s state=%s, size=%s\n%s" % ( - vol.id, - vol.id, - vol.state, - vol.size, - json.dumps(in_data).replace( - " ", "" - ), # Makes the tests pass as the result is missing the whitespace + actual_payload = result["Payload"].read().decode("utf-8") + expected_payload = json.dumps( + {"id": vol.id, "state": vol.state, "size": vol.size} ) - - log_result = base64.b64decode(result["LogResult"]).decode("utf-8") - - # The Docker lambda invocation will return an additional '\n', so need to replace it: - log_result = log_result.replace("\n\n", "\n") - log_result.should.equal(msg) - - payload = result["Payload"].read().decode("utf-8") - - # The Docker lambda invocation will return an additional '\n', so need to replace it: - payload = payload.replace("\n\n", "\n") - payload.should.equal(msg) + actual_payload.should.equal(expected_payload) @mock_logs From 68d882e6c0408b029ab0be5a8641d19c7652a154 Mon Sep 17 00:00:00 2001 From: Franz See Date: Sun, 5 Jan 2020 23:55:04 +0800 Subject: [PATCH 028/125] moto/issues/2672 | Modified 'token_use' to return 'id' for an id token, and 'access' 
for an access token --- moto/cognitoidp/models.py | 8 ++++---- tests/test_cognitoidp/test_cognitoidp.py | 3 ++- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py index 6700920ce..082fa5189 100644 --- a/moto/cognitoidp/models.py +++ b/moto/cognitoidp/models.py @@ -108,7 +108,7 @@ class CognitoIdpUserPool(BaseModel): return user_pool_json - def create_jwt(self, client_id, username, expires_in=60 * 60, extra_data={}): + def create_jwt(self, client_id, username, token_use, expires_in=60 * 60, extra_data={}): now = int(time.time()) payload = { "iss": "https://cognito-idp.{}.amazonaws.com/{}".format( @@ -116,7 +116,7 @@ class CognitoIdpUserPool(BaseModel): ), "sub": self.users[username].id, "aud": client_id, - "token_use": "id", + "token_use": token_use, "auth_time": now, "exp": now + expires_in, } @@ -125,7 +125,7 @@ class CognitoIdpUserPool(BaseModel): return jws.sign(payload, self.json_web_key, algorithm="RS256"), expires_in def create_id_token(self, client_id, username): - id_token, expires_in = self.create_jwt(client_id, username) + id_token, expires_in = self.create_jwt(client_id, username, "id") self.id_tokens[id_token] = (client_id, username) return id_token, expires_in @@ -137,7 +137,7 @@ class CognitoIdpUserPool(BaseModel): def create_access_token(self, client_id, username): extra_data = self.get_user_extra_data_by_client_id(client_id, username) access_token, expires_in = self.create_jwt( - client_id, username, extra_data=extra_data + client_id, username, "access", extra_data=extra_data ) self.access_tokens[access_token] = (client_id, username) return access_token, expires_in diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index 7ac1038b0..71a6e3191 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -1142,12 +1142,13 @@ def test_token_legitimacy(): id_claims = json.loads(jws.verify(id_token, json_web_key, "RS256")) id_claims["iss"].should.equal(issuer) id_claims["aud"].should.equal(client_id) + id_claims["token_use"].should.equal("id") access_claims = json.loads(jws.verify(access_token, json_web_key, "RS256")) access_claims["iss"].should.equal(issuer) access_claims["aud"].should.equal(client_id) for k, v in outputs["additional_fields"].items(): access_claims[k].should.equal(v) - + access_claims["token_use"].should.equal("access") @mock_cognitoidp def test_change_password(): From a8e1a3bf08312581bf4fae1908cc1bcb76aef7d6 Mon Sep 17 00:00:00 2001 From: Franz See Date: Mon, 6 Jan 2020 13:29:23 +0800 Subject: [PATCH 029/125] moto/issues/2672 | Formatted using black --- moto/cognitoidp/models.py | 4 +++- tests/test_cognitoidp/test_cognitoidp.py | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py index 082fa5189..b67239e93 100644 --- a/moto/cognitoidp/models.py +++ b/moto/cognitoidp/models.py @@ -108,7 +108,9 @@ class CognitoIdpUserPool(BaseModel): return user_pool_json - def create_jwt(self, client_id, username, token_use, expires_in=60 * 60, extra_data={}): + def create_jwt( + self, client_id, username, token_use, expires_in=60 * 60, extra_data={} + ): now = int(time.time()) payload = { "iss": "https://cognito-idp.{}.amazonaws.com/{}".format( diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index 71a6e3191..79e6dbbb8 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ 
b/tests/test_cognitoidp/test_cognitoidp.py @@ -1150,6 +1150,7 @@ def test_token_legitimacy(): access_claims[k].should.equal(v) access_claims["token_use"].should.equal("access") + @mock_cognitoidp def test_change_password(): conn = boto3.client("cognito-idp", "us-west-2") From 5f59cb7fb0551eb39659228b311f894fa55fef96 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Mon, 6 Jan 2020 08:16:09 +0000 Subject: [PATCH 030/125] #2674 - ListAppend should also work when adding maps to a list --- moto/dynamodb2/models.py | 2 +- tests/test_dynamodb2/test_dynamodb.py | 52 +++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 1 deletion(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index d4907cba5..2313a6e41 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -457,7 +457,7 @@ class Item(BaseModel): ) if not old_list.is_list(): raise ParamValidationError - old_list.value.extend(new_value["L"]) + old_list.value.extend([DynamoType(v) for v in new_value["L"]]) value = old_list return value diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 831538054..2d961b406 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -3489,6 +3489,58 @@ def test_update_supports_nested_list_append_onto_another_list(): ) +@mock_dynamodb2 +def test_update_supports_list_append_maps(): + client = boto3.client("dynamodb", region_name="us-west-1") + client.create_table( + AttributeDefinitions=[ + {"AttributeName": "id", "AttributeType": "S"}, + {"AttributeName": "rid", "AttributeType": "S"}, + ], + TableName="TestTable", + KeySchema=[ + {"AttributeName": "id", "KeyType": "HASH"}, + {"AttributeName": "rid", "KeyType": "RANGE"}, + ], + ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5}, + ) + client.put_item( + TableName="TestTable", + Item={ + "id": {"S": "nested_list_append"}, + "rid": {"S": "range_key"}, + "a": {"L": [{"M": {"b": {"S": "bar1"}}}]}, + }, + ) + + # Update item using list_append expression + client.update_item( + TableName="TestTable", + Key={"id": {"S": "nested_list_append"}, "rid": {"S": "range_key"}}, + UpdateExpression="SET a = list_append(a, :i)", + ExpressionAttributeValues={":i": {"L": [{"M": {"b": {"S": "bar2"}}}]}}, + ) + + # Verify item is appended to the existing list + result = client.query( + TableName="TestTable", + KeyConditionExpression="id = :i AND begins_with(rid, :r)", + ExpressionAttributeValues={ + ":i": {"S": "nested_list_append"}, + ":r": {"S": "range_key"}, + }, + )["Items"] + result.should.equal( + [ + { + "a": {"L": [{"M": {"b": {"S": "bar1"}}}, {"M": {"b": {"S": "bar2"}}}]}, + "rid": {"S": "range_key"}, + "id": {"S": "nested_list_append"}, + } + ] + ) + + @mock_dynamodb2 def test_update_catches_invalid_list_append_operation(): client = boto3.client("dynamodb", region_name="us-east-1") From d06a5d3a2b2947e029f544ebbd84eca43e1f6eb5 Mon Sep 17 00:00:00 2001 From: Patrick Delaney Date: Tue, 7 Jan 2020 10:12:50 -0500 Subject: [PATCH 031/125] fix: small fixes to get scripts/scaffold.py working --- scripts/scaffold.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/scaffold.py b/scripts/scaffold.py index be154f103..43a648b48 100755 --- a/scripts/scaffold.py +++ b/scripts/scaffold.py @@ -20,8 +20,8 @@ import jinja2 from prompt_toolkit import ( prompt ) -from prompt_toolkit.contrib.completers import WordCompleter -from prompt_toolkit.shortcuts import print_tokens +from prompt_toolkit.completion import 
WordCompleter +from prompt_toolkit.shortcuts import print_formatted_text from botocore import xform_name from botocore.session import Session @@ -149,12 +149,12 @@ def append_mock_dict_to_backends_py(service): with open(path) as f: lines = [_.replace('\n', '') for _ in f.readlines()] - if any(_ for _ in lines if re.match(".*'{}': {}_backends.*".format(service, service), _)): + if any(_ for _ in lines if re.match(".*\"{}\": {}_backends.*".format(service, service), _)): return - filtered_lines = [_ for _ in lines if re.match(".*'.*':.*_backends.*", _)] + filtered_lines = [_ for _ in lines if re.match(".*\".*\":.*_backends.*", _)] last_elem_line_index = lines.index(filtered_lines[-1]) - new_line = " '{}': {}_backends,".format(service, get_escaped_service(service)) + new_line = " \"{}\": {}_backends,".format(service, get_escaped_service(service)) prev_line = lines[last_elem_line_index] if not prev_line.endswith('{') and not prev_line.endswith(','): lines[last_elem_line_index] += ',' From cba3cfc3843de2bd3fb6d1ad6867761af877843d Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 9 Jan 2020 09:10:16 +0000 Subject: [PATCH 032/125] Escape curly braces in formatting string --- tests/test_awslambda/test_lambda.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index e378f6ee2..2d9a6bd5d 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -58,7 +58,7 @@ def lambda_handler(event, context): volume_id = event.get('volume_id') vol = ec2.Volume(volume_id) - return {'id': vol.id, 'state': vol.state, 'size': vol.size} + return {{'id': vol.id, 'state': vol.state, 'size': vol.size}} """.format( base_url="motoserver:5000" if settings.TEST_SERVER_MODE From 58844830199ed269b44e17ef8e839b2be1cfd2b0 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Thu, 9 Jan 2020 10:08:35 +0000 Subject: [PATCH 033/125] Compare map, instead of string repr --- tests/test_awslambda/test_lambda.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 2d9a6bd5d..2835729f8 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -180,10 +180,8 @@ if settings.TEST_SERVER_MODE: Payload=json.dumps(in_data), ) result["StatusCode"].should.equal(202) - actual_payload = result["Payload"].read().decode("utf-8") - expected_payload = json.dumps( - {"id": vol.id, "state": vol.state, "size": vol.size} - ) + actual_payload = json.loads(result["Payload"].read().decode("utf-8")) + expected_payload = {"id": vol.id, "state": vol.state, "size": vol.size} actual_payload.should.equal(expected_payload) From 2cb3f327de85268165b63f8576e4e135eb59333c Mon Sep 17 00:00:00 2001 From: Don Kuntz Date: Thu, 9 Jan 2020 22:50:55 -0600 Subject: [PATCH 034/125] Store 'networkMode' in ECS Task Definitions instead of just throwing it away --- moto/ecs/models.py | 9 +++++++-- moto/ecs/responses.py | 3 ++- tests/test_ecs/test_ecs_boto3.py | 2 ++ 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index 845bdf650..30075f7f0 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -118,6 +118,7 @@ class TaskDefinition(BaseObject): revision, container_definitions, region_name, + network_mode=None, volumes=None, tags=None, ): @@ -132,6 +133,10 @@ class TaskDefinition(BaseObject): self.volumes = [] else: self.volumes = volumes + if network_mode is None: + 
self.network_mode = 'bridge' + else: + self.network_mode = network_mode @property def response_object(self): @@ -553,7 +558,7 @@ class EC2ContainerServiceBackend(BaseBackend): raise Exception("{0} is not a cluster".format(cluster_name)) def register_task_definition( - self, family, container_definitions, volumes, tags=None + self, family, container_definitions, volumes=None, network_mode=None, tags=None ): if family in self.task_definitions: last_id = self._get_last_task_definition_revision_id(family) @@ -562,7 +567,7 @@ class EC2ContainerServiceBackend(BaseBackend): self.task_definitions[family] = {} revision = 1 task_definition = TaskDefinition( - family, revision, container_definitions, self.region_name, volumes, tags + family, revision, container_definitions, self.region_name, volumes=volumes, network_mode=network_mode, tags=tags ) self.task_definitions[family][revision] = task_definition diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py index d08bded2c..ebbfeb84b 100644 --- a/moto/ecs/responses.py +++ b/moto/ecs/responses.py @@ -62,8 +62,9 @@ class EC2ContainerServiceResponse(BaseResponse): container_definitions = self._get_param("containerDefinitions") volumes = self._get_param("volumes") tags = self._get_param("tags") + network_mode = self._get_param('networkMode') task_definition = self.ecs_backend.register_task_definition( - family, container_definitions, volumes, tags + family, container_definitions, volumes=volumes, network_mode=network_mode, tags=tags, ) return json.dumps({"taskDefinition": task_definition.response_object}) diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 973c95b81..75598f6e5 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -94,6 +94,7 @@ def test_register_task_definition(): "logConfiguration": {"logDriver": "json-file"}, } ], + networkMode='bridge', tags=[ {"key": "createdBy", "value": "moto-unittest"}, {"key": "foo", "value": "bar"}, @@ -124,6 +125,7 @@ def test_register_task_definition(): response["taskDefinition"]["containerDefinitions"][0]["logConfiguration"][ "logDriver" ].should.equal("json-file") + response['taskDefinition']['networkMode'].should.equal('bridge') @mock_ecs From fd1fdde1bf8c0b3b179057e24fbce1c2b1f0a3bf Mon Sep 17 00:00:00 2001 From: Don Kuntz Date: Thu, 9 Jan 2020 23:45:14 -0600 Subject: [PATCH 035/125] Allow black to reformat correctly --- moto/ecs/models.py | 10 ++++++++-- moto/ecs/responses.py | 8 ++++++-- tests/test_ecs/test_ecs_boto3.py | 4 ++-- 3 files changed, 16 insertions(+), 6 deletions(-) diff --git a/moto/ecs/models.py b/moto/ecs/models.py index 30075f7f0..30e4687c4 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -134,7 +134,7 @@ class TaskDefinition(BaseObject): else: self.volumes = volumes if network_mode is None: - self.network_mode = 'bridge' + self.network_mode = "bridge" else: self.network_mode = network_mode @@ -567,7 +567,13 @@ class EC2ContainerServiceBackend(BaseBackend): self.task_definitions[family] = {} revision = 1 task_definition = TaskDefinition( - family, revision, container_definitions, self.region_name, volumes=volumes, network_mode=network_mode, tags=tags + family, + revision, + container_definitions, + self.region_name, + volumes=volumes, + network_mode=network_mode, + tags=tags, ) self.task_definitions[family][revision] = task_definition diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py index ebbfeb84b..49bf022b4 100644 --- a/moto/ecs/responses.py +++ b/moto/ecs/responses.py @@ -62,9 +62,13 @@ class 
EC2ContainerServiceResponse(BaseResponse): container_definitions = self._get_param("containerDefinitions") volumes = self._get_param("volumes") tags = self._get_param("tags") - network_mode = self._get_param('networkMode') + network_mode = self._get_param("networkMode") task_definition = self.ecs_backend.register_task_definition( - family, container_definitions, volumes=volumes, network_mode=network_mode, tags=tags, + family, + container_definitions, + volumes=volumes, + network_mode=network_mode, + tags=tags, ) return json.dumps({"taskDefinition": task_definition.response_object}) diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 75598f6e5..f1f1e04ae 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -94,7 +94,7 @@ def test_register_task_definition(): "logConfiguration": {"logDriver": "json-file"}, } ], - networkMode='bridge', + networkMode="bridge", tags=[ {"key": "createdBy", "value": "moto-unittest"}, {"key": "foo", "value": "bar"}, @@ -125,7 +125,7 @@ def test_register_task_definition(): response["taskDefinition"]["containerDefinitions"][0]["logConfiguration"][ "logDriver" ].should.equal("json-file") - response['taskDefinition']['networkMode'].should.equal('bridge') + response["taskDefinition"]["networkMode"].should.equal("bridge") @mock_ecs From 6dac06ed7c8994719a5ac485d471d2c22045a05d Mon Sep 17 00:00:00 2001 From: Sebastian P Date: Fri, 10 Jan 2020 16:08:34 +0100 Subject: [PATCH 036/125] setup.py: Unlock use with jsondiff >1.1.2 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 97a6341ff..d09f8fc7b 100755 --- a/setup.py +++ b/setup.py @@ -43,7 +43,7 @@ install_requires = [ "python-jose<4.0.0", "mock", "docker>=2.5.1", - "jsondiff==1.1.2", + "jsondiff>=1.1.2", "aws-xray-sdk!=0.96,>=0.93", "responses>=0.9.0", "idna<2.9,>=2.5", From 9ce1ee49d763dc73d6630065c2b7adbacb279ad1 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 12 Jan 2020 12:05:08 +0000 Subject: [PATCH 037/125] #2626 - DynamoDB - FilterExpression should ignore items with non-existent attribute --- moto/dynamodb2/comparisons.py | 8 -------- tests/test_dynamodb2/test_dynamodb.py | 26 ++++++++++++++++++++++++++ 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/moto/dynamodb2/comparisons.py b/moto/dynamodb2/comparisons.py index 69d7f74e0..372f612c3 100644 --- a/moto/dynamodb2/comparisons.py +++ b/moto/dynamodb2/comparisons.py @@ -979,8 +979,6 @@ class OpLessThan(Op): # In python3 None is not a valid comparator when using < or > so must be handled specially if lhs and rhs: return lhs < rhs - elif lhs is None and rhs: - return True else: return False @@ -994,8 +992,6 @@ class OpGreaterThan(Op): # In python3 None is not a valid comparator when using < or > so must be handled specially if lhs and rhs: return lhs > rhs - elif lhs and rhs is None: - return True else: return False @@ -1027,8 +1023,6 @@ class OpLessThanOrEqual(Op): # In python3 None is not a valid comparator when using < or > so must be handled specially if lhs and rhs: return lhs <= rhs - elif lhs is None and rhs or lhs is None and rhs is None: - return True else: return False @@ -1042,8 +1036,6 @@ class OpGreaterThanOrEqual(Op): # In python3 None is not a valid comparator when using < or > so must be handled specially if lhs and rhs: return lhs >= rhs - elif lhs and rhs is None or lhs is None and rhs is None: - return True else: return False diff --git a/tests/test_dynamodb2/test_dynamodb.py 
b/tests/test_dynamodb2/test_dynamodb.py index 831538054..b63a7c19e 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -1719,6 +1719,32 @@ def test_scan_filter4(): assert response["Count"] == 0 +@mock_dynamodb2 +def test_scan_filter_should_not_return_non_existing_attributes(): + table_name = "my-table" + item = {"partitionKey": "pk-2", "my-attr": 42} + # Create table + res = boto3.resource("dynamodb") + res.create_table( + TableName=table_name, + KeySchema=[{"AttributeName": "partitionKey", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "partitionKey", "AttributeType": "S"}], + BillingMode="PAY_PER_REQUEST", + ) + table = res.Table(table_name) + # Insert items + table.put_item(Item={"partitionKey": "pk-1"}) + table.put_item(Item=item) + # Verify a few operations + # Assert we only find the item that has this attribute + table.scan(FilterExpression=Attr("my-attr").lt(43))["Items"].should.equal([item]) + table.scan(FilterExpression=Attr("my-attr").lte(42))["Items"].should.equal([item]) + table.scan(FilterExpression=Attr("my-attr").gte(42))["Items"].should.equal([item]) + table.scan(FilterExpression=Attr("my-attr").gt(41))["Items"].should.equal([item]) + # Sanity check that we can't find the item if the FE is wrong + table.scan(FilterExpression=Attr("my-attr").gt(43))["Items"].should.equal([]) + + @mock_dynamodb2 def test_bad_scan_filter(): client = boto3.client("dynamodb", region_name="us-east-1") From 8c920cce109552d4841bf1ff72fda53f69b3bd45 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 12 Jan 2020 12:20:55 +0000 Subject: [PATCH 038/125] Specify region in tests --- tests/test_dynamodb2/test_dynamodb.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index b63a7c19e..333eba135 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -1724,7 +1724,7 @@ def test_scan_filter_should_not_return_non_existing_attributes(): table_name = "my-table" item = {"partitionKey": "pk-2", "my-attr": 42} # Create table - res = boto3.resource("dynamodb") + res = boto3.resource("dynamodb", region_name="us-east-1") res.create_table( TableName=table_name, KeySchema=[{"AttributeName": "partitionKey", "KeyType": "HASH"}], From fba84ec34b2e0dbf9e8ade3ef0965de882e76bab Mon Sep 17 00:00:00 2001 From: Nikhil Date: Tue, 14 Jan 2020 12:28:48 +0530 Subject: [PATCH 039/125] Fixed a typo in README.md - related to https://github.com/spulec/moto/issues/2691 --- README.md | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 4024328a9..f5c45a6b6 100644 --- a/README.md +++ b/README.md @@ -283,14 +283,14 @@ def test_describe_instances_allowed(): ] } access_key = ... - # create access key for an IAM user/assumed role that has the policy above. + # create access key for an IAM user/assumed role that has the policy above. 
# this part should call __exactly__ 4 AWS actions, so that authentication and authorization starts exactly after this - + client = boto3.client('ec2', region_name='us-east-1', aws_access_key_id=access_key['AccessKeyId'], aws_secret_access_key=access_key['SecretAccessKey']) - - # if the IAM principal whose access key is used, does not have the permission to describe instances, this will fail + + # if the IAM principal whose access key is used, does not have the permission to describe instances, this will fail instances = client.describe_instances()['Reservations'][0]['Instances'] assert len(instances) == 0 ``` @@ -310,16 +310,16 @@ You need to ensure that the mocks are actually in place. Changes made to recent have altered some of the mock behavior. In short, you need to ensure that you _always_ do the following: 1. Ensure that your tests have dummy environment variables set up: - + export AWS_ACCESS_KEY_ID='testing' export AWS_SECRET_ACCESS_KEY='testing' export AWS_SECURITY_TOKEN='testing' export AWS_SESSION_TOKEN='testing' - -1. __VERY IMPORTANT__: ensure that you have your mocks set up __BEFORE__ your `boto3` client is established. + +1. __VERY IMPORTANT__: ensure that you have your mocks set up __BEFORE__ your `boto3` client is established. This can typically happen if you import a module that has a `boto3` client instantiated outside of a function. See the pesky imports section below on how to work around this. - + ### Example on usage? If you are a user of [pytest](https://pytest.org/en/latest/), you can leverage [pytest fixtures](https://pytest.org/en/latest/fixture.html#fixture) to help set up your mocks and other AWS resources that you would need. @@ -354,7 +354,7 @@ def cloudwatch(aws_credentials): ... etc. ``` -In the code sample above, all of the AWS/mocked fixtures take in a parameter of `aws_credentials`, +In the code sample above, all of the AWS/mocked fixtures take in a parameter of `aws_credentials`, which sets the proper fake environment variables. The fake environment variables are used so that `botocore` doesn't try to locate real credentials on your system. @@ -364,7 +364,7 @@ def test_create_bucket(s3): # s3 is a fixture defined above that yields a boto3 s3 client. # Feel free to instantiate another boto3 S3 client -- Keep note of the region though. s3.create_bucket(Bucket="somebucket") - + result = s3.list_buckets() assert len(result['Buckets']) == 1 assert result['Buckets'][0]['Name'] == 'somebucket' @@ -373,7 +373,7 @@ def test_create_bucket(s3): ### What about those pesky imports? Recall earlier, it was mentioned that mocks should be established __BEFORE__ the clients are set up. One way to avoid import issues is to make use of local Python imports -- i.e. import the module inside of the unit -test you want to run vs. importing at the top of the file. +test you want to run vs. importing at the top of the file. Example: ```python @@ -381,12 +381,12 @@ def test_something(s3): from some.package.that.does.something.with.s3 import some_func # <-- Local import for unit test # ^^ Importing here ensures that the mock has been established. - sume_func() # The mock has been established from the "s3" pytest fixture, so this function that uses + some_func() # The mock has been established from the "s3" pytest fixture, so this function that uses # a package-level S3 client will properly use the mock and not reach out to AWS. 
``` ### Other caveats -For Tox, Travis CI, and other build systems, you might need to also perform a `touch ~/.aws/credentials` +For Tox, Travis CI, and other build systems, you might need to also perform a `touch ~/.aws/credentials` command before running the tests. As long as that file is present (empty preferably) and the environment variables above are set, you should be good to go. From db559e7e06bcadad70be048aeb4f1b1119671375 Mon Sep 17 00:00:00 2001 From: Asher Foa <1268088+asherf@users.noreply.github.com> Date: Tue, 14 Jan 2020 09:55:32 -0800 Subject: [PATCH 040/125] Fix some typos --- moto/ec2/responses/security_groups.py | 8 ++++---- tests/test_autoscaling/test_autoscaling.py | 14 +++++++------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/moto/ec2/responses/security_groups.py b/moto/ec2/responses/security_groups.py index 6f2926f61..f0002d5bd 100644 --- a/moto/ec2/responses/security_groups.py +++ b/moto/ec2/responses/security_groups.py @@ -104,7 +104,7 @@ class SecurityGroups(BaseResponse): if self.is_not_dryrun("GrantSecurityGroupIngress"): for args in self._process_rules_from_querystring(): self.ec2_backend.authorize_security_group_ingress(*args) - return AUTHORIZE_SECURITY_GROUP_INGRESS_REPONSE + return AUTHORIZE_SECURITY_GROUP_INGRESS_RESPONSE def create_security_group(self): name = self._get_param("GroupName") @@ -158,7 +158,7 @@ class SecurityGroups(BaseResponse): if self.is_not_dryrun("RevokeSecurityGroupIngress"): for args in self._process_rules_from_querystring(): self.ec2_backend.revoke_security_group_ingress(*args) - return REVOKE_SECURITY_GROUP_INGRESS_REPONSE + return REVOKE_SECURITY_GROUP_INGRESS_RESPONSE CREATE_SECURITY_GROUP_RESPONSE = """ @@ -265,12 +265,12 @@ DESCRIBE_SECURITY_GROUPS_RESPONSE = ( """ ) -AUTHORIZE_SECURITY_GROUP_INGRESS_REPONSE = """ +AUTHORIZE_SECURITY_GROUP_INGRESS_RESPONSE = """ 59dbff89-35bd-4eac-99ed-be587EXAMPLE true """ -REVOKE_SECURITY_GROUP_INGRESS_REPONSE = """ +REVOKE_SECURITY_GROUP_INGRESS_RESPONSE = """ 59dbff89-35bd-4eac-99ed-be587EXAMPLE true """ diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index c46bc7219..2e7255381 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -706,14 +706,14 @@ def test_create_autoscaling_group_boto3(): "ResourceId": "test_asg", "ResourceType": "auto-scaling-group", "Key": "propogated-tag-key", - "Value": "propogate-tag-value", + "Value": "propagate-tag-value", "PropagateAtLaunch": True, }, { "ResourceId": "test_asg", "ResourceType": "auto-scaling-group", "Key": "not-propogated-tag-key", - "Value": "not-propogate-tag-value", + "Value": "not-propagate-tag-value", "PropagateAtLaunch": False, }, ], @@ -744,14 +744,14 @@ def test_create_autoscaling_group_from_instance(): "ResourceId": "test_asg", "ResourceType": "auto-scaling-group", "Key": "propogated-tag-key", - "Value": "propogate-tag-value", + "Value": "propagate-tag-value", "PropagateAtLaunch": True, }, { "ResourceId": "test_asg", "ResourceType": "auto-scaling-group", "Key": "not-propogated-tag-key", - "Value": "not-propogate-tag-value", + "Value": "not-propagate-tag-value", "PropagateAtLaunch": False, }, ], @@ -1062,7 +1062,7 @@ def test_detach_one_instance_decrement(): "ResourceId": "test_asg", "ResourceType": "auto-scaling-group", "Key": "propogated-tag-key", - "Value": "propogate-tag-value", + "Value": "propagate-tag-value", "PropagateAtLaunch": True, } ], @@ -1116,7 +1116,7 @@ def test_detach_one_instance(): "ResourceId": 
"test_asg", "ResourceType": "auto-scaling-group", "Key": "propogated-tag-key", - "Value": "propogate-tag-value", + "Value": "propagate-tag-value", "PropagateAtLaunch": True, } ], @@ -1169,7 +1169,7 @@ def test_attach_one_instance(): "ResourceId": "test_asg", "ResourceType": "auto-scaling-group", "Key": "propogated-tag-key", - "Value": "propogate-tag-value", + "Value": "propagate-tag-value", "PropagateAtLaunch": True, } ], From db75c9e25ca49da7c1bb7e330579db695fbeff3b Mon Sep 17 00:00:00 2001 From: Franz See Date: Sun, 5 Jan 2020 23:13:36 +0800 Subject: [PATCH 041/125] moto/issues/2670 | Moved population of user attributes from accessToken to idToken --- moto/cognitoidp/models.py | 6 +++--- tests/test_cognitoidp/test_cognitoidp.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py index 78025627a..9f39d7a5f 100644 --- a/moto/cognitoidp/models.py +++ b/moto/cognitoidp/models.py @@ -127,7 +127,8 @@ class CognitoIdpUserPool(BaseModel): return jws.sign(payload, self.json_web_key, algorithm="RS256"), expires_in def create_id_token(self, client_id, username): - id_token, expires_in = self.create_jwt(client_id, username, "id") + extra_data = self.get_user_extra_data_by_client_id(client_id, username) + id_token, expires_in = self.create_jwt(client_id, username, "id", extra_data=extra_data) self.id_tokens[id_token] = (client_id, username) return id_token, expires_in @@ -137,9 +138,8 @@ class CognitoIdpUserPool(BaseModel): return refresh_token def create_access_token(self, client_id, username): - extra_data = self.get_user_extra_data_by_client_id(client_id, username) access_token, expires_in = self.create_jwt( - client_id, username, "access", extra_data=extra_data + client_id, username, "access" ) self.access_tokens[access_token] = (client_id, username) return access_token, expires_in diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index 79e6dbbb8..6a13683f0 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -1143,11 +1143,11 @@ def test_token_legitimacy(): id_claims["iss"].should.equal(issuer) id_claims["aud"].should.equal(client_id) id_claims["token_use"].should.equal("id") + for k, v in outputs["additional_fields"].items(): + id_claims[k].should.equal(v) access_claims = json.loads(jws.verify(access_token, json_web_key, "RS256")) access_claims["iss"].should.equal(issuer) access_claims["aud"].should.equal(client_id) - for k, v in outputs["additional_fields"].items(): - access_claims[k].should.equal(v) access_claims["token_use"].should.equal("access") From 44e92f58ec44250c0701209549104d4545304ae8 Mon Sep 17 00:00:00 2001 From: Franz See Date: Wed, 15 Jan 2020 23:33:26 +0800 Subject: [PATCH 042/125] moto/issues/2670 | Used black to format the code --- moto/cognitoidp/models.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py index 9f39d7a5f..96b23a404 100644 --- a/moto/cognitoidp/models.py +++ b/moto/cognitoidp/models.py @@ -128,7 +128,9 @@ class CognitoIdpUserPool(BaseModel): def create_id_token(self, client_id, username): extra_data = self.get_user_extra_data_by_client_id(client_id, username) - id_token, expires_in = self.create_jwt(client_id, username, "id", extra_data=extra_data) + id_token, expires_in = self.create_jwt( + client_id, username, "id", extra_data=extra_data + ) self.id_tokens[id_token] = (client_id, username) return id_token, 
expires_in @@ -138,9 +140,7 @@ class CognitoIdpUserPool(BaseModel): return refresh_token def create_access_token(self, client_id, username): - access_token, expires_in = self.create_jwt( - client_id, username, "access" - ) + access_token, expires_in = self.create_jwt(client_id, username, "access") self.access_tokens[access_token] = (client_id, username) return access_token, expires_in From 33661d267e699a83828abc43cd2250b1e033fe9a Mon Sep 17 00:00:00 2001 From: Charles Park Date: Thu, 16 Jan 2020 16:33:59 -0500 Subject: [PATCH 043/125] Fix spelling typo --- docs/index.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/index.rst b/docs/index.rst index 6311597fe..22ac97228 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -76,7 +76,7 @@ Currently implemented Services: +---------------------------+-----------------------+------------------------------------+ | Logs | @mock_logs | basic endpoints done | +---------------------------+-----------------------+------------------------------------+ -| Organizations | @mock_organizations | some core edpoints done | +| Organizations | @mock_organizations | some core endpoints done | +---------------------------+-----------------------+------------------------------------+ | Polly | @mock_polly | all endpoints done | +---------------------------+-----------------------+------------------------------------+ From eaa8c8db6e77b93f12ee90f2aa0fed483ce45d30 Mon Sep 17 00:00:00 2001 From: Brady Date: Thu, 16 Jan 2020 21:00:24 -0500 Subject: [PATCH 044/125] add tagging support to events --- moto/events/models.py | 28 ++++++++ moto/events/responses.py | 23 ++++++ moto/utilities/__init__.py | 0 moto/utilities/tagging_service.py | 56 +++++++++++++++ tests/test_events/test_events.py | 74 ++++++++++++++++---- tests/test_utilities/test_tagging_service.py | 59 ++++++++++++++++ 6 files changed, 228 insertions(+), 12 deletions(-) create mode 100644 moto/utilities/__init__.py create mode 100644 moto/utilities/tagging_service.py create mode 100644 tests/test_utilities/test_tagging_service.py diff --git a/moto/events/models.py b/moto/events/models.py index 548d41393..695cfb17a 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -6,6 +6,7 @@ from boto3 import Session from moto.core.exceptions import JsonRESTError from moto.core import BaseBackend, BaseModel from moto.sts.models import ACCOUNT_ID +from moto.utilities.tagging_service import TaggingService class Rule(BaseModel): @@ -104,6 +105,7 @@ class EventsBackend(BaseBackend): self.region_name = region_name self.event_buses = {} self.event_sources = {} + self.tagger = TaggingService() self._add_default_event_bus() @@ -361,6 +363,32 @@ class EventsBackend(BaseBackend): self.event_buses.pop(name, None) + def list_tags_for_resource(self, arn): + name = arn.split("/")[-1] + if name in self.rules: + return self.tagger.list_tags_for_resource(self.rules[name].arn) + raise JsonRESTError( + "ResourceNotFoundException", "An entity that you specified does not exist." + ) + + def tag_resource(self, arn, tags): + name = arn.split("/")[-1] + if name in self.rules: + self.tagger.tag_resource(self.rules[name].arn, tags) + return {} + raise JsonRESTError( + "ResourceNotFoundException", "An entity that you specified does not exist." 
+ ) + + def untag_resource(self, arn, tag_names): + name = arn.split("/")[-1] + if name in self.rules: + self.tagger.untag_resource_using_names(self.rules[name].arn, tag_names) + return {} + raise JsonRESTError( + "ResourceNotFoundException", "An entity that you specified does not exist." + ) + events_backends = {} for region in Session().get_available_regions("events"): diff --git a/moto/events/responses.py b/moto/events/responses.py index b415564f8..68c2114a6 100644 --- a/moto/events/responses.py +++ b/moto/events/responses.py @@ -297,3 +297,26 @@ class EventsHandler(BaseResponse): self.events_backend.delete_event_bus(name) return "", self.response_headers + + def list_tags_for_resource(self): + arn = self._get_param("ResourceARN") + + result = self.events_backend.list_tags_for_resource(arn) + + return json.dumps(result), self.response_headers + + def tag_resource(self): + arn = self._get_param("ResourceARN") + tags = self._get_param("Tags") + + result = self.events_backend.tag_resource(arn, tags) + + return json.dumps(result), self.response_headers + + def untag_resource(self): + arn = self._get_param("ResourceARN") + tags = self._get_param("TagKeys") + + result = self.events_backend.untag_resource(arn, tags) + + return json.dumps(result), self.response_headers diff --git a/moto/utilities/__init__.py b/moto/utilities/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/moto/utilities/tagging_service.py b/moto/utilities/tagging_service.py new file mode 100644 index 000000000..8c7a86f1d --- /dev/null +++ b/moto/utilities/tagging_service.py @@ -0,0 +1,56 @@ +class TaggingService: + def __init__(self, tagName="Tags", keyName="Key", valueName="Value"): + self.tagName = tagName + self.keyName = keyName + self.valueName = valueName + self.tags = {} + + def list_tags_for_resource(self, arn): + result = [] + if arn in self.tags: + for k, v in self.tags[arn].items(): + result.append({self.keyName: k, self.valueName: v}) + return {self.tagName: result} + + def tag_resource(self, arn, tags): + if arn not in self.tags: + self.tags[arn] = {} + for t in tags: + if self.valueName in t: + self.tags[arn][t[self.keyName]] = t[self.valueName] + else: + self.tags[arn][t[self.keyName]] = None + + def untag_resource_using_names(self, arn, tag_names): + for name in tag_names: + if name in self.tags.get(arn, {}): + del self.tags[arn][name] + + def untag_resource_using_tags(self, arn, tags): + m = self.tags.get(arn, {}) + for t in tags: + if self.keyName in t: + if t[self.keyName] in m: + if self.valueName in t: + if m[t[self.keyName]] != t[self.valueName]: + continue + # If both key and value are provided, match both before deletion + del m[t[self.keyName]] + + def extract_tag_names(self, tags): + results = [] + if len(tags) == 0: + return results + for tag in tags: + if self.keyName in tag: + results.append(tag[self.keyName]) + return results + + def flatten_tag_list(self, tags): + result = {} + for t in tags: + if self.valueName in t: + result[t[self.keyName]] = t[self.valueName] + else: + result[t[self.keyName]] = None + return result diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index 14d872806..d276a1705 100644 --- a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -1,12 +1,15 @@ -import random -import boto3 import json -import sure # noqa +import random +import unittest -from moto.events import mock_events +import boto3 from botocore.exceptions import ClientError from nose.tools import assert_raises + from moto.core import 
ACCOUNT_ID +from moto.core.exceptions import JsonRESTError +from moto.events import mock_events +from moto.events.models import EventsBackend RULES = [ {"Name": "test1", "ScheduleExpression": "rate(5 minutes)"}, @@ -136,14 +139,6 @@ def test_list_rule_names_by_target(): assert rule in test_2_target["Rules"] -@mock_events -def test_list_rules(): - client = generate_environment() - - rules = client.list_rules() - assert len(rules["Rules"]) == len(RULES) - - @mock_events def test_delete_rule(): client = generate_environment() @@ -461,3 +456,58 @@ def test_delete_event_bus_errors(): client.delete_event_bus.when.called_with(Name="default").should.throw( ClientError, "Cannot delete event bus default." ) + + +@mock_events +def test_rule_tagging_happy(): + client = generate_environment() + rule_name = get_random_rule()["Name"] + rule_arn = client.describe_rule(Name=rule_name).get("Arn") + + tags = [{"Key": "key1", "Value": "value1"}, {"Key": "key2", "Value": "value2"}] + client.tag_resource(ResourceARN=rule_arn, Tags=tags) + + actual = client.list_tags_for_resource(ResourceARN=rule_arn).get("Tags") + tc = unittest.TestCase("__init__") + expected = [{"Value": "value1", "Key": "key1"}, {"Value": "value2", "Key": "key2"}] + tc.assertTrue( + (expected[0] == actual[0] and expected[1] == actual[1]) + or (expected[1] == actual[0] and expected[0] == actual[1]) + ) + + client.untag_resource(ResourceARN=rule_arn, TagKeys=["key1"]) + + actual = client.list_tags_for_resource(ResourceARN=rule_arn).get("Tags") + expected = [{"Key": "key2", "Value": "value2"}] + assert expected == actual + + +def freeze_dict(obj): + if isinstance(obj, dict): + dict_items = list(obj.items()) + dict_items.append(("__frozen__", True)) + return tuple([(k, freeze_dict(v)) for k, v in dict_items]) + return obj + + +@mock_events +def test_rule_tagging_sad(): + b = EventsBackend("us-west-2") + + try: + b.tag_resource("unknown", []) + raise "tag_resource should fail if ResourceARN is not known" + except JsonRESTError: + pass + + try: + b.untag_resource("unknown", []) + raise "untag_resource should fail if ResourceARN is not known" + except JsonRESTError: + pass + + try: + b.list_tags_for_resource("unknown") + raise "list_tags_for_resource should fail if ResourceARN is not known" + except JsonRESTError: + pass diff --git a/tests/test_utilities/test_tagging_service.py b/tests/test_utilities/test_tagging_service.py new file mode 100644 index 000000000..1cd820a19 --- /dev/null +++ b/tests/test_utilities/test_tagging_service.py @@ -0,0 +1,59 @@ +import unittest + +from moto.utilities.tagging_service import TaggingService + + +class TestTaggingService(unittest.TestCase): + def test_list_empty(self): + svc = TaggingService() + result = svc.list_tags_for_resource("test") + self.assertEqual(result, {"Tags": []}) + + def test_create_tag(self): + svc = TaggingService("TheTags", "TagKey", "TagValue") + tags = [{"TagKey": "key_key", "TagValue": "value_value"}] + svc.tag_resource("arn", tags) + actual = svc.list_tags_for_resource("arn") + expected = {"TheTags": [{"TagKey": "key_key", "TagValue": "value_value"}]} + self.assertDictEqual(expected, actual) + + def test_create_tag_without_value(self): + svc = TaggingService() + tags = [{"Key": "key_key"}] + svc.tag_resource("arn", tags) + actual = svc.list_tags_for_resource("arn") + expected = {"Tags": [{"Key": "key_key", "Value": None}]} + self.assertDictEqual(expected, actual) + + def test_delete_tag_using_names(self): + svc = TaggingService() + tags = [{"Key": "key_key", "Value": "value_value"}] + 
svc.tag_resource("arn", tags) + svc.untag_resource_using_names("arn", ["key_key"]) + result = svc.list_tags_for_resource("arn") + self.assertEqual(result, {"Tags": []}) + + def test_list_empty_delete(self): + svc = TaggingService() + svc.untag_resource_using_names("arn", ["key_key"]) + result = svc.list_tags_for_resource("arn") + self.assertEqual(result, {"Tags": []}) + + def test_delete_tag_using_tags(self): + svc = TaggingService() + tags = [{"Key": "key_key", "Value": "value_value"}] + svc.tag_resource("arn", tags) + svc.untag_resource_using_tags("arn", tags) + result = svc.list_tags_for_resource("arn") + self.assertEqual(result, {"Tags": []}) + + def test_extract_tag_names(self): + svc = TaggingService() + tags = [{"Key": "key1", "Value": "value1"}, {"Key": "key2", "Value": "value2"}] + actual = svc.extract_tag_names(tags) + expected = ["key1", "key2"] + self.assertEqual(expected, actual) + + +if __name__ == "__main__": + unittest.main() From 1e851fb1d8b328b18a54702d31a44423138b1e83 Mon Sep 17 00:00:00 2001 From: Brady Date: Fri, 17 Jan 2020 10:12:58 -0500 Subject: [PATCH 045/125] remove dead code --- tests/test_events/test_events.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index d276a1705..4fb3b4029 100644 --- a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -482,14 +482,6 @@ def test_rule_tagging_happy(): assert expected == actual -def freeze_dict(obj): - if isinstance(obj, dict): - dict_items = list(obj.items()) - dict_items.append(("__frozen__", True)) - return tuple([(k, freeze_dict(v)) for k, v in dict_items]) - return obj - - @mock_events def test_rule_tagging_sad(): b = EventsBackend("us-west-2") From 6f02782624e0a60d388b2ba56cf7be9cf359582b Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 22 Jan 2020 11:30:17 +0000 Subject: [PATCH 046/125] #2627 - Change comparison to differentiate between 0 and None --- moto/dynamodb2/comparisons.py | 8 ++--- tests/test_dynamodb2/test_dynamodb.py | 42 +++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 4 deletions(-) diff --git a/moto/dynamodb2/comparisons.py b/moto/dynamodb2/comparisons.py index 372f612c3..29951d92d 100644 --- a/moto/dynamodb2/comparisons.py +++ b/moto/dynamodb2/comparisons.py @@ -977,7 +977,7 @@ class OpLessThan(Op): lhs = self.lhs.expr(item) rhs = self.rhs.expr(item) # In python3 None is not a valid comparator when using < or > so must be handled specially - if lhs and rhs: + if lhs is not None and rhs is not None: return lhs < rhs else: return False @@ -990,7 +990,7 @@ class OpGreaterThan(Op): lhs = self.lhs.expr(item) rhs = self.rhs.expr(item) # In python3 None is not a valid comparator when using < or > so must be handled specially - if lhs and rhs: + if lhs is not None and rhs is not None: return lhs > rhs else: return False @@ -1021,7 +1021,7 @@ class OpLessThanOrEqual(Op): lhs = self.lhs.expr(item) rhs = self.rhs.expr(item) # In python3 None is not a valid comparator when using < or > so must be handled specially - if lhs and rhs: + if lhs is not None and rhs is not None: return lhs <= rhs else: return False @@ -1034,7 +1034,7 @@ class OpGreaterThanOrEqual(Op): lhs = self.lhs.expr(item) rhs = self.rhs.expr(item) # In python3 None is not a valid comparator when using < or > so must be handled specially - if lhs and rhs: + if lhs is not None and rhs is not None: return lhs >= rhs else: return False diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py 
index 333eba135..1a0865ba8 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -2531,6 +2531,48 @@ def test_condition_expressions(): ) +@mock_dynamodb2 +def test_condition_expression_numerical_attribute(): + dynamodb = boto3.resource("dynamodb") + dynamodb.create_table( + TableName="my-table", + KeySchema=[{"AttributeName": "partitionKey", "KeyType": "HASH"}], + AttributeDefinitions=[{"AttributeName": "partitionKey", "AttributeType": "S"}], + ) + table = dynamodb.Table("my-table") + table.put_item(Item={"partitionKey": "pk-pos", "myAttr": 5}) + table.put_item(Item={"partitionKey": "pk-neg", "myAttr": -5}) + + # try to update the item we put in the table using numerical condition expression + # Specifically, verify that we can compare with a zero-value + # First verify that > and >= work on positive numbers + update_numerical_con_expr( + key="pk-pos", con_expr="myAttr > :zero", res="6", table=table + ) + update_numerical_con_expr( + key="pk-pos", con_expr="myAttr >= :zero", res="7", table=table + ) + # Second verify that < and <= work on negative numbers + update_numerical_con_expr( + key="pk-neg", con_expr="myAttr < :zero", res="-4", table=table + ) + update_numerical_con_expr( + key="pk-neg", con_expr="myAttr <= :zero", res="-3", table=table + ) + + +def update_numerical_con_expr(key, con_expr, res, table): + table.update_item( + Key={"partitionKey": key}, + UpdateExpression="ADD myAttr :one", + ExpressionAttributeValues={":zero": 0, ":one": 1}, + ConditionExpression=con_expr, + ) + table.get_item(Key={"partitionKey": key})["Item"]["myAttr"].should.equal( + Decimal(res) + ) + + @mock_dynamodb2 def test_condition_expression__attr_doesnt_exist(): client = boto3.client("dynamodb", region_name="us-east-1") From 7ff7ee4e8ebf621ad24cb4101c9b7069e43867c6 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Wed, 22 Jan 2020 11:42:06 +0000 Subject: [PATCH 047/125] Test fix - Region must be specified --- tests/test_dynamodb2/test_dynamodb.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 1a0865ba8..5a978edc0 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -2533,7 +2533,7 @@ def test_condition_expressions(): @mock_dynamodb2 def test_condition_expression_numerical_attribute(): - dynamodb = boto3.resource("dynamodb") + dynamodb = boto3.resource("dynamodb", region_name="us-east-1") dynamodb.create_table( TableName="my-table", KeySchema=[{"AttributeName": "partitionKey", "KeyType": "HASH"}], From ed81e36faffda8b1ab4a5d3176fc65307fe8a34d Mon Sep 17 00:00:00 2001 From: Andrey Kislyuk Date: Wed, 22 Jan 2020 16:08:42 -0800 Subject: [PATCH 048/125] awslambda: explicitly specify json-file log driver This is analogous to #2635. 
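For reference, a minimal sketch of what pinning the json-file log driver looks like
against the docker SDK; the image and command below are illustrative, not part of
this change:

    import docker
    from docker.types import LogConfig

    client = docker.from_env()
    # Pin the json-file driver so container.logs() can read the output back
    # even when the daemon defaults to a driver that does not support log
    # retrieval (e.g. journald).
    log_config = LogConfig(type=LogConfig.types.JSON)
    container = client.containers.run(
        "alpine", ["echo", "hello"], detach=True, log_config=log_config
    )
    container.wait()
    print(container.logs())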
--- moto/awslambda/models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 38ff81fb2..e43f8e5d0 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -409,6 +409,7 @@ class LambdaFunction(BaseModel): volumes=["{}:/var/task".format(data_vol.name)], environment=env_vars, detach=True, + log_config=docker.types.LogConfig(type=docker.types.LogConfig.types.JSON), **run_kwargs ) finally: From 5ba786d97a0ba516d7c609991c1f403f865e4acc Mon Sep 17 00:00:00 2001 From: Andrey Kislyuk Date: Wed, 22 Jan 2020 16:39:11 -0800 Subject: [PATCH 049/125] awslambda: return actual result in sync invoke Return actual output of the Lambda instead of echoing the input. --- moto/awslambda/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 38ff81fb2..8d11a5832 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -472,7 +472,7 @@ class LambdaFunction(BaseModel): payload["result"] = response_headers["x-amz-log-result"] result = res.encode("utf-8") else: - result = json.dumps(payload) + result = res if errored: response_headers["x-amz-function-error"] = "Handled" From e3906043d7a77ccb3c656088bff5e8fce2386ec0 Mon Sep 17 00:00:00 2001 From: Andrey Kislyuk Date: Wed, 22 Jan 2020 16:58:25 -0800 Subject: [PATCH 050/125] Fix linter error --- moto/awslambda/models.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index e43f8e5d0..4a223821b 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -394,6 +394,7 @@ class LambdaFunction(BaseModel): env_vars.update(self.environment_vars) container = output = exit_code = None + log_config = docker.types.LogConfig(type=docker.types.LogConfig.types.JSON) with _DockerDataVolumeContext(self) as data_vol: try: run_kwargs = ( @@ -409,7 +410,7 @@ class LambdaFunction(BaseModel): volumes=["{}:/var/task".format(data_vol.name)], environment=env_vars, detach=True, - log_config=docker.types.LogConfig(type=docker.types.LogConfig.types.JSON), + log_config=log_config, **run_kwargs ) finally: From a32b3c4b597c24844e2ee3af24b48cce746bf0d0 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 22 Jan 2020 19:38:07 -0600 Subject: [PATCH 051/125] Fix SQS get_queue_attributes to allow RedrivePolicy. Closes #2682. 
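[Editor's note: a minimal sketch, assuming moto and boto3 are installed, of the behaviour this patch enables; the queue names are illustrative. Before the fix, get_queue_attributes silently dropped RedrivePolicy even when the queue had been created with one.]

```python
import json

import boto3
from moto import mock_sqs


@mock_sqs
def show_redrive_policy():
    sqs = boto3.client("sqs", region_name="us-east-1")
    # Create a dead-letter queue and wire the main queue to it.
    dlq_url = sqs.create_queue(QueueName="my-dlq")["QueueUrl"]
    dlq_arn = sqs.get_queue_attributes(QueueUrl=dlq_url)["Attributes"]["QueueArn"]
    queue_url = sqs.create_queue(
        QueueName="my-queue",
        Attributes={
            "RedrivePolicy": json.dumps(
                {"deadLetterTargetArn": dlq_arn, "maxReceiveCount": 2}
            )
        },
    )["QueueUrl"]
    attrs = sqs.get_queue_attributes(QueueUrl=queue_url)["Attributes"]
    print(attrs["RedrivePolicy"])  # now echoed back instead of omitted


show_redrive_policy()
```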
--- moto/sqs/models.py | 1 + tests/test_sqs/test_sqs.py | 19 ++++++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 40dd6ba97..8b8263e3c 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -183,6 +183,7 @@ class Queue(BaseModel): "MaximumMessageSize", "MessageRetentionPeriod", "QueueArn", + "RedrivePolicy", "ReceiveMessageWaitTimeSeconds", "VisibilityTimeout", ] diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 639d6e51c..c74c3822a 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -331,7 +331,20 @@ def test_delete_queue(): @mock_sqs def test_get_queue_attributes(): client = boto3.client("sqs", region_name="us-east-1") - response = client.create_queue(QueueName="test-queue") + + dlq_resp = client.create_queue(QueueName="test-dlr-queue") + dlq_arn1 = client.get_queue_attributes(QueueUrl=dlq_resp["QueueUrl"])["Attributes"][ "QueueArn" ] + + response = client.create_queue( + QueueName="test-queue", + Attributes={ + "RedrivePolicy": json.dumps( + {"deadLetterTargetArn": dlq_arn1, "maxReceiveCount": 2} + ), + }, + ) queue_url = response["QueueUrl"] response = client.get_queue_attributes(QueueUrl=queue_url) @@ -356,6 +369,7 @@ "ApproximateNumberOfMessages", "MaximumMessageSize", "QueueArn", + "RedrivePolicy", "VisibilityTimeout", ], ) @@ -366,6 +380,9 @@ "MaximumMessageSize": "65536", "QueueArn": "arn:aws:sqs:us-east-1:{}:test-queue".format(ACCOUNT_ID), "VisibilityTimeout": "30", + "RedrivePolicy": json.dumps( {"deadLetterTargetArn": dlq_arn1, "maxReceiveCount": 2} ), } )

From d73a548bb0b72c8ccbb70e316ae5112f2f9264c5 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 22 Jan 2020 19:45:09 -0600 Subject: [PATCH 052/125] Remove duplicate StorageClass in S3_MULTIPART_LIST_RESPONSE. --- moto/s3/responses.py | 1 - 1 file changed, 1 deletion(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 71f21c8e1..c8f4e082b 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py
@@ -1869,7 +1869,6 @@ S3_MULTIPART_LIST_RESPONSE = """
     <ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
     <DisplayName>webfile</DisplayName>
   </Owner>
-  <StorageClass>STANDARD</StorageClass>
   <PartNumberMarker>1</PartNumberMarker>
   <NextPartNumberMarker>{{ count }}</NextPartNumberMarker>
   <MaxParts>{{ count }}</MaxParts>

From 19bf8bf76207a01a168ec842c2e4e917adc466dd Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Wed, 22 Jan 2020 20:43:34 -0600 Subject: [PATCH 053/125] Change S3 S3_ALL_BUCKETS response to return bucket creation_date in iso format. --- moto/s3/responses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index c8f4e082b..a04427172 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py
@@ -1482,7 +1482,7 @@ S3_ALL_BUCKETS = """
-        <CreationDate>{{ bucket.creation_date }}</CreationDate>
+        <CreationDate>{{ bucket.creation_date.isoformat() }}</CreationDate>

From: Asher Foa <1268088+asherf@users.noreply.github.com> Date: Mon, 20 Jan 2020 15:21:11 -0800 Subject: [PATCH 055/125] Fix more typos.
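[Editor's note on the CreationDate change in PATCH 053 above: Jinja renders a bare datetime via str(), which uses a space separator, while S3's ListAllMyBuckets XML carries ISO-8601 timestamps. A quick illustration, not taken from the patch:]

```python
from datetime import datetime

created = datetime(2020, 1, 22, 20, 43, 34)
print(str(created))         # 2020-01-22 20:43:34  (what the old template rendered)
print(created.isoformat())  # 2020-01-22T20:43:34  (ISO-8601, as real S3 returns)
```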
--- moto/logs/responses.py | 4 +-- moto/rds2/models.py | 2 +- moto/rds2/responses.py | 4 +-- tests/test_ec2/test_route_tables.py | 4 +-- tests/test_ecr/test_ecr_boto3.py | 24 +++++++-------- tests/test_ecs/test_ecs_boto3.py | 2 +- tests/test_elbv2/test_elbv2.py | 2 +- tests/test_glue/test_datacatalog.py | 2 +- tests/test_kinesis/test_firehose.py | 2 +- tests/test_kinesis/test_kinesis.py | 2 +- tests/test_rds/test_rds.py | 8 ++--- tests/test_rds2/test_rds2.py | 30 +++++++++---------- tests/test_s3/test_s3.py | 2 +- .../test_s3bucket_path/test_s3bucket_path.py | 2 +- tests/test_sns/test_application.py | 12 ++++---- tests/test_sns/test_application_boto3.py | 12 ++++---- tests/test_sns/test_publishing_boto3.py | 2 +- tests/test_sqs/test_sqs.py | 4 +-- tests/test_swf/utils.py | 2 +- 19 files changed, 61 insertions(+), 61 deletions(-) diff --git a/moto/logs/responses.py b/moto/logs/responses.py index 072c76b71..4631da2f9 100644 --- a/moto/logs/responses.py +++ b/moto/logs/responses.py @@ -103,7 +103,7 @@ class LogsResponse(BaseResponse): ( events, next_backward_token, - next_foward_token, + next_forward_token, ) = self.logs_backend.get_log_events( log_group_name, log_stream_name, @@ -117,7 +117,7 @@ class LogsResponse(BaseResponse): { "events": events, "nextBackwardToken": next_backward_token, - "nextForwardToken": next_foward_token, + "nextForwardToken": next_forward_token, } ) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index e648765b7..d2aa24a20 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -986,7 +986,7 @@ class RDS2Backend(BaseBackend): ) if option_group_kwargs["engine_name"] not in valid_option_group_engines.keys(): raise RDSClientError( - "InvalidParameterValue", "Invalid DB engine: non-existant" + "InvalidParameterValue", "Invalid DB engine: non-existent" ) if ( option_group_kwargs["major_engine_version"] diff --git a/moto/rds2/responses.py b/moto/rds2/responses.py index 625838d4d..7c815b2d5 100644 --- a/moto/rds2/responses.py +++ b/moto/rds2/responses.py @@ -367,14 +367,14 @@ class RDS2Response(BaseResponse): def modify_db_parameter_group(self): db_parameter_group_name = self._get_param("DBParameterGroupName") - db_parameter_group_parameters = self._get_db_parameter_group_paramters() + db_parameter_group_parameters = self._get_db_parameter_group_parameters() db_parameter_group = self.backend.modify_db_parameter_group( db_parameter_group_name, db_parameter_group_parameters ) template = self.response_template(MODIFY_DB_PARAMETER_GROUP_TEMPLATE) return template.render(db_parameter_group=db_parameter_group) - def _get_db_parameter_group_paramters(self): + def _get_db_parameter_group_parameters(self): parameter_group_parameters = defaultdict(dict) for param_name, value in self.querystring.items(): if not param_name.startswith("Parameters.Parameter"): diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index dfb3292b6..347464691 100644 --- a/tests/test_ec2/test_route_tables.py +++ b/tests/test_ec2/test_route_tables.py @@ -236,8 +236,8 @@ def test_route_table_associations(): @mock_ec2_deprecated def test_route_table_replace_route_table_association(): """ - Note: Boto has deprecated replace_route_table_assocation (which returns status) - and now uses replace_route_table_assocation_with_assoc (which returns association ID). + Note: Boto has deprecated replace_route_table_association (which returns status) + and now uses replace_route_table_association_with_assoc (which returns association ID). 
""" conn = boto.connect_vpc("the_key", "the_secret") vpc = conn.create_vpc("10.0.0.0/16") diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index 9115e3fad..82a2c7521 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -77,7 +77,7 @@ def test_describe_repositories(): response = client.describe_repositories() len(response["repositories"]).should.equal(2) - respository_arns = [ + repository_arns = [ "arn:aws:ecr:us-east-1:012345678910:repository/test_repository1", "arn:aws:ecr:us-east-1:012345678910:repository/test_repository0", ] @@ -86,9 +86,9 @@ def test_describe_repositories(): response["repositories"][0]["repositoryArn"], response["repositories"][1]["repositoryArn"], ] - ).should.equal(set(respository_arns)) + ).should.equal(set(repository_arns)) - respository_uris = [ + repository_uris = [ "012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1", "012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository0", ] @@ -97,7 +97,7 @@ def test_describe_repositories(): response["repositories"][0]["repositoryUri"], response["repositories"][1]["repositoryUri"], ] - ).should.equal(set(respository_uris)) + ).should.equal(set(repository_uris)) @mock_ecr @@ -108,7 +108,7 @@ def test_describe_repositories_1(): response = client.describe_repositories(registryId="012345678910") len(response["repositories"]).should.equal(2) - respository_arns = [ + repository_arns = [ "arn:aws:ecr:us-east-1:012345678910:repository/test_repository1", "arn:aws:ecr:us-east-1:012345678910:repository/test_repository0", ] @@ -117,9 +117,9 @@ def test_describe_repositories_1(): response["repositories"][0]["repositoryArn"], response["repositories"][1]["repositoryArn"], ] - ).should.equal(set(respository_arns)) + ).should.equal(set(repository_arns)) - respository_uris = [ + repository_uris = [ "012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1", "012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository0", ] @@ -128,7 +128,7 @@ def test_describe_repositories_1(): response["repositories"][0]["repositoryUri"], response["repositories"][1]["repositoryUri"], ] - ).should.equal(set(respository_uris)) + ).should.equal(set(repository_uris)) @mock_ecr @@ -147,11 +147,11 @@ def test_describe_repositories_3(): _ = client.create_repository(repositoryName="test_repository0") response = client.describe_repositories(repositoryNames=["test_repository1"]) len(response["repositories"]).should.equal(1) - respository_arn = "arn:aws:ecr:us-east-1:012345678910:repository/test_repository1" - response["repositories"][0]["repositoryArn"].should.equal(respository_arn) + repository_arn = "arn:aws:ecr:us-east-1:012345678910:repository/test_repository1" + response["repositories"][0]["repositoryArn"].should.equal(repository_arn) - respository_uri = "012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1" - response["repositories"][0]["repositoryUri"].should.equal(respository_uri) + repository_uri = "012345678910.dkr.ecr.us-east-1.amazonaws.com/test_repository1" + response["repositories"][0]["repositoryUri"].should.equal(repository_uri) @mock_ecr diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index f1f1e04ae..69c920192 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -726,7 +726,7 @@ def test_delete_service(): @mock_ecs -def test_update_non_existant_service(): +def test_update_non_existent_service(): client = boto3.client("ecs", region_name="us-east-1") try: client.update_service( diff --git 
a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index eb5df14c3..af1b19f09 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -1391,7 +1391,7 @@ def test_set_security_groups(): len(resp["LoadBalancers"][0]["SecurityGroups"]).should.equal(2) with assert_raises(ClientError): - client.set_security_groups(LoadBalancerArn=arn, SecurityGroups=["non_existant"]) + client.set_security_groups(LoadBalancerArn=arn, SecurityGroups=["non_existent"]) @mock_elbv2 diff --git a/tests/test_glue/test_datacatalog.py b/tests/test_glue/test_datacatalog.py index 28281b18f..31731e598 100644 --- a/tests/test_glue/test_datacatalog.py +++ b/tests/test_glue/test_datacatalog.py @@ -132,7 +132,7 @@ def test_get_table_versions(): helpers.update_table(client, database_name, table_name, table_input) version_inputs["2"] = table_input - # Updateing with an indentical input should still create a new version + # Updateing with an identical input should still create a new version helpers.update_table(client, database_name, table_name, table_input) version_inputs["3"] = table_input diff --git a/tests/test_kinesis/test_firehose.py b/tests/test_kinesis/test_firehose.py index 5e8c4aa08..4f16dc82d 100644 --- a/tests/test_kinesis/test_firehose.py +++ b/tests/test_kinesis/test_firehose.py @@ -223,7 +223,7 @@ def test_create_stream_without_redshift(): @mock_kinesis -def test_deescribe_non_existant_stream(): +def test_deescribe_non_existent_stream(): client = boto3.client("firehose", region_name="us-east-1") client.describe_delivery_stream.when.called_with( diff --git a/tests/test_kinesis/test_kinesis.py b/tests/test_kinesis/test_kinesis.py index de1764892..b3251bb0f 100644 --- a/tests/test_kinesis/test_kinesis.py +++ b/tests/test_kinesis/test_kinesis.py @@ -32,7 +32,7 @@ def test_create_cluster(): @mock_kinesis_deprecated -def test_describe_non_existant_stream(): +def test_describe_non_existent_stream(): conn = boto.kinesis.connect_to_region("us-east-1") conn.describe_stream.when.called_with("not-a-stream").should.throw( ResourceNotFoundException diff --git a/tests/test_rds/test_rds.py b/tests/test_rds/test_rds.py index 4ebea0cf3..a3e7dc9dd 100644 --- a/tests/test_rds/test_rds.py +++ b/tests/test_rds/test_rds.py @@ -68,7 +68,7 @@ def test_get_databases_paginated(): @mock_rds_deprecated -def test_describe_non_existant_database(): +def test_describe_non_existent_database(): conn = boto.rds.connect_to_region("us-west-2") conn.get_all_dbinstances.when.called_with("not-a-db").should.throw(BotoServerError) @@ -86,7 +86,7 @@ def test_delete_database(): @mock_rds_deprecated -def test_delete_non_existant_database(): +def test_delete_non_existent_database(): conn = boto.rds.connect_to_region("us-west-2") conn.delete_dbinstance.when.called_with("not-a-db").should.throw(BotoServerError) @@ -119,7 +119,7 @@ def test_get_security_groups(): @mock_rds_deprecated -def test_get_non_existant_security_group(): +def test_get_non_existent_security_group(): conn = boto.rds.connect_to_region("us-west-2") conn.get_all_dbsecurity_groups.when.called_with("not-a-sg").should.throw( BotoServerError @@ -138,7 +138,7 @@ def test_delete_database_security_group(): @mock_rds_deprecated -def test_delete_non_existant_security_group(): +def test_delete_non_existent_security_group(): conn = boto.rds.connect_to_region("us-west-2") conn.delete_dbsecurity_group.when.called_with("not-a-db").should.throw( BotoServerError diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 47b45539d..9a5a73678 100644 
--- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -312,7 +312,7 @@ def test_get_databases_paginated(): @mock_rds2 -def test_describe_non_existant_database(): +def test_describe_non_existent_database(): conn = boto3.client("rds", region_name="us-west-2") conn.describe_db_instances.when.called_with( DBInstanceIdentifier="not-a-db" @@ -378,7 +378,7 @@ def test_rename_db_instance(): @mock_rds2 -def test_modify_non_existant_database(): +def test_modify_non_existent_database(): conn = boto3.client("rds", region_name="us-west-2") conn.modify_db_instance.when.called_with( DBInstanceIdentifier="not-a-db", AllocatedStorage=20, ApplyImmediately=True @@ -403,7 +403,7 @@ def test_reboot_db_instance(): @mock_rds2 -def test_reboot_non_existant_database(): +def test_reboot_non_existent_database(): conn = boto3.client("rds", region_name="us-west-2") conn.reboot_db_instance.when.called_with( DBInstanceIdentifier="not-a-db" @@ -444,7 +444,7 @@ def test_delete_database(): @mock_rds2 -def test_delete_non_existant_database(): +def test_delete_non_existent_database(): conn = boto3.client("rds2", region_name="us-west-2") conn.delete_db_instance.when.called_with( DBInstanceIdentifier="not-a-db" @@ -663,7 +663,7 @@ def test_describe_option_group(): @mock_rds2 -def test_describe_non_existant_option_group(): +def test_describe_non_existent_option_group(): conn = boto3.client("rds", region_name="us-west-2") conn.describe_option_groups.when.called_with( OptionGroupName="not-a-option-group" @@ -688,10 +688,10 @@ def test_delete_option_group(): @mock_rds2 -def test_delete_non_existant_option_group(): +def test_delete_non_existent_option_group(): conn = boto3.client("rds", region_name="us-west-2") conn.delete_option_group.when.called_with( - OptionGroupName="non-existant" + OptionGroupName="non-existent" ).should.throw(ClientError) @@ -754,10 +754,10 @@ def test_modify_option_group_no_options(): @mock_rds2 -def test_modify_non_existant_option_group(): +def test_modify_non_existent_option_group(): conn = boto3.client("rds", region_name="us-west-2") conn.modify_option_group.when.called_with( - OptionGroupName="non-existant", + OptionGroupName="non-existent", OptionsToInclude=[ ( "OptionName", @@ -771,7 +771,7 @@ def test_modify_non_existant_option_group(): @mock_rds2 -def test_delete_non_existant_database(): +def test_delete_non_existent_database(): conn = boto3.client("rds", region_name="us-west-2") conn.delete_db_instance.when.called_with( DBInstanceIdentifier="not-a-db" @@ -1053,7 +1053,7 @@ def test_get_security_groups(): @mock_rds2 -def test_get_non_existant_security_group(): +def test_get_non_existent_security_group(): conn = boto3.client("rds", region_name="us-west-2") conn.describe_db_security_groups.when.called_with( DBSecurityGroupName="not-a-sg" @@ -1076,7 +1076,7 @@ def test_delete_database_security_group(): @mock_rds2 -def test_delete_non_existant_security_group(): +def test_delete_non_existent_security_group(): conn = boto3.client("rds", region_name="us-west-2") conn.delete_db_security_group.when.called_with( DBSecurityGroupName="not-a-db" @@ -1615,7 +1615,7 @@ def test_describe_db_parameter_group(): @mock_rds2 -def test_describe_non_existant_db_parameter_group(): +def test_describe_non_existent_db_parameter_group(): conn = boto3.client("rds", region_name="us-west-2") db_parameter_groups = conn.describe_db_parameter_groups(DBParameterGroupName="test") len(db_parameter_groups["DBParameterGroups"]).should.equal(0) @@ -1669,10 +1669,10 @@ def test_modify_db_parameter_group(): @mock_rds2 
-def test_delete_non_existant_db_parameter_group(): +def test_delete_non_existent_db_parameter_group(): conn = boto3.client("rds", region_name="us-west-2") conn.delete_db_parameter_group.when.called_with( - DBParameterGroupName="non-existant" + DBParameterGroupName="non-existent" ).should.throw(ClientError) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 682213d13..294beca87 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -566,7 +566,7 @@ def test_bucket_deletion(): # Get non-existing bucket conn.get_bucket.when.called_with("foobar").should.throw(S3ResponseError) - # Delete non-existant bucket + # Delete non-existent bucket conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError) diff --git a/tests/test_s3bucket_path/test_s3bucket_path.py b/tests/test_s3bucket_path/test_s3bucket_path.py index e204d0527..a1bdc5b02 100644 --- a/tests/test_s3bucket_path/test_s3bucket_path.py +++ b/tests/test_s3bucket_path/test_s3bucket_path.py @@ -174,7 +174,7 @@ def test_bucket_deletion(): # Get non-existing bucket conn.get_bucket.when.called_with("foobar").should.throw(S3ResponseError) - # Delete non-existant bucket + # Delete non-existent bucket conn.delete_bucket.when.called_with("foobar").should.throw(S3ResponseError) diff --git a/tests/test_sns/test_application.py b/tests/test_sns/test_application.py index e4fe93d53..1f5526219 100644 --- a/tests/test_sns/test_application.py +++ b/tests/test_sns/test_application.py @@ -88,8 +88,8 @@ def test_list_platform_applications(): conn.create_platform_application(name="application1", platform="APNS") conn.create_platform_application(name="application2", platform="APNS") - applications_repsonse = conn.list_platform_applications() - applications = applications_repsonse["ListPlatformApplicationsResponse"][ + applications_response = conn.list_platform_applications() + applications = applications_response["ListPlatformApplicationsResponse"][ "ListPlatformApplicationsResult" ]["PlatformApplications"] applications.should.have.length_of(2) @@ -101,8 +101,8 @@ def test_delete_platform_application(): conn.create_platform_application(name="application1", platform="APNS") conn.create_platform_application(name="application2", platform="APNS") - applications_repsonse = conn.list_platform_applications() - applications = applications_repsonse["ListPlatformApplicationsResponse"][ + applications_response = conn.list_platform_applications() + applications = applications_response["ListPlatformApplicationsResponse"][ "ListPlatformApplicationsResult" ]["PlatformApplications"] applications.should.have.length_of(2) @@ -110,8 +110,8 @@ def test_delete_platform_application(): application_arn = applications[0]["PlatformApplicationArn"] conn.delete_platform_application(application_arn) - applications_repsonse = conn.list_platform_applications() - applications = applications_repsonse["ListPlatformApplicationsResponse"][ + applications_response = conn.list_platform_applications() + applications = applications_response["ListPlatformApplicationsResponse"][ "ListPlatformApplicationsResult" ]["PlatformApplications"] applications.should.have.length_of(1) diff --git a/tests/test_sns/test_application_boto3.py b/tests/test_sns/test_application_boto3.py index 6f9be2926..f23b07543 100644 --- a/tests/test_sns/test_application_boto3.py +++ b/tests/test_sns/test_application_boto3.py @@ -88,8 +88,8 @@ def test_list_platform_applications(): Name="application2", Platform="APNS", Attributes={} ) - applications_repsonse = 
conn.list_platform_applications() - applications = applications_repsonse["PlatformApplications"] + applications_response = conn.list_platform_applications() + applications = applications_response["PlatformApplications"] applications.should.have.length_of(2) @@ -103,15 +103,15 @@ def test_delete_platform_application(): Name="application2", Platform="APNS", Attributes={} ) - applications_repsonse = conn.list_platform_applications() - applications = applications_repsonse["PlatformApplications"] + applications_response = conn.list_platform_applications() + applications = applications_response["PlatformApplications"] applications.should.have.length_of(2) application_arn = applications[0]["PlatformApplicationArn"] conn.delete_platform_application(PlatformApplicationArn=application_arn) - applications_repsonse = conn.list_platform_applications() - applications = applications_repsonse["PlatformApplications"] + applications_response = conn.list_platform_applications() + applications = applications_response["PlatformApplications"] applications.should.have.length_of(1) diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index d85c8fefe..51e0a9f57 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -806,7 +806,7 @@ def test_filtering_string_array_with_string_no_array_no_match(): topic.publish( Message="no_match", MessageAttributes={ - "price": {"DataType": "String.Array", "StringValue": "one hundread"} + "price": {"DataType": "String.Array", "StringValue": "one hundred"} }, ) diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index c74c3822a..413b16f80 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -208,7 +208,7 @@ def test_message_with_complex_attributes(): "ccc": {"StringValue": "testjunk", "DataType": "String"}, "aaa": {"BinaryValue": b"\x02\x03\x04", "DataType": "Binary"}, "zzz": {"DataType": "Number", "StringValue": "0230.01"}, - "öther_encodings": {"DataType": "String", "StringValue": "T\xFCst"}, + "öthere_encodings": {"DataType": "String", "StringValue": "T\xFCst"}, }, ) msg.get("MD5OfMessageBody").should.equal("58fd9edd83341c29f1aebba81c31e257") @@ -1197,7 +1197,7 @@ def test_permissions(): client.remove_permission(QueueUrl=queue_url, Label="account2") with assert_raises(ClientError): - client.remove_permission(QueueUrl=queue_url, Label="non_existant") + client.remove_permission(QueueUrl=queue_url, Label="non_existent") @mock_sqs diff --git a/tests/test_swf/utils.py b/tests/test_swf/utils.py index 48c2cbd94..2b2a2d025 100644 --- a/tests/test_swf/utils.py +++ b/tests/test_swf/utils.py @@ -5,7 +5,7 @@ from moto.swf.models import ActivityType, Domain, WorkflowType, WorkflowExecutio # Some useful constants # Here are some activity timeouts we use in moto/swf tests ; they're extracted -# from semi-real world example, the goal is mostly to have predictible and +# from semi-real world example, the goal is mostly to have predictable and # intuitive behaviour in moto/swf own tests... ACTIVITY_TASK_TIMEOUTS = { "heartbeatTimeout": "300", # 5 mins From b4127040d1c09547956e64122e2960f1ca3bf9be Mon Sep 17 00:00:00 2001 From: Asher Foa <1268088+asherf@users.noreply.github.com> Date: Thu, 23 Jan 2020 10:16:12 -0800 Subject: [PATCH 056/125] remove this change. 
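[Editor's note: this reverts one hunk of PATCH 055. "öther_encodings" appears to be a deliberately non-ASCII attribute key exercising SQS unicode handling rather than a misspelling, so the earlier "typo fix" is undone here.]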
--- tests/test_sqs/test_sqs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 413b16f80..1eb511db0 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -208,7 +208,7 @@ def test_message_with_complex_attributes(): "ccc": {"StringValue": "testjunk", "DataType": "String"}, "aaa": {"BinaryValue": b"\x02\x03\x04", "DataType": "Binary"}, "zzz": {"DataType": "Number", "StringValue": "0230.01"}, - "öthere_encodings": {"DataType": "String", "StringValue": "T\xFCst"}, + "öther_encodings": {"DataType": "String", "StringValue": "T\xFCst"}, }, ) msg.get("MD5OfMessageBody").should.equal("58fd9edd83341c29f1aebba81c31e257") From 2ae09c5335c237105f61c12342ae97aa190efd73 Mon Sep 17 00:00:00 2001 From: Roque Pinel <1685896+repinel@users.noreply.github.com> Date: Tue, 14 Aug 2018 19:00:03 -0400 Subject: [PATCH 057/125] Fix the `StatusCode` returned by lambda invoke According to the AWS documentation: ``` The HTTP status code will be in the 200 range for successful request. For the RequestResponse invocation type this status code will be 200. For the Event invocation type this status code will be 202. For the DryRun invocation type the status code will be 204. ``` --- moto/awslambda/responses.py | 8 ++++++- tests/test_awslambda/test_lambda.py | 35 +++++++++++++++++++++++++++-- 2 files changed, 40 insertions(+), 3 deletions(-) diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 46203c10d..3d9b3ee3c 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -172,7 +172,13 @@ class LambdaResponse(BaseResponse): function_name, qualifier, self.body, self.headers, response_headers ) if payload: - return 202, response_headers, payload + if request.headers['X-Amz-Invocation-Type'] == 'Event': + status_code = 202 + elif request.headers['X-Amz-Invocation-Type'] == 'DryRun': + status_code = 204 + else: + status_code = 200 + return status_code, response_headers, payload else: return 404, response_headers, "{}" diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 2835729f8..6601537fd 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -113,7 +113,7 @@ def test_invoke_requestresponse_function(): Payload=json.dumps(in_data), ) - success_result["StatusCode"].should.equal(202) + success_result["StatusCode"].should.equal(200) result_obj = json.loads( base64.b64decode(success_result["LogResult"]).decode("utf-8") ) @@ -151,6 +151,37 @@ def test_invoke_event_function(): json.loads(success_result["Payload"].read().decode("utf-8")).should.equal({}) +@mock_lambda +def test_invoke_dryrun_function(): + conn = boto3.client('lambda', 'us-west-2') + conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role=get_role_name(), + Handler='lambda_function.lambda_handler', + Code={ + 'ZipFile': get_test_zip_file1(), + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + conn.invoke.when.called_with( + FunctionName='notAFunction', + InvocationType='Event', + Payload='{}' + ).should.throw(botocore.client.ClientError) + + in_data = {'msg': 'So long and thanks for all the fish'} + success_result = conn.invoke( + FunctionName='testFunction', InvocationType='DryRun', Payload=json.dumps(in_data)) + success_result["StatusCode"].should.equal(204) + json.loads(success_result['Payload'].read().decode( + 'utf-8')).should.equal({}) + + if settings.TEST_SERVER_MODE: 
@mock_ec2 @@ -179,7 +210,7 @@ if settings.TEST_SERVER_MODE: InvocationType="RequestResponse", Payload=json.dumps(in_data), ) - result["StatusCode"].should.equal(202) + result["StatusCode"].should.equal(200) actual_payload = json.loads(result["Payload"].read().decode("utf-8")) expected_payload = {"id": vol.id, "state": vol.state, "size": vol.size} actual_payload.should.equal(expected_payload) From ee8231202a82e09982c8ade459476f3b27b21d43 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Fri, 24 Jan 2020 09:08:48 +0000 Subject: [PATCH 058/125] Fix linting --- moto/awslambda/responses.py | 4 ++-- tests/test_awslambda/test_lambda.py | 28 +++++++++++++--------------- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 3d9b3ee3c..d79336e23 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -172,9 +172,9 @@ class LambdaResponse(BaseResponse): function_name, qualifier, self.body, self.headers, response_headers ) if payload: - if request.headers['X-Amz-Invocation-Type'] == 'Event': + if request.headers["X-Amz-Invocation-Type"] == "Event": status_code = 202 - elif request.headers['X-Amz-Invocation-Type'] == 'DryRun': + elif request.headers["X-Amz-Invocation-Type"] == "DryRun": status_code = 204 else: status_code = 200 diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 6601537fd..8f8c03026 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -153,33 +153,31 @@ def test_invoke_event_function(): @mock_lambda def test_invoke_dryrun_function(): - conn = boto3.client('lambda', 'us-west-2') + conn = boto3.client("lambda", "us-west-2") conn.create_function( - FunctionName='testFunction', - Runtime='python2.7', + FunctionName="testFunction", + Runtime="python2.7", Role=get_role_name(), - Handler='lambda_function.lambda_handler', - Code={ - 'ZipFile': get_test_zip_file1(), - }, - Description='test lambda function', + Handler="lambda_function.lambda_handler", + Code={"ZipFile": get_test_zip_file1(),}, + Description="test lambda function", Timeout=3, MemorySize=128, Publish=True, ) conn.invoke.when.called_with( - FunctionName='notAFunction', - InvocationType='Event', - Payload='{}' + FunctionName="notAFunction", InvocationType="Event", Payload="{}" ).should.throw(botocore.client.ClientError) - in_data = {'msg': 'So long and thanks for all the fish'} + in_data = {"msg": "So long and thanks for all the fish"} success_result = conn.invoke( - FunctionName='testFunction', InvocationType='DryRun', Payload=json.dumps(in_data)) + FunctionName="testFunction", + InvocationType="DryRun", + Payload=json.dumps(in_data), + ) success_result["StatusCode"].should.equal(204) - json.loads(success_result['Payload'].read().decode( - 'utf-8')).should.equal({}) + json.loads(success_result["Payload"].read().decode("utf-8")).should.equal({}) if settings.TEST_SERVER_MODE: From ccd0257acc6326e17b9eb94ab40287501666e6d3 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Fri, 24 Jan 2020 10:09:56 +0000 Subject: [PATCH 059/125] Fix Lambda tests for DryRuns --- tests/test_awslambda/test_lambda.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 8f8c03026..446856f60 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -177,7 +177,6 @@ def test_invoke_dryrun_function(): Payload=json.dumps(in_data), ) 
success_result["StatusCode"].should.equal(204) - json.loads(success_result["Payload"].read().decode("utf-8")).should.equal({}) if settings.TEST_SERVER_MODE: From 324ef5af36abe9898bfbe7ed30af376b6e4d6d59 Mon Sep 17 00:00:00 2001 From: Andrey Kislyuk Date: Fri, 24 Jan 2020 16:11:44 +0000 Subject: [PATCH 060/125] Fix test --- tests/test_awslambda/test_lambda.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 2835729f8..9830f697f 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -148,7 +148,7 @@ def test_invoke_event_function(): FunctionName="testFunction", InvocationType="Event", Payload=json.dumps(in_data) ) success_result["StatusCode"].should.equal(202) - json.loads(success_result["Payload"].read().decode("utf-8")).should.equal({}) + json.loads(success_result["Payload"].read().decode("utf-8")).should.equal(in_data) if settings.TEST_SERVER_MODE: From 2a2ff32dec37a250e63430f3c5146ac8238909ab Mon Sep 17 00:00:00 2001 From: Brady Date: Thu, 23 Jan 2020 12:46:24 -0600 Subject: [PATCH 061/125] improves support for AWS lambda policy management --- moto/awslambda/exceptions.py | 10 ++ moto/awslambda/models.py | 27 ++++-- moto/awslambda/policy.py | 145 ++++++++++++++++++++++++++++ moto/awslambda/responses.py | 33 ++++--- moto/awslambda/urls.py | 3 +- tests/test_awslambda/test_lambda.py | 6 ++ tests/test_awslambda/test_policy.py | 104 ++++++++++++++++++++ 7 files changed, 310 insertions(+), 18 deletions(-) create mode 100644 moto/awslambda/policy.py create mode 100644 tests/test_awslambda/test_policy.py diff --git a/moto/awslambda/exceptions.py b/moto/awslambda/exceptions.py index 1a82977c3..08d13dce5 100644 --- a/moto/awslambda/exceptions.py +++ b/moto/awslambda/exceptions.py @@ -1,4 +1,5 @@ from botocore.client import ClientError +from moto.core.exceptions import JsonRESTError class LambdaClientError(ClientError): @@ -29,3 +30,12 @@ class InvalidRoleFormat(LambdaClientError): role, InvalidRoleFormat.pattern ) super(InvalidRoleFormat, self).__init__("ValidationException", message) + + +class PreconditionFailedException(JsonRESTError): + code = 412 + + def __init__(self, message): + super(PreconditionFailedException, self).__init__( + "PreconditionFailedException", message + ) diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 95a5c4ad5..ec327534a 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -25,6 +25,7 @@ import requests.adapters from boto3 import Session +from moto.awslambda.policy import Policy from moto.core import BaseBackend, BaseModel from moto.core.exceptions import RESTError from moto.iam.models import iam_backend @@ -47,7 +48,6 @@ from moto.core import ACCOUNT_ID logger = logging.getLogger(__name__) - try: from tempfile import TemporaryDirectory except ImportError: @@ -164,7 +164,8 @@ class LambdaFunction(BaseModel): self.logs_backend = logs_backends[self.region] self.environment_vars = spec.get("Environment", {}).get("Variables", {}) self.docker_client = docker.from_env() - self.policy = "" + self.policy = None + self.state = "Active" # Unfortunately mocking replaces this method w/o fallback enabled, so we # need to replace it if we detect it's been mocked @@ -274,11 +275,11 @@ class LambdaFunction(BaseModel): "MemorySize": self.memory_size, "Role": self.role, "Runtime": self.run_time, + "State": self.state, "Timeout": self.timeout, "Version": str(self.version), "VpcConfig": self.vpc_config, } - if 
self.environment_vars: config["Environment"] = {"Variables": self.environment_vars} @@ -709,7 +710,8 @@ class LambdaStorage(object): "versions": [], "alias": weakref.WeakValueDictionary(), } - + # instantiate a new policy for this version of the lambda + fn.policy = Policy(fn) self._arns[fn.function_arn] = fn def publish_function(self, name): @@ -1010,8 +1012,21 @@ class LambdaBackend(BaseBackend): return True return False - def add_policy(self, function_name, policy): - self.get_function(function_name).policy = policy + def add_policy_statement(self, function_name, raw): + fn = self.get_function(function_name) + fn.policy.add_statement(raw) + + def del_policy_statement(self, function_name, sid, revision=""): + fn = self.get_function(function_name) + fn.policy.del_statement(sid, revision) + + def get_policy(self, function_name): + fn = self.get_function(function_name) + return fn.policy.get_policy() + + def get_policy_wire_format(self, function_name): + fn = self.get_function(function_name) + return fn.policy.wire_format() def update_function_code(self, function_name, qualifier, body): fn = self.get_function(function_name, qualifier) diff --git a/moto/awslambda/policy.py b/moto/awslambda/policy.py new file mode 100644 index 000000000..66ec728f2 --- /dev/null +++ b/moto/awslambda/policy.py @@ -0,0 +1,145 @@ +from __future__ import unicode_literals + +import json +import uuid + +from six import string_types + +from moto.awslambda.exceptions import PreconditionFailedException + + +class Policy: + def __init__(self, parent): + self.revision = str(uuid.uuid4()) + self.statements = [] + self.parent = parent + + def __repr__(self): + return json.dumps(self.get_policy()) + + def wire_format(self): + return json.dumps( + { + "Policy": json.dumps( + { + "Version": "2012-10-17", + "Id": "default", + "Statement": self.statements, + } + ), + "RevisionId": self.revision, + } + ) + + def get_policy(self): + return { + "Policy": { + "Version": "2012-10-17", + "Id": "default", + "Statement": self.statements, + }, + "RevisionId": self.revision, + } + + # adds the raw JSON statement to the policy + def add_statement(self, raw): + policy = json.loads(raw, object_hook=self.decode_policy) + if len(policy.revision) > 0 and self.revision != policy.revision: + raise PreconditionFailedException( + "The RevisionId provided does not match the latest RevisionId" + " for the Lambda function or alias. Call the GetFunction or the GetAlias API to retrieve" + " the latest RevisionId for your resource." + ) + self.statements.append(policy.statements[0]) + self.revision = str(uuid.uuid4()) + + # removes the statement that matches 'sid' from the policy + def del_statement(self, sid, revision=""): + if len(revision) > 0 and self.revision != revision: + raise PreconditionFailedException( + "The RevisionId provided does not match the latest RevisionId" + " for the Lambda function or alias. Call the GetFunction or the GetAlias API to retrieve" + " the latest RevisionId for your resource." 
+ ) + for statement in self.statements: + if "Sid" in statement and statement["Sid"] == sid: + self.statements.remove(statement) + + # converts AddPermission request to PolicyStatement + # https://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html + def decode_policy(self, obj): + # import pydevd + # pydevd.settrace("localhost", port=5678) + policy = Policy(self.parent) + policy.revision = obj.get("RevisionId", "") + + # set some default values if these keys are not set + self.ensure_set(obj, "Effect", "Allow") + self.ensure_set(obj, "Resource", self.parent.function_arn + ":$LATEST") + self.ensure_set(obj, "StatementId", str(uuid.uuid4())) + + # transform field names and values + self.transform_property(obj, "StatementId", "Sid", self.nop_formatter) + self.transform_property(obj, "Principal", "Principal", self.principal_formatter) + self.transform_property( + obj, "SourceArn", "SourceArn", self.source_arn_formatter + ) + self.transform_property( + obj, "SourceAccount", "SourceAccount", self.source_account_formatter + ) + + # remove RevisionId and EventSourceToken if they are set + self.remove_if_set(obj, ["RevisionId", "EventSourceToken"]) + + # merge conditional statements into a single map under the Condition key + self.condition_merge(obj) + + # append resulting statement to policy.statements + policy.statements.append(obj) + + return policy + + def nop_formatter(self, obj): + return obj + + def ensure_set(self, obj, key, value): + if key not in obj: + obj[key] = value + + def principal_formatter(self, obj): + if isinstance(obj, string_types): + if obj.endswith(".amazonaws.com"): + return {"Service": obj} + if obj.endswith(":root"): + return {"AWS": obj} + return obj + + def source_account_formatter(self, obj): + return {"StringEquals": {"AWS:SourceAccount": obj}} + + def source_arn_formatter(self, obj): + return {"ArnLike": {"AWS:SourceArn": obj}} + + def transform_property(self, obj, old_name, new_name, formatter): + if old_name in obj: + obj[new_name] = formatter(obj[old_name]) + if new_name != old_name: + del obj[old_name] + + def remove_if_set(self, obj, keys): + for key in keys: + if key in obj: + del obj[key] + + def condition_merge(self, obj): + if "SourceArn" in obj: + if "Condition" not in obj: + obj["Condition"] = {} + obj["Condition"].update(obj["SourceArn"]) + del obj["SourceArn"] + + if "SourceAccount" in obj: + if "Condition" not in obj: + obj["Condition"] = {} + obj["Condition"].update(obj["SourceAccount"]) + del obj["SourceAccount"] diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 46203c10d..e1713ce52 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -120,8 +120,12 @@ class LambdaResponse(BaseResponse): self.setup_class(request, full_url, headers) if request.method == "GET": return self._get_policy(request, full_url, headers) - if request.method == "POST": + elif request.method == "POST": return self._add_policy(request, full_url, headers) + elif request.method == "DELETE": + return self._del_policy(request, full_url, headers, self.querystring) + else: + raise ValueError("Cannot handle {0} request".format(request.method)) def configuration(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -141,9 +145,9 @@ class LambdaResponse(BaseResponse): path = request.path if hasattr(request, "path") else path_url(request.url) function_name = path.split("/")[-2] if self.lambda_backend.get_function(function_name): - policy = self.body - self.lambda_backend.add_policy(function_name, 
policy) - return 200, {}, json.dumps(dict(Statement=policy)) + statement = self.body + self.lambda_backend.add_policy_statement(function_name, statement) + return 200, {}, json.dumps({"Statement": statement}) else: return 404, {}, "{}" @@ -151,14 +155,21 @@ class LambdaResponse(BaseResponse): path = request.path if hasattr(request, "path") else path_url(request.url) function_name = path.split("/")[-2] if self.lambda_backend.get_function(function_name): - lambda_function = self.lambda_backend.get_function(function_name) - return ( - 200, - {}, - json.dumps( - dict(Policy='{"Statement":[' + lambda_function.policy + "]}") - ), + out = self.lambda_backend.get_policy_wire_format(function_name) + return 200, {}, out + else: + return 404, {}, "{}" + + def _del_policy(self, request, full_url, headers, querystring): + path = request.path if hasattr(request, "path") else path_url(request.url) + function_name = path.split("/")[-3] + statement_id = path.split("/")[-1].split("?")[0] + revision = querystring.get("RevisionId", "") + if self.lambda_backend.get_function(function_name): + self.lambda_backend.del_policy_statement( + function_name, statement_id, revision ) + return 204, {}, "{}" else: return 404, {}, "{}" diff --git a/moto/awslambda/urls.py b/moto/awslambda/urls.py index da7346817..6c9b736a6 100644 --- a/moto/awslambda/urls.py +++ b/moto/awslambda/urls.py @@ -6,7 +6,7 @@ url_bases = ["https?://lambda.(.+).amazonaws.com"] response = LambdaResponse() url_paths = { - "{0}/(?P[^/]+)/functions/?$": response.root, + r"{0}/(?P[^/]+)/functions/?$": response.root, r"{0}/(?P[^/]+)/functions/(?P[\w_:%-]+)/?$": response.function, r"{0}/(?P[^/]+)/functions/(?P[\w_-]+)/versions/?$": response.versions, r"{0}/(?P[^/]+)/event-source-mappings/?$": response.event_source_mappings, @@ -14,6 +14,7 @@ url_paths = { r"{0}/(?P[^/]+)/functions/(?P[\w_-]+)/invocations/?$": response.invoke, r"{0}/(?P[^/]+)/functions/(?P[\w_-]+)/invoke-async/?$": response.invoke_async, r"{0}/(?P[^/]+)/tags/(?P.+)": response.tag, + r"{0}/(?P[^/]+)/functions/(?P[\w_-]+)/policy/(?P[\w_-]+)$": response.policy, r"{0}/(?P[^/]+)/functions/(?P[\w_-]+)/policy/?$": response.policy, r"{0}/(?P[^/]+)/functions/(?P[\w_-]+)/configuration/?$": response.configuration, r"{0}/(?P[^/]+)/functions/(?P[\w_-]+)/code/?$": response.code, diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 6fd97e325..5f7eb0cfc 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -324,6 +324,7 @@ def test_create_function_from_aws_bucket(): "VpcId": "vpc-123abc", }, "ResponseMetadata": {"HTTPStatusCode": 201}, + "State": "Active", } ) @@ -367,6 +368,7 @@ def test_create_function_from_zipfile(): "Version": "1", "VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []}, "ResponseMetadata": {"HTTPStatusCode": 201}, + "State": "Active", } ) @@ -631,6 +633,7 @@ def test_list_create_list_get_delete_list(): "Timeout": 3, "Version": "$LATEST", "VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []}, + "State": "Active", }, "ResponseMetadata": {"HTTPStatusCode": 200}, } @@ -827,6 +830,7 @@ def test_get_function_created_with_zipfile(): "Timeout": 3, "Version": "$LATEST", "VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []}, + "State": "Active", } ) @@ -1436,6 +1440,7 @@ def test_update_function_zip(): "Timeout": 3, "Version": "2", "VpcConfig": {"SecurityGroupIds": [], "SubnetIds": []}, + "State": "Active", } ) @@ -1498,6 +1503,7 @@ def test_update_function_s3(): "Timeout": 3, "Version": "2", "VpcConfig": 
{"SecurityGroupIds": [], "SubnetIds": []}, + "State": "Active", } ) diff --git a/tests/test_awslambda/test_policy.py b/tests/test_awslambda/test_policy.py new file mode 100644 index 000000000..2571926eb --- /dev/null +++ b/tests/test_awslambda/test_policy.py @@ -0,0 +1,104 @@ +from __future__ import unicode_literals + +import unittest +import json + +from moto.awslambda.policy import Policy + + +class MockLambdaFunction: + def __init__(self, arn): + self.function_arn = arn + self.policy = None + + +class TC: + def __init__(self, lambda_arn, statement, expected): + self.statement = statement + self.expected = expected + self.fn = MockLambdaFunction(lambda_arn) + self.policy = Policy(self.fn) + + def Run(self, parent): + self.policy.add_statement(json.dumps(self.statement)) + parent.assertDictEqual(self.expected, self.policy.statements[0]) + + sid = self.statement.get("StatementId", None) + if sid == None: + raise "TestCase.statement does not contain StatementId" + + self.policy.del_statement(sid) + parent.assertEqual([], self.policy.statements) + + +class TestPolicy(unittest.TestCase): + def test(self): + tt = [ + TC( + # lambda_arn + "arn", + { # statement + "StatementId": "statement0", + "Action": "lambda:InvokeFunction", + "FunctionName": "function_name", + "Principal": "events.amazonaws.com", + }, + { # expected + "Action": "lambda:InvokeFunction", + "FunctionName": "function_name", + "Principal": {"Service": "events.amazonaws.com"}, + "Effect": "Allow", + "Resource": "arn:$LATEST", + "Sid": "statement0", + }, + ), + TC( + # lambda_arn + "arn", + { # statement + "StatementId": "statement1", + "Action": "lambda:InvokeFunction", + "FunctionName": "function_name", + "Principal": "events.amazonaws.com", + "SourceArn": "arn:aws:events:us-east-1:111111111111:rule/rule_name", + }, + { + "Action": "lambda:InvokeFunction", + "FunctionName": "function_name", + "Principal": {"Service": "events.amazonaws.com"}, + "Effect": "Allow", + "Resource": "arn:$LATEST", + "Sid": "statement1", + "Condition": { + "ArnLike": { + "AWS:SourceArn": "arn:aws:events:us-east-1:111111111111:rule/rule_name" + } + }, + }, + ), + TC( + # lambda_arn + "arn", + { # statement + "StatementId": "statement2", + "Action": "lambda:InvokeFunction", + "FunctionName": "function_name", + "Principal": "events.amazonaws.com", + "SourceAccount": "111111111111", + }, + { # expected + "Action": "lambda:InvokeFunction", + "FunctionName": "function_name", + "Principal": {"Service": "events.amazonaws.com"}, + "Effect": "Allow", + "Resource": "arn:$LATEST", + "Sid": "statement2", + "Condition": { + "StringEquals": {"AWS:SourceAccount": "111111111111"} + }, + }, + ), + ] + + for tc in tt: + tc.Run(self) From cb07d17e1388cce91abe7e8c9de55d04af58f1f8 Mon Sep 17 00:00:00 2001 From: Brady Date: Mon, 27 Jan 2020 10:35:46 -0500 Subject: [PATCH 062/125] don't use table of test cases and reduce duplication in get_policy method --- moto/awslambda/policy.py | 19 +---- tests/test_awslambda/test_policy.py | 116 ++++++++-------------------- 2 files changed, 35 insertions(+), 100 deletions(-) diff --git a/moto/awslambda/policy.py b/moto/awslambda/policy.py index 66ec728f2..495e2cee6 100644 --- a/moto/awslambda/policy.py +++ b/moto/awslambda/policy.py @@ -14,22 +14,10 @@ class Policy: self.statements = [] self.parent = parent - def __repr__(self): - return json.dumps(self.get_policy()) - def wire_format(self): - return json.dumps( - { - "Policy": json.dumps( - { - "Version": "2012-10-17", - "Id": "default", - "Statement": self.statements, - } - ), - 
"RevisionId": self.revision, - } - ) + p = self.get_policy() + p["Policy"] = json.dumps(p["Policy"]) + return json.dumps(p) def get_policy(self): return { @@ -81,6 +69,7 @@ class Policy: # transform field names and values self.transform_property(obj, "StatementId", "Sid", self.nop_formatter) self.transform_property(obj, "Principal", "Principal", self.principal_formatter) + self.transform_property( obj, "SourceArn", "SourceArn", self.source_arn_formatter ) diff --git a/tests/test_awslambda/test_policy.py b/tests/test_awslambda/test_policy.py index 2571926eb..d3fe3b0b2 100644 --- a/tests/test_awslambda/test_policy.py +++ b/tests/test_awslambda/test_policy.py @@ -12,93 +12,39 @@ class MockLambdaFunction: self.policy = None -class TC: - def __init__(self, lambda_arn, statement, expected): - self.statement = statement - self.expected = expected - self.fn = MockLambdaFunction(lambda_arn) - self.policy = Policy(self.fn) +class TestPolicy(unittest.TestCase): + def test_policy(self): + policy = Policy(MockLambdaFunction("arn")) + statement = { + "StatementId": "statement0", + "Action": "lambda:InvokeFunction", + "FunctionName": "function_name", + "Principal": "events.amazonaws.com", + "SourceArn": "arn:aws:events:us-east-1:111111111111:rule/rule_name", + "SourceAccount": "111111111111", + } - def Run(self, parent): - self.policy.add_statement(json.dumps(self.statement)) - parent.assertDictEqual(self.expected, self.policy.statements[0]) + expected = { + "Action": "lambda:InvokeFunction", + "FunctionName": "function_name", + "Principal": {"Service": "events.amazonaws.com"}, + "Effect": "Allow", + "Resource": "arn:$LATEST", + "Sid": "statement0", + "Condition": { + "ArnLike": { + "AWS:SourceArn": "arn:aws:events:us-east-1:111111111111:rule/rule_name", + }, + "StringEquals": {"AWS:SourceAccount": "111111111111"}, + }, + } - sid = self.statement.get("StatementId", None) + policy.add_statement(json.dumps(statement)) + self.assertDictEqual(expected, policy.statements[0]) + + sid = statement.get("StatementId", None) if sid == None: raise "TestCase.statement does not contain StatementId" - self.policy.del_statement(sid) - parent.assertEqual([], self.policy.statements) - - -class TestPolicy(unittest.TestCase): - def test(self): - tt = [ - TC( - # lambda_arn - "arn", - { # statement - "StatementId": "statement0", - "Action": "lambda:InvokeFunction", - "FunctionName": "function_name", - "Principal": "events.amazonaws.com", - }, - { # expected - "Action": "lambda:InvokeFunction", - "FunctionName": "function_name", - "Principal": {"Service": "events.amazonaws.com"}, - "Effect": "Allow", - "Resource": "arn:$LATEST", - "Sid": "statement0", - }, - ), - TC( - # lambda_arn - "arn", - { # statement - "StatementId": "statement1", - "Action": "lambda:InvokeFunction", - "FunctionName": "function_name", - "Principal": "events.amazonaws.com", - "SourceArn": "arn:aws:events:us-east-1:111111111111:rule/rule_name", - }, - { - "Action": "lambda:InvokeFunction", - "FunctionName": "function_name", - "Principal": {"Service": "events.amazonaws.com"}, - "Effect": "Allow", - "Resource": "arn:$LATEST", - "Sid": "statement1", - "Condition": { - "ArnLike": { - "AWS:SourceArn": "arn:aws:events:us-east-1:111111111111:rule/rule_name" - } - }, - }, - ), - TC( - # lambda_arn - "arn", - { # statement - "StatementId": "statement2", - "Action": "lambda:InvokeFunction", - "FunctionName": "function_name", - "Principal": "events.amazonaws.com", - "SourceAccount": "111111111111", - }, - { # expected - "Action": "lambda:InvokeFunction", - 
"FunctionName": "function_name", - "Principal": {"Service": "events.amazonaws.com"}, - "Effect": "Allow", - "Resource": "arn:$LATEST", - "Sid": "statement2", - "Condition": { - "StringEquals": {"AWS:SourceAccount": "111111111111"} - }, - }, - ), - ] - - for tc in tt: - tc.Run(self) + policy.del_statement(sid) + self.assertEqual([], policy.statements) From 142e237b0bd4502af89942aea5e8256cb7535198 Mon Sep 17 00:00:00 2001 From: Owen Farrell Date: Mon, 27 Jan 2020 12:04:22 -0500 Subject: [PATCH 063/125] Made UserName parameter optional for IAM access key functions --- moto/iam/responses.py | 16 +++++++++++ tests/test_iam/test_iam.py | 57 ++++++++++++++++++++++++++++++++++++-- 2 files changed, 71 insertions(+), 2 deletions(-) diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 06561d4c4..12501769e 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -563,6 +563,10 @@ class IamResponse(BaseResponse): def create_access_key(self): user_name = self._get_param("UserName") + if not user_name: + access_key_id = self.get_current_user() + access_key = iam_backend.get_access_key_last_used(access_key_id) + user_name = access_key["user_name"] key = iam_backend.create_access_key(user_name) template = self.response_template(CREATE_ACCESS_KEY_TEMPLATE) @@ -572,6 +576,10 @@ class IamResponse(BaseResponse): user_name = self._get_param("UserName") access_key_id = self._get_param("AccessKeyId") status = self._get_param("Status") + if not user_name: + access_key = iam_backend.get_access_key_last_used(access_key_id) + user_name = access_key["user_name"] + iam_backend.update_access_key(user_name, access_key_id, status) template = self.response_template(GENERIC_EMPTY_TEMPLATE) return template.render(name="UpdateAccessKey") @@ -587,6 +595,11 @@ class IamResponse(BaseResponse): def list_access_keys(self): user_name = self._get_param("UserName") + if not user_name: + access_key_id = self.get_current_user() + access_key = iam_backend.get_access_key_last_used(access_key_id) + user_name = access_key["user_name"] + keys = iam_backend.get_all_access_keys(user_name) template = self.response_template(LIST_ACCESS_KEYS_TEMPLATE) return template.render(user_name=user_name, keys=keys) @@ -594,6 +607,9 @@ class IamResponse(BaseResponse): def delete_access_key(self): user_name = self._get_param("UserName") access_key_id = self._get_param("AccessKeyId") + if not user_name: + access_key = iam_backend.get_access_key_last_used(access_key_id) + user_name = access_key["user_name"] iam_backend.delete_access_key(access_key_id, user_name) template = self.response_template(GENERIC_EMPTY_TEMPLATE) diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index 9a2c1f0dd..995895437 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -785,7 +785,7 @@ def test_delete_login_profile(): conn.delete_login_profile("my-user") -@mock_iam() +@mock_iam def test_create_access_key(): conn = boto3.client("iam", region_name="us-east-1") with assert_raises(ClientError): @@ -798,6 +798,19 @@ def test_create_access_key(): access_key["AccessKeyId"].should.have.length_of(20) access_key["SecretAccessKey"].should.have.length_of(40) assert access_key["AccessKeyId"].startswith("AKIA") + conn = boto3.client( + "iam", + region_name="us-east-1", + aws_access_key_id=access_key["AccessKeyId"], + aws_secret_access_key=access_key["SecretAccessKey"], + ) + access_key = conn.create_access_key()["AccessKey"] + ( + datetime.utcnow() - access_key["CreateDate"].replace(tzinfo=None) + ).seconds.should.be.within(0, 10) + 
access_key["AccessKeyId"].should.have.length_of(20) + access_key["SecretAccessKey"].should.have.length_of(40) + assert access_key["AccessKeyId"].startswith("AKIA") @mock_iam_deprecated() @@ -825,8 +838,35 @@ def test_get_all_access_keys(): ) +@mock_iam +def test_list_access_keys(): + conn = boto3.client("iam", region_name="us-east-1") + conn.create_user(UserName="my-user") + response = conn.list_access_keys(UserName="my-user") + assert_equals( + response["AccessKeyMetadata"], [], + ) + access_key = conn.create_access_key(UserName="my-user")["AccessKey"] + response = conn.list_access_keys(UserName="my-user") + assert_equals( + sorted(response["AccessKeyMetadata"][0].keys()), + sorted(["Status", "CreateDate", "UserName", "AccessKeyId"]), + ) + conn = boto3.client( + "iam", + region_name="us-east-1", + aws_access_key_id=access_key["AccessKeyId"], + aws_secret_access_key=access_key["SecretAccessKey"], + ) + response = conn.list_access_keys() + assert_equals( + sorted(response["AccessKeyMetadata"][0].keys()), + sorted(["Status", "CreateDate", "UserName", "AccessKeyId"]), + ) + + @mock_iam_deprecated() -def test_delete_access_key(): +def test_delete_access_key_deprecated(): conn = boto.connect_iam() conn.create_user("my-user") access_key_id = conn.create_access_key("my-user")["create_access_key_response"][ @@ -835,6 +875,16 @@ def test_delete_access_key(): conn.delete_access_key(access_key_id, "my-user") +@mock_iam +def test_delete_access_key(): + conn = boto3.client("iam", region_name="us-east-1") + conn.create_user(UserName="my-user") + key = conn.create_access_key(UserName="my-user")["AccessKey"] + conn.delete_access_key(AccessKeyId=key["AccessKeyId"], UserName="my-user") + key = conn.create_access_key(UserName="my-user")["AccessKey"] + conn.delete_access_key(AccessKeyId=key["AccessKeyId"]) + + @mock_iam() def test_mfa_devices(): # Test enable device @@ -1326,6 +1376,9 @@ def test_update_access_key(): ) resp = client.list_access_keys(UserName=username) resp["AccessKeyMetadata"][0]["Status"].should.equal("Inactive") + client.update_access_key(AccessKeyId=key["AccessKeyId"], Status="Active") + resp = client.list_access_keys(UserName=username) + resp["AccessKeyMetadata"][0]["Status"].should.equal("Active") @mock_iam From 55a1c2fb590b333e43353a7bd01790990da07a5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabr=C3=ADcio=20Matt=C3=A9?= Date: Tue, 28 Jan 2020 20:45:19 -0300 Subject: [PATCH 064/125] Support greedy resource path --- moto/apigateway/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index fd2fb7064..748a09e0f 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -556,7 +556,7 @@ class APIGatewayBackend(BaseBackend): return resource def create_resource(self, function_id, parent_resource_id, path_part): - if not re.match("^\\{?[a-zA-Z0-9._-]+\\}?$", path_part): + if not re.match("^\\{?[a-zA-Z0-9._-]+\\+?\\}?$", path_part): raise InvalidResourcePathException() api = self.get_rest_api(function_id) child = api.add_child(path=path_part, parent_id=parent_resource_id) From be8eab18e91f3b10e6b741c195e2b66cc4407ccf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabr=C3=ADcio=20Matt=C3=A9?= Date: Tue, 28 Jan 2020 20:56:13 -0300 Subject: [PATCH 065/125] Update InvalidResourcePathException message --- moto/apigateway/exceptions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/apigateway/exceptions.py b/moto/apigateway/exceptions.py index 434ebc467..2a306ab99 100644 --- 
a/moto/apigateway/exceptions.py +++ b/moto/apigateway/exceptions.py @@ -39,7 +39,7 @@ class InvalidResourcePathException(BadRequestException): def __init__(self): super(InvalidResourcePathException, self).__init__( "BadRequestException", - "Resource's path part only allow a-zA-Z0-9._- and curly braces at the beginning and the end.", + "Resource's path part only allow a-zA-Z0-9._- and curly braces at the beginning and the end and an optional plus sign before the closing brace.", ) From dd243d72c2070aa0244a00e93c43f7309c99adde Mon Sep 17 00:00:00 2001 From: Brady Date: Wed, 29 Jan 2020 09:46:09 -0500 Subject: [PATCH 066/125] switch to using sure for assertions --- tests/test_awslambda/test_policy.py | 63 ++++++++++++++--------------- 1 file changed, 31 insertions(+), 32 deletions(-) diff --git a/tests/test_awslambda/test_policy.py b/tests/test_awslambda/test_policy.py index d3fe3b0b2..92ef0087b 100644 --- a/tests/test_awslambda/test_policy.py +++ b/tests/test_awslambda/test_policy.py @@ -1,7 +1,7 @@ from __future__ import unicode_literals -import unittest import json +import sure from moto.awslambda.policy import Policy @@ -12,39 +12,38 @@ class MockLambdaFunction: self.policy = None -class TestPolicy(unittest.TestCase): - def test_policy(self): - policy = Policy(MockLambdaFunction("arn")) - statement = { - "StatementId": "statement0", - "Action": "lambda:InvokeFunction", - "FunctionName": "function_name", - "Principal": "events.amazonaws.com", - "SourceArn": "arn:aws:events:us-east-1:111111111111:rule/rule_name", - "SourceAccount": "111111111111", - } +def test_policy(): + policy = Policy(MockLambdaFunction("arn")) + statement = { + "StatementId": "statement0", + "Action": "lambda:InvokeFunction", + "FunctionName": "function_name", + "Principal": "events.amazonaws.com", + "SourceArn": "arn:aws:events:us-east-1:111111111111:rule/rule_name", + "SourceAccount": "111111111111", + } - expected = { - "Action": "lambda:InvokeFunction", - "FunctionName": "function_name", - "Principal": {"Service": "events.amazonaws.com"}, - "Effect": "Allow", - "Resource": "arn:$LATEST", - "Sid": "statement0", - "Condition": { - "ArnLike": { - "AWS:SourceArn": "arn:aws:events:us-east-1:111111111111:rule/rule_name", - }, - "StringEquals": {"AWS:SourceAccount": "111111111111"}, + expected = { + "Action": "lambda:InvokeFunction", + "FunctionName": "function_name", + "Principal": {"Service": "events.amazonaws.com"}, + "Effect": "Allow", + "Resource": "arn:$LATEST", + "Sid": "statement0", + "Condition": { + "ArnLike": { + "AWS:SourceArn": "arn:aws:events:us-east-1:111111111111:rule/rule_name", }, - } + "StringEquals": {"AWS:SourceAccount": "111111111111"}, + }, + } - policy.add_statement(json.dumps(statement)) - self.assertDictEqual(expected, policy.statements[0]) + policy.add_statement(json.dumps(statement)) + expected.should.be.equal(policy.statements[0]) - sid = statement.get("StatementId", None) - if sid == None: - raise "TestCase.statement does not contain StatementId" + sid = statement.get("StatementId", None) + if sid == None: + raise "TestCase.statement does not contain StatementId" - policy.del_statement(sid) - self.assertEqual([], policy.statements) + policy.del_statement(sid) + [].should.be.equal(policy.statements) From cf65cfc6ec3a5ae50498ea9938ba8dbe826bb8f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabr=C3=ADcio=20Matt=C3=A9?= Date: Wed, 29 Jan 2020 16:28:37 -0300 Subject: [PATCH 067/125] Update API Gateway resource name test --- tests/test_apigateway/test_apigateway.py | 6 +++--- 1 file changed, 3 
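Together, the relaxed regex and the updated message mean API Gateway mocks now accept greedy proxy resources. A rough sketch of the resulting behavior (API name and region are arbitrary; the pattern quoted in the comment restates the one from the patch):

    import boto3
    from moto import mock_apigateway

    @mock_apigateway
    def demo_greedy_path():
        client = boto3.client("apigateway", region_name="us-east-1")
        api_id = client.create_rest_api(name="demo")["id"]
        root_id = client.get_resources(restApiId=api_id)["items"][0]["id"]
        # "{proxy+}" matches the new pattern ^\{?[a-zA-Z0-9._-]+\+?\}?$,
        # while a bare "us+er" still fails: the plus sign is only legal
        # immediately before the closing brace.
        client.create_resource(restApiId=api_id, parentId=root_id, pathPart="{proxy+}")

    demo_greedy_path()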
insertions(+), 3 deletions(-) diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 59c0c07f6..601aa2952 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -58,15 +58,15 @@ def test_create_resource__validate_name(): 0 ]["id"] - invalid_names = ["/users", "users/", "users/{user_id}", "us{er"] - valid_names = ["users", "{user_id}", "user_09", "good-dog"] + invalid_names = ["/users", "users/", "users/{user_id}", "us{er", "us+er"] + valid_names = ["users", "{user_id}", "{proxy+}", "user_09", "good-dog"] # All invalid names should throw an exception for name in invalid_names: with assert_raises(ClientError) as ex: client.create_resource(restApiId=api_id, parentId=root_id, pathPart=name) ex.exception.response["Error"]["Code"].should.equal("BadRequestException") ex.exception.response["Error"]["Message"].should.equal( - "Resource's path part only allow a-zA-Z0-9._- and curly braces at the beginning and the end." + "Resource's path part only allow a-zA-Z0-9._- and curly braces at the beginning and the end and an optional plus sign before the closing brace." ) # All valid names should go through for name in valid_names: From c877266f8648664045b7be9fa2519269c898b825 Mon Sep 17 00:00:00 2001 From: Brandon Bradley Date: Wed, 29 Jan 2020 16:27:56 -0600 Subject: [PATCH 068/125] fix 500 error on non-existing stack name --- moto/cloudformation/models.py | 2 ++ moto/cloudformation/responses.py | 3 +++ 2 files changed, 5 insertions(+) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index 0ae5d1ae4..c05783fb4 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -677,6 +677,8 @@ class CloudFormationBackend(BaseBackend): def list_stack_resources(self, stack_name_or_id): stack = self.get_stack(stack_name_or_id) + if stack is None: + return [] return stack.stack_resources def delete_stack(self, name_or_stack_id): diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index bf68a6325..7effb03fa 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -229,6 +229,9 @@ class CloudFormationResponse(BaseResponse): stack_name_or_id = self._get_param("StackName") resources = self.cloudformation_backend.list_stack_resources(stack_name_or_id) + if not resources: + raise ValidationError(stack_name_or_id) + template = self.response_template(LIST_STACKS_RESOURCES_RESPONSE) return template.render(resources=resources) From 44024ab74b8a3a4bb300a85d0c5f679d6465d3dd Mon Sep 17 00:00:00 2001 From: gruebel Date: Thu, 30 Jan 2020 22:42:27 +0100 Subject: [PATCH 069/125] Fix sqs permission handling & add more error handling --- moto/sqs/exceptions.py | 25 +++++ moto/sqs/models.py | 101 ++++++++++++++++--- tests/test_sqs/test_sqs.py | 198 +++++++++++++++++++++++++++++++++++-- 3 files changed, 303 insertions(+), 21 deletions(-) diff --git a/moto/sqs/exceptions.py b/moto/sqs/exceptions.py index 01123d777..77d7b9fb2 100644 --- a/moto/sqs/exceptions.py +++ b/moto/sqs/exceptions.py @@ -99,3 +99,28 @@ class InvalidAttributeName(RESTError): super(InvalidAttributeName, self).__init__( "InvalidAttributeName", "Unknown Attribute {}.".format(attribute_name) ) + + +class InvalidParameterValue(RESTError): + code = 400 + + def __init__(self, message): + super(InvalidParameterValue, self).__init__("InvalidParameterValue", message) + + +class MissingParameter(RESTError): + code = 400 + + def __init__(self): + super(MissingParameter, 
self).__init__( + "MissingParameter", "The request must contain the parameter Actions." + ) + + +class OverLimit(RESTError): + code = 403 + + def __init__(self, count): + super(OverLimit, self).__init__( + "OverLimit", "{} Actions were found, maximum allowed is 7.".format(count) + ) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 8b8263e3c..8fbe90108 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -30,6 +30,9 @@ from .exceptions import ( BatchEntryIdsNotDistinct, TooManyEntriesInBatchRequest, InvalidAttributeName, + InvalidParameterValue, + MissingParameter, + OverLimit, ) from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID @@ -183,6 +186,7 @@ class Queue(BaseModel): "MaximumMessageSize", "MessageRetentionPeriod", "QueueArn", + "Policy", "RedrivePolicy", "ReceiveMessageWaitTimeSeconds", "VisibilityTimeout", @@ -195,6 +199,8 @@ class Queue(BaseModel): "DeleteMessage", "GetQueueAttributes", "GetQueueUrl", + "ListDeadLetterSourceQueues", + "PurgeQueue", "ReceiveMessage", "SendMessage", ) @@ -273,7 +279,7 @@ class Queue(BaseModel): if key in bool_fields: value = value == "true" - if key == "RedrivePolicy" and value is not None: + if key in ["Policy", "RedrivePolicy"] and value is not None: continue setattr(self, camelcase_to_underscores(key), value) @@ -281,6 +287,9 @@ class Queue(BaseModel): if attributes.get("RedrivePolicy", None): self._setup_dlq(attributes["RedrivePolicy"]) + if attributes.get("Policy"): + self.policy = attributes["Policy"] + self.last_modified_timestamp = now def _setup_dlq(self, policy): @@ -472,6 +481,24 @@ class Queue(BaseModel): return self.name raise UnformattedGetAttTemplateException() + @property + def policy(self): + if self._policy_json.get("Statement"): + return json.dumps(self._policy_json) + else: + return None + + @policy.setter + def policy(self, policy): + if policy: + self._policy_json = json.loads(policy) + else: + self._policy_json = { + "Version": "2012-10-17", + "Id": "{}/SQSDefaultPolicy".format(self.queue_arn), + "Statement": [], + } + class SQSBackend(BaseBackend): def __init__(self, region_name): @@ -802,25 +829,75 @@ class SQSBackend(BaseBackend): def add_permission(self, queue_name, actions, account_ids, label): queue = self.get_queue(queue_name) - if actions is None or len(actions) == 0: - raise RESTError("InvalidParameterValue", "Need at least one Action") - if account_ids is None or len(account_ids) == 0: - raise RESTError("InvalidParameterValue", "Need at least one Account ID") + if not actions: + raise MissingParameter() - if not all([item in Queue.ALLOWED_PERMISSIONS for item in actions]): - raise RESTError("InvalidParameterValue", "Invalid permissions") + if not account_ids: + raise InvalidParameterValue( + "Value [] for parameter PrincipalId is invalid. Reason: Unable to verify." + ) - queue.permissions[label] = (account_ids, actions) + count = len(actions) + if count > 7: + raise OverLimit(count) + + invalid_action = next( + (action for action in actions if action not in Queue.ALLOWED_PERMISSIONS), + None, + ) + if invalid_action: + raise InvalidParameterValue( + "Value SQS:{} for parameter ActionName is invalid. " + "Reason: Only the queue owner is allowed to invoke this action.".format( + invalid_action + ) + ) + + policy = queue._policy_json + statement = next( + ( + statement + for statement in policy["Statement"] + if statement["Sid"] == label + ), + None, + ) + if statement: + raise InvalidParameterValue( + "Value {} for parameter Label is invalid. 
" + "Reason: Already exists.".format(label) + ) + + principals = [ + "arn:aws:iam::{}:root".format(account_id) for account_id in account_ids + ] + actions = ["SQS:{}".format(action) for action in actions] + + statement = { + "Sid": label, + "Effect": "Allow", + "Principal": {"AWS": principals[0] if len(principals) == 1 else principals}, + "Action": actions[0] if len(actions) == 1 else actions, + "Resource": queue.queue_arn, + } + + queue._policy_json["Statement"].append(statement) def remove_permission(self, queue_name, label): queue = self.get_queue(queue_name) - if label not in queue.permissions: - raise RESTError( - "InvalidParameterValue", "Permission doesnt exist for the given label" + statements = queue._policy_json["Statement"] + statements_new = [ + statement for statement in statements if statement["Sid"] != label + ] + + if len(statements) == len(statements_new): + raise InvalidParameterValue( + "Value {} for parameter Label is invalid. " + "Reason: can't find label on existing policy.".format(label) ) - del queue.permissions[label] + queue._policy_json["Statement"] = statements_new def tag_queue(self, queue_name, tags): queue = self.get_queue(queue_name) diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 1eb511db0..93d388117 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -132,6 +132,35 @@ def test_create_queue_with_tags(): ) +@mock_sqs +def test_create_queue_with_policy(): + client = boto3.client("sqs", region_name="us-east-1") + response = client.create_queue( + QueueName="test-queue", + Attributes={ + "Policy": json.dumps( + { + "Version": "2012-10-17", + "Id": "test", + "Statement": [{"Effect": "Allow", "Principal": "*", "Action": "*"}], + } + ) + }, + ) + queue_url = response["QueueUrl"] + + response = client.get_queue_attributes( + QueueUrl=queue_url, AttributeNames=["Policy"] + ) + json.loads(response["Attributes"]["Policy"]).should.equal( + { + "Version": "2012-10-17", + "Id": "test", + "Statement": [{"Effect": "Allow", "Principal": "*", "Action": "*"}], + } + ) + + @mock_sqs def test_get_queue_url(): client = boto3.client("sqs", region_name="us-east-1") @@ -1186,18 +1215,169 @@ def test_permissions(): Actions=["SendMessage"], ) - with assert_raises(ClientError): - client.add_permission( - QueueUrl=queue_url, - Label="account2", - AWSAccountIds=["222211111111"], - Actions=["SomeRubbish"], - ) + response = client.get_queue_attributes( + QueueUrl=queue_url, AttributeNames=["Policy"] + ) + policy = json.loads(response["Attributes"]["Policy"]) + policy["Version"].should.equal("2012-10-17") + policy["Id"].should.equal( + "arn:aws:sqs:us-east-1:123456789012:test-dlr-queue.fifo/SQSDefaultPolicy" + ) + sorted(policy["Statement"], key=lambda x: x["Sid"]).should.equal( + [ + { + "Sid": "account1", + "Effect": "Allow", + "Principal": {"AWS": "arn:aws:iam::111111111111:root"}, + "Action": "SQS:*", + "Resource": "arn:aws:sqs:us-east-1:123456789012:test-dlr-queue.fifo", + }, + { + "Sid": "account2", + "Effect": "Allow", + "Principal": {"AWS": "arn:aws:iam::222211111111:root"}, + "Action": "SQS:SendMessage", + "Resource": "arn:aws:sqs:us-east-1:123456789012:test-dlr-queue.fifo", + }, + ] + ) client.remove_permission(QueueUrl=queue_url, Label="account2") - with assert_raises(ClientError): - client.remove_permission(QueueUrl=queue_url, Label="non_existent") + response = client.get_queue_attributes( + QueueUrl=queue_url, AttributeNames=["Policy"] + ) + json.loads(response["Attributes"]["Policy"]).should.equal( + { + "Version": 
"2012-10-17", + "Id": "arn:aws:sqs:us-east-1:123456789012:test-dlr-queue.fifo/SQSDefaultPolicy", + "Statement": [ + { + "Sid": "account1", + "Effect": "Allow", + "Principal": {"AWS": "arn:aws:iam::111111111111:root"}, + "Action": "SQS:*", + "Resource": "arn:aws:sqs:us-east-1:123456789012:test-dlr-queue.fifo", + }, + ], + } + ) + + +@mock_sqs +def test_add_permission_errors(): + client = boto3.client("sqs", region_name="us-east-1") + response = client.create_queue(QueueName="test-queue") + queue_url = response["QueueUrl"] + client.add_permission( + QueueUrl=queue_url, + Label="test", + AWSAccountIds=["111111111111"], + Actions=["ReceiveMessage"], + ) + + with assert_raises(ClientError) as e: + client.add_permission( + QueueUrl=queue_url, + Label="test", + AWSAccountIds=["111111111111"], + Actions=["ReceiveMessage", "SendMessage"], + ) + ex = e.exception + ex.operation_name.should.equal("AddPermission") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidParameterValue") + ex.response["Error"]["Message"].should.equal( + "Value test for parameter Label is invalid. " "Reason: Already exists." + ) + + with assert_raises(ClientError) as e: + client.add_permission( + QueueUrl=queue_url, + Label="test-2", + AWSAccountIds=["111111111111"], + Actions=["RemovePermission"], + ) + ex = e.exception + ex.operation_name.should.equal("AddPermission") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidParameterValue") + ex.response["Error"]["Message"].should.equal( + "Value SQS:RemovePermission for parameter ActionName is invalid. " + "Reason: Only the queue owner is allowed to invoke this action." + ) + + with assert_raises(ClientError) as e: + client.add_permission( + QueueUrl=queue_url, + Label="test-2", + AWSAccountIds=["111111111111"], + Actions=[], + ) + ex = e.exception + ex.operation_name.should.equal("AddPermission") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("MissingParameter") + ex.response["Error"]["Message"].should.equal( + "The request must contain the parameter Actions." + ) + + with assert_raises(ClientError) as e: + client.add_permission( + QueueUrl=queue_url, + Label="test-2", + AWSAccountIds=[], + Actions=["ReceiveMessage"], + ) + ex = e.exception + ex.operation_name.should.equal("AddPermission") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidParameterValue") + ex.response["Error"]["Message"].should.equal( + "Value [] for parameter PrincipalId is invalid. Reason: Unable to verify." + ) + + with assert_raises(ClientError) as e: + client.add_permission( + QueueUrl=queue_url, + Label="test-2", + AWSAccountIds=["111111111111"], + Actions=[ + "ChangeMessageVisibility", + "DeleteMessage", + "GetQueueAttributes", + "GetQueueUrl", + "ListDeadLetterSourceQueues", + "PurgeQueue", + "ReceiveMessage", + "SendMessage", + ], + ) + ex = e.exception + ex.operation_name.should.equal("AddPermission") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(403) + ex.response["Error"]["Code"].should.contain("OverLimit") + ex.response["Error"]["Message"].should.equal( + "8 Actions were found, maximum allowed is 7." 
+ ) + + +@mock_sqs +def test_remove_permission_errors(): + client = boto3.client("sqs", region_name="us-east-1") + response = client.create_queue(QueueName="test-queue") + queue_url = response["QueueUrl"] + + with assert_raises(ClientError) as e: + client.remove_permission(QueueUrl=queue_url, Label="test") + ex = e.exception + ex.operation_name.should.equal("RemovePermission") + ex.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.response["Error"]["Code"].should.contain("InvalidParameterValue") + ex.response["Error"]["Message"].should.equal( + "Value test for parameter Label is invalid. " + "Reason: can't find label on existing policy." + ) @mock_sqs From b7795b7111158c3974040cf24ad1b56cfc1042c8 Mon Sep 17 00:00:00 2001 From: Brandon Bradley Date: Thu, 30 Jan 2020 16:35:19 -0600 Subject: [PATCH 070/125] test for ListStackResources --- .../test_cloudformation_stack_crud_boto3.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index 40fb2d669..a3e5097d7 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -522,6 +522,16 @@ def test_boto3_list_stack_set_operations(): list_operation["Summaries"][-1]["Action"].should.equal("UPDATE") +@mock_cloudformation +def test_boto3_bad_list_stack_resources(): + cf_conn = boto3.client("cloudformation", region_name="us-east-1") + cf_conn.create_stack_set( + StackSetName="test_stack_set", TemplateBody=dummy_template_json + ) + with assert_raises(ClientError): + cf_conn.list_stack_resources(StackName="test_stack_set") + + @mock_cloudformation def test_boto3_delete_stack_set(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") From 8b3c2b66544d82b91472fe3ad792ce5c18b773d3 Mon Sep 17 00:00:00 2001 From: Brandon Bradley Date: Thu, 30 Jan 2020 17:50:21 -0600 Subject: [PATCH 071/125] fix test --- .../test_cloudformation_stack_crud_boto3.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index a3e5097d7..b7e86a1d5 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -525,9 +525,6 @@ def test_boto3_list_stack_set_operations(): @mock_cloudformation def test_boto3_bad_list_stack_resources(): cf_conn = boto3.client("cloudformation", region_name="us-east-1") - cf_conn.create_stack_set( - StackSetName="test_stack_set", TemplateBody=dummy_template_json - ) with assert_raises(ClientError): cf_conn.list_stack_resources(StackName="test_stack_set") From 40bd4f16039d908a0f25735f6b35e8c347ef2f95 Mon Sep 17 00:00:00 2001 From: gruebel Date: Fri, 31 Jan 2020 17:16:42 +0100 Subject: [PATCH 072/125] Fix kms.create_key default output --- moto/kms/models.py | 14 +++++++++++++- tests/test_kms/test_kms.py | 17 +++++++++++++++-- 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/moto/kms/models.py b/moto/kms/models.py index 22f0039b2..cceb96342 100644 --- a/moto/kms/models.py +++ b/moto/kms/models.py @@ -9,6 +9,8 @@ from boto3 import Session from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_without_milliseconds +from moto.iam.models import ACCOUNT_ID + from .utils import decrypt, encrypt, generate_key_id, 
generate_master_key @@ -21,11 +23,16 @@ class Key(BaseModel): self.description = description self.enabled = True self.region = region - self.account_id = "012345678912" + self.account_id = ACCOUNT_ID self.key_rotation_status = False self.deletion_date = None self.tags = tags or {} self.key_material = generate_master_key() + self.origin = "AWS_KMS" + self.key_manager = "CUSTOMER" + self.customer_master_key_spec = "SYMMETRIC_DEFAULT" + self.encryption_algorithms = ["SYMMETRIC_DEFAULT"] + self.signing_algorithms = None @property def physical_resource_id(self): @@ -43,11 +50,16 @@ class Key(BaseModel): "AWSAccountId": self.account_id, "Arn": self.arn, "CreationDate": iso_8601_datetime_without_milliseconds(datetime.now()), + "CustomerMasterKeySpec": self.customer_master_key_spec, "Description": self.description, "Enabled": self.enabled, + "EncryptionAlgorithms": self.encryption_algorithms, "KeyId": self.id, + "KeyManager": self.key_manager, "KeyUsage": self.key_usage, "KeyState": self.key_state, + "Origin": self.origin, + "SigningAlgorithms": self.signing_algorithms, } } if self.key_state == "PendingDeletion": diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index 70fa68787..8c2843ee4 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -46,10 +46,23 @@ def test_create_key(): Tags=[{"TagKey": "project", "TagValue": "moto"}], ) + key["KeyMetadata"]["Arn"].should.equal( + "arn:aws:kms:us-east-1:123456789012:key/{}".format( + key["KeyMetadata"]["KeyId"] + ) + ) + key["KeyMetadata"]["AWSAccountId"].should.equal("123456789012") + key["KeyMetadata"]["CreationDate"].should.be.a(datetime) + key["KeyMetadata"]["CustomerMasterKeySpec"].should.equal("SYMMETRIC_DEFAULT") key["KeyMetadata"]["Description"].should.equal("my key") + key["KeyMetadata"]["Enabled"].should.be.ok + key["KeyMetadata"]["EncryptionAlgorithms"].should.equal(["SYMMETRIC_DEFAULT"]) + key["KeyMetadata"]["KeyId"].should_not.be.empty + key["KeyMetadata"]["KeyManager"].should.equal("CUSTOMER") + key["KeyMetadata"]["KeyState"].should.equal("Enabled") key["KeyMetadata"]["KeyUsage"].should.equal("ENCRYPT_DECRYPT") - key["KeyMetadata"]["Enabled"].should.equal(True) - key["KeyMetadata"]["CreationDate"].should.be.a(date) + key["KeyMetadata"]["Origin"].should.equal("AWS_KMS") + key["KeyMetadata"].should_not.have.key("SigningAlgorithms") @mock_kms_deprecated From 27ce0b7ab14f89ff87774bf04b4b44a44a61fca2 Mon Sep 17 00:00:00 2001 From: Asher Foa <1268088+asherf@users.noreply.github.com> Date: Fri, 31 Jan 2020 12:49:10 -0800 Subject: [PATCH 073/125] Botocore no longer needs an older version of python-dateutil. 
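Returning briefly to the KMS patch above: the expanded create_key metadata can be checked as follows (a sketch; the assertions mirror the updated test, and 123456789012 is moto's default account id):

    import boto3
    from moto import mock_kms

    @mock_kms
    def demo_key_metadata():
        kms = boto3.client("kms", region_name="us-east-1")
        metadata = kms.create_key(Description="demo key")["KeyMetadata"]
        # Fields newly populated by the patch; SigningAlgorithms stays absent
        # because a symmetric default key cannot sign.
        assert metadata["KeyManager"] == "CUSTOMER"
        assert metadata["CustomerMasterKeySpec"] == "SYMMETRIC_DEFAULT"
        assert metadata["EncryptionAlgorithms"] == ["SYMMETRIC_DEFAULT"]
        assert metadata["Origin"] == "AWS_KMS"
        assert "SigningAlgorithms" not in metadata

    demo_key_metadata()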
https://github.com/boto/botocore/pull/1910 https://github.com/boto/botocore/issues/1872 https://github.com/spulec/moto/pull/2570 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index d09f8fc7b..1dde71ac7 100755 --- a/setup.py +++ b/setup.py @@ -39,7 +39,7 @@ install_requires = [ "werkzeug", "PyYAML>=5.1", "pytz", - "python-dateutil<2.8.1,>=2.1", + "python-dateutil<3.0.0,>=2.1", "python-jose<4.0.0", "mock", "docker>=2.5.1", From 800e5ab7d2e25970515b9612f6e998d9434e76d4 Mon Sep 17 00:00:00 2001 From: Brandon Bradley Date: Sat, 1 Feb 2020 14:52:48 -0600 Subject: [PATCH 074/125] requested changes from review --- moto/cloudformation/models.py | 2 +- moto/cloudformation/responses.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index c05783fb4..b32d63b32 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -678,7 +678,7 @@ class CloudFormationBackend(BaseBackend): def list_stack_resources(self, stack_name_or_id): stack = self.get_stack(stack_name_or_id) if stack is None: - return [] + return None return stack.stack_resources def delete_stack(self, name_or_stack_id): diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 7effb03fa..77a3051fd 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -229,7 +229,7 @@ class CloudFormationResponse(BaseResponse): stack_name_or_id = self._get_param("StackName") resources = self.cloudformation_backend.list_stack_resources(stack_name_or_id) - if not resources: + if resources is None: raise ValidationError(stack_name_or_id) template = self.response_template(LIST_STACKS_RESOURCES_RESPONSE) From f74f08581a5fd11e82a89d92fa1a39e6bd04138e Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 2 Feb 2020 10:36:51 +0000 Subject: [PATCH 075/125] S3 - Add IllegalLocationConstraint validation when creating buckets --- moto/s3/exceptions.py | 12 ++ moto/s3/responses.py | 22 +++- moto/s3/utils.py | 2 +- tests/test_s3/test_s3.py | 183 ++++++++++++++++---------- tests/test_s3/test_s3_lifecycle.py | 28 ++-- tests/test_s3/test_s3_storageclass.py | 30 +++-- 6 files changed, 182 insertions(+), 95 deletions(-) diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py index 1f2ead639..bc339772e 100644 --- a/moto/s3/exceptions.py +++ b/moto/s3/exceptions.py @@ -127,6 +127,18 @@ class InvalidRequest(S3ClientError): ) +class IllegalLocationConstraintException(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(IllegalLocationConstraintException, self).__init__( + "IllegalLocationConstraintException", + "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to.", + *args, + **kwargs + ) + + class MalformedXML(S3ClientError): code = 400 diff --git a/moto/s3/responses.py b/moto/s3/responses.py index a04427172..6041201bf 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -29,6 +29,7 @@ from .exceptions import ( InvalidPartOrder, MalformedXML, MalformedACLError, + IllegalLocationConstraintException, InvalidNotificationARN, InvalidNotificationEvent, ObjectNotInActiveTierError, @@ -585,6 +586,15 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): next_continuation_token = None return result_keys, is_truncated, next_continuation_token + def _body_contains_location_constraint(self, body): + if body: + try: + 
xmltodict.parse(body)["CreateBucketConfiguration"]["LocationConstraint"] + return True + except KeyError: + pass + return False + def _bucket_response_put( self, request, body, region_name, bucket_name, querystring ): @@ -680,10 +690,16 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): return "" else: + # us-east-1, the default AWS region behaves a bit differently + # - you should not use it as a location constraint --> it fails + # - querying the location constraint returns None + # - LocationConstraint has to be specified if outside us-east-1 + if ( + region_name != DEFAULT_REGION_NAME + and not self._body_contains_location_constraint(body) + ): + raise IllegalLocationConstraintException() if body: - # us-east-1, the default AWS region behaves a bit differently - # - you should not use it as a location constraint --> it fails - # - querying the location constraint returns None try: forced_region = xmltodict.parse(body)["CreateBucketConfiguration"][ "LocationConstraint" diff --git a/moto/s3/utils.py b/moto/s3/utils.py index e7d9e5580..e22b6b860 100644 --- a/moto/s3/utils.py +++ b/moto/s3/utils.py @@ -37,7 +37,7 @@ def bucket_name_from_url(url): REGION_URL_REGEX = re.compile( r"^https?://(s3[-\.](?P<region>.+)\.amazonaws\.com/(.+)|" - r"(.+)\.s3-(?P<region>.+)\.amazonaws\.com)/?" + r"(.+)\.s3[-\.](?P<region>.+)\.amazonaws\.com)/?" ) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 294beca87..afea4d55e 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -27,6 +27,7 @@ from parameterized import parameterized import six import requests import tests.backport_assert_raises # noqa +from moto.s3.responses import DEFAULT_REGION_NAME from nose import SkipTest from nose.tools import assert_raises @@ -68,7 +69,7 @@ class MyModel(object): self.value = value def save(self): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.put_object(Bucket="mybucket", Key=self.name, Body=self.value) @@ -119,7 +120,7 @@ def test_append_to_value__empty_key(): @mock_s3 def test_my_model_save(): # Create Bucket so that test can run - conn = boto3.resource("s3", region_name="us-east-1") + conn = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) conn.create_bucket(Bucket="mybucket") #################################### @@ -133,7 +134,7 @@ def test_my_model_save(): @mock_s3 def test_key_etag(): - conn = boto3.resource("s3", region_name="us-east-1") + conn = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) conn.create_bucket(Bucket="mybucket") model_instance = MyModel("steve", "is awesome") @@ -519,9 +520,9 @@ def test_bucket_with_dash(): def test_create_existing_bucket(): "Trying to create a bucket that already exists should raise an Error" conn = boto.s3.connect_to_region("us-west-2") - conn.create_bucket("foobar") + conn.create_bucket("foobar", location="us-west-2") with assert_raises(S3CreateError): - conn.create_bucket("foobar") + conn.create_bucket("foobar", location="us-west-2") @mock_s3_deprecated @@ -535,7 +536,7 @@ def test_create_existing_bucket_in_us_east_1(): us-east-1. In us-east-1 region, you will get 200 OK, but it is no-op (if bucket exists it Amazon S3 will not do anything). 
""" - conn = boto.s3.connect_to_region("us-east-1") + conn = boto.s3.connect_to_region(DEFAULT_REGION_NAME) conn.create_bucket("foobar") bucket = conn.create_bucket("foobar") bucket.name.should.equal("foobar") @@ -544,7 +545,7 @@ def test_create_existing_bucket_in_us_east_1(): @mock_s3_deprecated def test_other_region(): conn = S3Connection("key", "secret", host="s3-website-ap-southeast-2.amazonaws.com") - conn.create_bucket("foobar") + conn.create_bucket("foobar", location="ap-southeast-2") list(conn.get_bucket("foobar").get_all_keys()).should.equal([]) @@ -995,7 +996,9 @@ def test_bucket_acl_switching(): def test_s3_object_in_public_bucket(): s3 = boto3.resource("s3") bucket = s3.Bucket("test-bucket") - bucket.create(ACL="public-read") + bucket.create( + ACL="public-read", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) bucket.put_object(Body=b"ABCD", Key="file.txt") s3_anonymous = boto3.resource("s3") @@ -1026,7 +1029,9 @@ def test_s3_object_in_public_bucket(): def test_s3_object_in_private_bucket(): s3 = boto3.resource("s3") bucket = s3.Bucket("test-bucket") - bucket.create(ACL="private") + bucket.create( + ACL="private", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) bucket.put_object(ACL="private", Body=b"ABCD", Key="file.txt") s3_anonymous = boto3.resource("s3") @@ -1086,19 +1091,46 @@ def test_setting_content_encoding(): @mock_s3_deprecated def test_bucket_location(): conn = boto.s3.connect_to_region("us-west-2") - bucket = conn.create_bucket("mybucket") + bucket = conn.create_bucket("mybucket", location="us-west-2") bucket.get_location().should.equal("us-west-2") @mock_s3 -def test_bucket_location_us_east_1(): - cli = boto3.client("s3") +def test_bucket_location_default(): + cli = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" # No LocationConstraint ==> us-east-1 cli.create_bucket(Bucket=bucket_name) cli.get_bucket_location(Bucket=bucket_name)["LocationConstraint"].should.equal(None) +@mock_s3 +def test_bucket_location_nondefault(): + cli = boto3.client("s3", region_name="eu-central-1") + bucket_name = "mybucket" + # LocationConstraint set for non default regions + resp = cli.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={"LocationConstraint": "eu-central-1"}, + ) + cli.get_bucket_location(Bucket=bucket_name)["LocationConstraint"].should.equal( + "eu-central-1" + ) + + +@mock_s3 +def test_s3_location_should_error_outside_useast1(): + s3 = boto3.client("s3", region_name="eu-west-1") + + bucket_name = "asdfasdfsdfdsfasda" + + with assert_raises(ClientError) as e: + s3.create_bucket(Bucket=bucket_name) + e.exception.response["Error"]["Message"].should.equal( + "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to." 
+ ) + + @mock_s3_deprecated def test_ranged_get(): conn = boto.connect_s3() @@ -1222,7 +1254,7 @@ def test_key_with_trailing_slash_in_ordinary_calling_format(): @mock_s3 def test_boto3_key_etag(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="steve", Body=b"is awesome") resp = s3.get_object(Bucket="mybucket", Key="steve") @@ -1231,7 +1263,7 @@ def test_boto3_key_etag(): @mock_s3 def test_website_redirect_location(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="steve", Body=b"is awesome") @@ -1248,7 +1280,7 @@ def test_website_redirect_location(): @mock_s3 def test_boto3_list_objects_truncated_response(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="one", Body=b"1") s3.put_object(Bucket="mybucket", Key="two", Body=b"22") @@ -1294,7 +1326,7 @@ def test_boto3_list_objects_truncated_response(): @mock_s3 def test_boto3_list_keys_xml_escaped(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") key_name = "Q&A.txt" s3.put_object(Bucket="mybucket", Key=key_name, Body=b"is awesome") @@ -1314,7 +1346,7 @@ def test_boto3_list_keys_xml_escaped(): @mock_s3 def test_boto3_list_objects_v2_common_prefix_pagination(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") max_keys = 1 @@ -1343,7 +1375,7 @@ def test_boto3_list_objects_v2_common_prefix_pagination(): @mock_s3 def test_boto3_list_objects_v2_truncated_response(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="one", Body=b"1") s3.put_object(Bucket="mybucket", Key="two", Body=b"22") @@ -1400,7 +1432,7 @@ def test_boto3_list_objects_v2_truncated_response(): @mock_s3 def test_boto3_list_objects_v2_truncated_response_start_after(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="one", Body=b"1") s3.put_object(Bucket="mybucket", Key="two", Body=b"22") @@ -1442,7 +1474,7 @@ def test_boto3_list_objects_v2_truncated_response_start_after(): @mock_s3 def test_boto3_list_objects_v2_fetch_owner(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="one", Body=b"11") @@ -1456,7 +1488,7 @@ def test_boto3_list_objects_v2_fetch_owner(): @mock_s3 def test_boto3_list_objects_v2_truncate_combined_keys_and_folders(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") s3.put_object(Bucket="mybucket", Key="1/2", Body="") s3.put_object(Bucket="mybucket", Key="2", Body="") @@ -1486,7 +1518,7 @@ def test_boto3_list_objects_v2_truncate_combined_keys_and_folders(): @mock_s3 def test_boto3_bucket_create(): - s3 = boto3.resource("s3", region_name="us-east-1") + s3 = boto3.resource("s3", 
region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="blah") s3.Object("blah", "hello.txt").put(Body="some text") @@ -1511,10 +1543,10 @@ def test_bucket_create_duplicate(): @mock_s3 def test_bucket_create_force_us_east_1(): - s3 = boto3.resource("s3", region_name="us-east-1") + s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) with assert_raises(ClientError) as exc: s3.create_bucket( - Bucket="blah", CreateBucketConfiguration={"LocationConstraint": "us-east-1"} + Bucket="blah", CreateBucketConfiguration={"LocationConstraint": DEFAULT_REGION_NAME} ) exc.exception.response["Error"]["Code"].should.equal("InvalidLocationConstraint") @@ -1522,7 +1554,9 @@ def test_bucket_create_force_us_east_1(): @mock_s3 def test_boto3_bucket_create_eu_central(): s3 = boto3.resource("s3", region_name="eu-central-1") - s3.create_bucket(Bucket="blah") + s3.create_bucket( + Bucket="blah", CreateBucketConfiguration={"LocationConstraint": "eu-central-1"} + ) s3.Object("blah", "hello.txt").put(Body="some text") @@ -1533,7 +1567,7 @@ def test_boto3_bucket_create_eu_central(): @mock_s3 def test_boto3_head_object(): - s3 = boto3.resource("s3", region_name="us-east-1") + s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="blah") s3.Object("blah", "hello.txt").put(Body="some text") @@ -1551,7 +1585,7 @@ def test_boto3_head_object(): @mock_s3 def test_boto3_bucket_deletion(): - cli = boto3.client("s3", region_name="us-east-1") + cli = boto3.client("s3", region_name=DEFAULT_REGION_NAME) cli.create_bucket(Bucket="foobar") cli.put_object(Bucket="foobar", Key="the-key", Body="some value") @@ -1582,7 +1616,7 @@ def test_boto3_bucket_deletion(): @mock_s3 def test_boto3_get_object(): - s3 = boto3.resource("s3", region_name="us-east-1") + s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="blah") s3.Object("blah", "hello.txt").put(Body="some text") @@ -1599,7 +1633,7 @@ def test_boto3_get_object(): @mock_s3 def test_boto3_get_missing_object_with_part_number(): - s3 = boto3.resource("s3", region_name="us-east-1") + s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="blah") with assert_raises(ClientError) as e: @@ -1612,7 +1646,7 @@ def test_boto3_get_missing_object_with_part_number(): @mock_s3 def test_boto3_head_object_with_versioning(): - s3 = boto3.resource("s3", region_name="us-east-1") + s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) bucket = s3.create_bucket(Bucket="blah") bucket.Versioning().enable() @@ -1642,7 +1676,7 @@ def test_boto3_head_object_with_versioning(): @mock_s3 def test_boto3_copy_object_with_versioning(): - client = boto3.client("s3", region_name="us-east-1") + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) client.create_bucket( Bucket="blah", CreateBucketConfiguration={"LocationConstraint": "eu-west-1"} @@ -1706,7 +1740,7 @@ def test_boto3_copy_object_with_versioning(): @mock_s3 def test_boto3_copy_object_from_unversioned_to_versioned_bucket(): - client = boto3.client("s3", region_name="us-east-1") + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) client.create_bucket( Bucket="src", CreateBucketConfiguration={"LocationConstraint": "eu-west-1"} @@ -1730,7 +1764,7 @@ def test_boto3_copy_object_from_unversioned_to_versioned_bucket(): @mock_s3 def test_boto3_deleted_versionings_list(): - client = boto3.client("s3", region_name="us-east-1") + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) client.create_bucket(Bucket="blah") 
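And the corresponding happy path: supply the constraint explicitly outside us-east-1, and expect a null LocationConstraint from us-east-1 itself (a sketch; bucket names are arbitrary, mirroring the two location tests above):

    import boto3
    from moto import mock_s3

    @mock_s3
    def demo_bucket_locations():
        default = boto3.client("s3", region_name="us-east-1")
        default.create_bucket(Bucket="default-bucket")
        # us-east-1 is special-cased: no constraint on create, None on read.
        location = default.get_bucket_location(Bucket="default-bucket")
        assert location["LocationConstraint"] is None

        eu = boto3.client("s3", region_name="eu-central-1")
        eu.create_bucket(
            Bucket="eu-bucket",
            CreateBucketConfiguration={"LocationConstraint": "eu-central-1"},
        )
        location = eu.get_bucket_location(Bucket="eu-bucket")
        assert location["LocationConstraint"] == "eu-central-1"

    demo_bucket_locations()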
client.put_bucket_versioning( @@ -1747,7 +1781,7 @@ def test_boto3_deleted_versionings_list(): @mock_s3 def test_boto3_delete_versioned_bucket(): - client = boto3.client("s3", region_name="us-east-1") + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) client.create_bucket(Bucket="blah") client.put_bucket_versioning( @@ -1762,7 +1796,7 @@ def test_boto3_delete_versioned_bucket(): @mock_s3 def test_boto3_get_object_if_modified_since(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "blah" s3.create_bucket(Bucket=bucket_name) @@ -1782,7 +1816,7 @@ def test_boto3_get_object_if_modified_since(): @mock_s3 def test_boto3_head_object_if_modified_since(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "blah" s3.create_bucket(Bucket=bucket_name) @@ -1804,7 +1838,7 @@ def test_boto3_head_object_if_modified_since(): @reduced_min_part_size def test_boto3_multipart_etag(): # Create Bucket so that test can run - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") upload_id = s3.create_multipart_upload(Bucket="mybucket", Key="the-key")["UploadId"] @@ -1848,7 +1882,7 @@ def test_boto3_multipart_etag(): @mock_s3 @reduced_min_part_size def test_boto3_multipart_part_size(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="mybucket") mpu = s3.create_multipart_upload(Bucket="mybucket", Key="the-key") @@ -1883,7 +1917,7 @@ def test_boto3_multipart_part_size(): @mock_s3 def test_boto3_put_object_with_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-tags" s3.create_bucket(Bucket=bucket_name) @@ -1897,7 +1931,7 @@ def test_boto3_put_object_with_tagging(): @mock_s3 def test_boto3_put_bucket_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" s3.create_bucket(Bucket=bucket_name) @@ -1944,7 +1978,7 @@ def test_boto3_put_bucket_tagging(): @mock_s3 def test_boto3_get_bucket_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" s3.create_bucket(Bucket=bucket_name) s3.put_bucket_tagging( @@ -1975,7 +2009,7 @@ def test_boto3_get_bucket_tagging(): @mock_s3 def test_boto3_delete_bucket_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" s3.create_bucket(Bucket=bucket_name) @@ -2002,7 +2036,7 @@ def test_boto3_delete_bucket_tagging(): @mock_s3 def test_boto3_put_bucket_cors(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" s3.create_bucket(Bucket=bucket_name) @@ -2062,7 +2096,7 @@ def test_boto3_put_bucket_cors(): @mock_s3 def test_boto3_get_bucket_cors(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" s3.create_bucket(Bucket=bucket_name) @@ -2103,7 +2137,7 @@ def test_boto3_get_bucket_cors(): @mock_s3 def test_boto3_delete_bucket_cors(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", 
region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" s3.create_bucket(Bucket=bucket_name) s3.put_bucket_cors( @@ -2127,7 +2161,7 @@ def test_boto3_delete_bucket_cors(): @mock_s3 def test_put_bucket_acl_body(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="bucket") bucket_owner = s3.get_bucket_acl(Bucket="bucket")["Owner"] s3.put_bucket_acl( @@ -2225,7 +2259,7 @@ def test_put_bucket_acl_body(): @mock_s3 def test_put_bucket_notification(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="bucket") # With no configuration: @@ -2421,7 +2455,7 @@ def test_put_bucket_notification(): @mock_s3 def test_put_bucket_notification_errors(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket="bucket") # With incorrect ARNs: @@ -2488,7 +2522,7 @@ def test_put_bucket_notification_errors(): @mock_s3 def test_boto3_put_bucket_logging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" log_bucket = "logbucket" wrong_region_bucket = "wrongregionlogbucket" @@ -2667,7 +2701,7 @@ def test_boto3_put_bucket_logging(): @mock_s3 def test_boto3_put_object_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-tags" s3.create_bucket(Bucket=bucket_name) @@ -2711,7 +2745,7 @@ def test_boto3_put_object_tagging(): @mock_s3 def test_boto3_put_object_tagging_on_earliest_version(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-tags" s3.create_bucket(Bucket=bucket_name) @@ -2778,7 +2812,7 @@ def test_boto3_put_object_tagging_on_earliest_version(): @mock_s3 def test_boto3_put_object_tagging_on_both_version(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-tags" s3.create_bucket(Bucket=bucket_name) @@ -2858,7 +2892,7 @@ def test_boto3_put_object_tagging_on_both_version(): @mock_s3 def test_boto3_put_object_tagging_with_single_tag(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-tags" s3.create_bucket(Bucket=bucket_name) @@ -2876,7 +2910,7 @@ def test_boto3_put_object_tagging_with_single_tag(): @mock_s3 def test_boto3_get_object_tagging(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-tags" s3.create_bucket(Bucket=bucket_name) @@ -2905,7 +2939,7 @@ def test_boto3_get_object_tagging(): @mock_s3 def test_boto3_list_object_versions(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-versions" s3.create_bucket(Bucket=bucket_name) @@ -2927,7 +2961,7 @@ def test_boto3_list_object_versions(): @mock_s3 def test_boto3_list_object_versions_with_versioning_disabled(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-versions" s3.create_bucket(Bucket=bucket_name) @@ -2950,7 +2984,7 
@@ def test_boto3_list_object_versions_with_versioning_disabled(): @mock_s3 def test_boto3_list_object_versions_with_versioning_enabled_late(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-versions" s3.create_bucket(Bucket=bucket_name) @@ -2978,7 +3012,7 @@ def test_boto3_list_object_versions_with_versioning_enabled_late(): @mock_s3 def test_boto3_bad_prefix_list_object_versions(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-versions" bad_prefix = "key-that-does-not-exist" @@ -2997,7 +3031,7 @@ def test_boto3_bad_prefix_list_object_versions(): @mock_s3 def test_boto3_delete_markers(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-versions-and-unicode-ó" s3.create_bucket(Bucket=bucket_name) @@ -3040,7 +3074,7 @@ def test_boto3_delete_markers(): @mock_s3 def test_boto3_multiple_delete_markers(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" key = "key-with-versions-and-unicode-ó" s3.create_bucket(Bucket=bucket_name) @@ -3091,7 +3125,7 @@ def test_boto3_multiple_delete_markers(): def test_get_stream_gzipped(): payload = b"this is some stuff here" - s3_client = boto3.client("s3", region_name="us-east-1") + s3_client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3_client.create_bucket(Bucket="moto-tests") buffer_ = BytesIO() with GzipFile(fileobj=buffer_, mode="w") as f: @@ -3129,7 +3163,7 @@ TEST_XML = """\ @mock_s3 def test_boto3_bucket_name_too_long(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) with assert_raises(ClientError) as exc: s3.create_bucket(Bucket="x" * 64) exc.exception.response["Error"]["Code"].should.equal("InvalidBucketName") @@ -3137,7 +3171,7 @@ def test_boto3_bucket_name_too_long(): @mock_s3 def test_boto3_bucket_name_too_short(): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) with assert_raises(ClientError) as exc: s3.create_bucket(Bucket="x" * 2) exc.exception.response["Error"]["Code"].should.equal("InvalidBucketName") @@ -3146,7 +3180,7 @@ def test_boto3_bucket_name_too_short(): @mock_s3 def test_accelerated_none_when_unspecified(): bucket_name = "some_bucket" - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket=bucket_name) resp = s3.get_bucket_accelerate_configuration(Bucket=bucket_name) resp.shouldnt.have.key("Status") @@ -3155,7 +3189,7 @@ def test_accelerated_none_when_unspecified(): @mock_s3 def test_can_enable_bucket_acceleration(): bucket_name = "some_bucket" - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket=bucket_name) resp = s3.put_bucket_accelerate_configuration( Bucket=bucket_name, AccelerateConfiguration={"Status": "Enabled"} @@ -3171,7 +3205,7 @@ def test_can_enable_bucket_acceleration(): @mock_s3 def test_can_suspend_bucket_acceleration(): bucket_name = "some_bucket" - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket=bucket_name) resp = s3.put_bucket_accelerate_configuration( Bucket=bucket_name, AccelerateConfiguration={"Status": "Enabled"} @@ -3191,7 
+3225,10 @@ def test_can_suspend_bucket_acceleration(): def test_suspending_acceleration_on_not_configured_bucket_does_nothing(): bucket_name = "some_bucket" s3 = boto3.client("s3") - s3.create_bucket(Bucket=bucket_name) + s3.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={"LocationConstraint": "us-west-1"}, + ) resp = s3.put_bucket_accelerate_configuration( Bucket=bucket_name, AccelerateConfiguration={"Status": "Suspended"} ) @@ -3205,7 +3242,7 @@ def test_suspending_acceleration_on_not_configured_bucket_does_nothing(): @mock_s3 def test_accelerate_configuration_status_validation(): bucket_name = "some_bucket" - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket=bucket_name) with assert_raises(ClientError) as exc: s3.put_bucket_accelerate_configuration( @@ -3217,7 +3254,7 @@ def test_accelerate_configuration_status_validation(): @mock_s3 def test_accelerate_configuration_is_not_supported_when_bucket_name_has_dots(): bucket_name = "some.bucket.with.dots" - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) s3.create_bucket(Bucket=bucket_name) with assert_raises(ClientError) as exc: s3.put_bucket_accelerate_configuration( @@ -3227,7 +3264,7 @@ def test_accelerate_configuration_is_not_supported_when_bucket_name_has_dots(): def store_and_read_back_a_key(key): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" body = b"Some body" @@ -3255,7 +3292,7 @@ def test_root_dir_with_empty_name_works(): ) @mock_s3 def test_delete_objects_with_url_encoded_key(key): - s3 = boto3.client("s3", region_name="us-east-1") + s3 = boto3.client("s3", region_name=DEFAULT_REGION_NAME) bucket_name = "mybucket" body = b"Some body" @@ -3282,7 +3319,7 @@ def test_delete_objects_with_url_encoded_key(key): @mock_s3 @mock_config def test_public_access_block(): - client = boto3.client("s3") + client = boto3.client("s3", region_name=DEFAULT_REGION_NAME) client.create_bucket(Bucket="mybucket") # Try to get the public access block (should not exist by default) @@ -3349,7 +3386,7 @@ def test_public_access_block(): assert ce.exception.response["ResponseMetadata"]["HTTPStatusCode"] == 400 # Test that things work with AWS Config: - config_client = boto3.client("config", region_name="us-east-1") + config_client = boto3.client("config", region_name=DEFAULT_REGION_NAME) result = config_client.get_resource_config_history( resourceType="AWS::S3::Bucket", resourceId="mybucket" ) diff --git a/tests/test_s3/test_s3_lifecycle.py b/tests/test_s3/test_s3_lifecycle.py index 260b248f1..0a2e66b5c 100644 --- a/tests/test_s3/test_s3_lifecycle.py +++ b/tests/test_s3/test_s3_lifecycle.py @@ -16,7 +16,7 @@ from moto import mock_s3_deprecated, mock_s3 @mock_s3_deprecated def test_lifecycle_create(): conn = boto.s3.connect_to_region("us-west-1") - bucket = conn.create_bucket("foobar") + bucket = conn.create_bucket("foobar", location="us-west-1") lifecycle = Lifecycle() lifecycle.add_rule("myid", "", "Enabled", 30) @@ -33,7 +33,9 @@ def test_lifecycle_create(): @mock_s3 def test_lifecycle_with_filters(): client = boto3.client("s3") - client.create_bucket(Bucket="bucket") + client.create_bucket( + Bucket="bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) # Create a lifecycle rule with a Filter (no tags): lfc = { @@ -245,7 +247,9 @@ def test_lifecycle_with_filters(): @mock_s3 def test_lifecycle_with_eodm(): client = boto3.client("s3") - 
client.create_bucket(Bucket="bucket") + client.create_bucket( + Bucket="bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) lfc = { "Rules": [ @@ -293,7 +297,9 @@ def test_lifecycle_with_eodm(): @mock_s3 def test_lifecycle_with_nve(): client = boto3.client("s3") - client.create_bucket(Bucket="bucket") + client.create_bucket( + Bucket="bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) lfc = { "Rules": [ @@ -327,7 +333,9 @@ def test_lifecycle_with_nve(): @mock_s3 def test_lifecycle_with_nvt(): client = boto3.client("s3") - client.create_bucket(Bucket="bucket") + client.create_bucket( + Bucket="bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) lfc = { "Rules": [ @@ -393,7 +401,9 @@ def test_lifecycle_with_nvt(): @mock_s3 def test_lifecycle_with_aimu(): client = boto3.client("s3") - client.create_bucket(Bucket="bucket") + client.create_bucket( + Bucket="bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) lfc = { "Rules": [ @@ -432,7 +442,7 @@ def test_lifecycle_with_aimu(): @mock_s3_deprecated def test_lifecycle_with_glacier_transition(): conn = boto.s3.connect_to_region("us-west-1") - bucket = conn.create_bucket("foobar") + bucket = conn.create_bucket("foobar", location="us-west-1") lifecycle = Lifecycle() transition = Transition(days=30, storage_class="GLACIER") @@ -451,7 +461,7 @@ def test_lifecycle_with_glacier_transition(): @mock_s3_deprecated def test_lifecycle_multi(): conn = boto.s3.connect_to_region("us-west-1") - bucket = conn.create_bucket("foobar") + bucket = conn.create_bucket("foobar", location="us-west-1") date = "2022-10-12T00:00:00.000Z" sc = "GLACIER" @@ -493,7 +503,7 @@ def test_lifecycle_multi(): @mock_s3_deprecated def test_lifecycle_delete(): conn = boto.s3.connect_to_region("us-west-1") - bucket = conn.create_bucket("foobar") + bucket = conn.create_bucket("foobar", location="us-west-1") lifecycle = Lifecycle() lifecycle.add_rule(expiration=30) diff --git a/tests/test_s3/test_s3_storageclass.py b/tests/test_s3/test_s3_storageclass.py index dbdc85c42..f1a0479b2 100644 --- a/tests/test_s3/test_s3_storageclass.py +++ b/tests/test_s3/test_s3_storageclass.py @@ -11,7 +11,7 @@ from moto import mock_s3 @mock_s3 def test_s3_storage_class_standard(): - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name="us-east-1") s3.create_bucket(Bucket="Bucket") # add an object to the bucket with standard storage @@ -26,7 +26,9 @@ def test_s3_storage_class_standard(): @mock_s3 def test_s3_storage_class_infrequent_access(): s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") + s3.create_bucket( + Bucket="Bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-2"} + ) # add an object to the bucket with standard storage @@ -46,7 +48,9 @@ def test_s3_storage_class_infrequent_access(): def test_s3_storage_class_intelligent_tiering(): s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") + s3.create_bucket( + Bucket="Bucket", CreateBucketConfiguration={"LocationConstraint": "us-east-2"} + ) s3.put_object( Bucket="Bucket", Key="my_key_infrequent", @@ -61,7 +65,7 @@ def test_s3_storage_class_intelligent_tiering(): @mock_s3 def test_s3_storage_class_copy(): - s3 = boto3.client("s3") + s3 = boto3.client("s3", region_name="us-east-1") s3.create_bucket(Bucket="Bucket") s3.put_object( Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARD" @@ -86,7 +90,7 @@ def test_s3_storage_class_copy(): @mock_s3 def test_s3_invalid_copied_storage_class(): - s3 = 
boto3.client("s3") + s3 = boto3.client("s3", region_name="us-east-1") s3.create_bucket(Bucket="Bucket") s3.put_object( Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="STANDARD" @@ -119,7 +123,9 @@ def test_s3_invalid_copied_storage_class(): @mock_s3 def test_s3_invalid_storage_class(): s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") + s3.create_bucket( + Bucket="Bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) # Try to add an object with an invalid storage class with assert_raises(ClientError) as err: @@ -137,7 +143,9 @@ def test_s3_invalid_storage_class(): @mock_s3 def test_s3_default_storage_class(): s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") + s3.create_bucket( + Bucket="Bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) s3.put_object(Bucket="Bucket", Key="First_Object", Body="Body") @@ -150,7 +158,9 @@ def test_s3_default_storage_class(): @mock_s3 def test_s3_copy_object_error_for_glacier_storage_class(): s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") + s3.create_bucket( + Bucket="Bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) s3.put_object( Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="GLACIER" @@ -169,7 +179,9 @@ def test_s3_copy_object_error_for_glacier_storage_class(): @mock_s3 def test_s3_copy_object_error_for_deep_archive_storage_class(): s3 = boto3.client("s3") - s3.create_bucket(Bucket="Bucket") + s3.create_bucket( + Bucket="Bucket", CreateBucketConfiguration={"LocationConstraint": "us-west-1"} + ) s3.put_object( Bucket="Bucket", Key="First_Object", Body="Body", StorageClass="DEEP_ARCHIVE" From a86cba79de14285ffb050699d2891bd1701fe1cf Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 2 Feb 2020 11:48:32 +0000 Subject: [PATCH 076/125] Add S3 LocationConstraint to Lambda tests --- tests/test_awslambda/test_lambda.py | 142 +++++++++++------- .../test_resourcegroupstaggingapi.py | 5 +- tests/test_s3/test_s3.py | 3 +- 3 files changed, 95 insertions(+), 55 deletions(-) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index dfd6431e7..48d04ef55 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -86,14 +86,14 @@ def lambda_handler(event, context): @mock_lambda def test_list_functions(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) result = conn.list_functions() result["Functions"].should.have.length_of(0) @mock_lambda def test_invoke_requestresponse_function(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", Runtime="python2.7", @@ -126,7 +126,7 @@ def test_invoke_requestresponse_function(): @mock_lambda def test_invoke_event_function(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", Runtime="python2.7", @@ -156,11 +156,11 @@ if settings.TEST_SERVER_MODE: @mock_ec2 @mock_lambda def test_invoke_function_get_ec2_volume(): - conn = boto3.resource("ec2", "us-west-2") - vol = conn.create_volume(Size=99, AvailabilityZone="us-west-2") + conn = boto3.resource("ec2", _lambda_region) + vol = conn.create_volume(Size=99, AvailabilityZone=_lambda_region) vol = conn.Volume(vol.id) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", 
Runtime="python3.7", @@ -190,14 +190,14 @@ if settings.TEST_SERVER_MODE: @mock_ec2 @mock_lambda def test_invoke_function_from_sns(): - logs_conn = boto3.client("logs", region_name="us-west-2") - sns_conn = boto3.client("sns", region_name="us-west-2") + logs_conn = boto3.client("logs", region_name=_lambda_region) + sns_conn = boto3.client("sns", region_name=_lambda_region) sns_conn.create_topic(Name="some-topic") topics_json = sns_conn.list_topics() topics = topics_json["Topics"] topic_arn = topics[0]["TopicArn"] - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) result = conn.create_function( FunctionName="testFunction", Runtime="python2.7", @@ -240,7 +240,7 @@ def test_invoke_function_from_sns(): @mock_lambda def test_create_based_on_s3_with_missing_bucket(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function.when.called_with( FunctionName="testFunction", @@ -260,12 +260,15 @@ def test_create_based_on_s3_with_missing_bucket(): @mock_s3 @freeze_time("2015-01-01 00:00:00") def test_create_function_from_aws_bucket(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) result = conn.create_function( FunctionName="testFunction", @@ -313,7 +316,7 @@ def test_create_function_from_aws_bucket(): @mock_lambda @freeze_time("2015-01-01 00:00:00") def test_create_function_from_zipfile(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) zip_content = get_test_zip_file1() result = conn.create_function( FunctionName="testFunction", @@ -358,12 +361,15 @@ def test_create_function_from_zipfile(): @mock_s3 @freeze_time("2015-01-01 00:00:00") def test_get_function(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file1() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", @@ -427,7 +433,10 @@ def test_get_function(): def test_get_function_by_arn(): bucket_name = "test-bucket" s3_conn = boto3.client("s3", "us-east-1") - s3_conn.create_bucket(Bucket=bucket_name) + s3_conn.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket=bucket_name, Key="test.zip", Body=zip_content) @@ -452,12 +461,15 @@ def test_get_function_by_arn(): @mock_lambda @mock_s3 def test_delete_function(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = 
boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", @@ -488,7 +500,10 @@ def test_delete_function(): def test_delete_function_by_arn(): bucket_name = "test-bucket" s3_conn = boto3.client("s3", "us-east-1") - s3_conn.create_bucket(Bucket=bucket_name) + s3_conn.create_bucket( + Bucket=bucket_name, + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket=bucket_name, Key="test.zip", Body=zip_content) @@ -513,7 +528,7 @@ def test_delete_function_by_arn(): @mock_lambda def test_delete_unknown_function(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.delete_function.when.called_with( FunctionName="testFunctionThatDoesntExist" ).should.throw(botocore.client.ClientError) @@ -522,12 +537,15 @@ def test_delete_unknown_function(): @mock_lambda @mock_s3 def test_publish(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", @@ -572,12 +590,15 @@ def test_list_create_list_get_delete_list(): test `list -> create -> list -> get -> delete -> list` integration """ - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.list_functions()["Functions"].should.have.length_of(0) @@ -674,12 +695,15 @@ def test_tags(): """ test list_tags -> tag_resource -> list_tags -> tag_resource -> list_tags -> untag_resource -> list_tags integration """ - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) function = conn.create_function( FunctionName="testFunction", @@ -731,7 +755,7 @@ def test_tags_not_found(): """ Test list_tags and tag_resource when the lambda with the given arn does not exist """ - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.list_tags.when.called_with( Resource="arn:aws:lambda:{}:function:not-found".format(ACCOUNT_ID) ).should.throw(botocore.client.ClientError) @@ -749,7 +773,7 @@ def test_tags_not_found(): @mock_lambda def test_invoke_async_function(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", Runtime="python2.7", @@ -772,7 +796,7 @@ def test_invoke_async_function(): @mock_lambda 
@freeze_time("2015-01-01 00:00:00") def test_get_function_created_with_zipfile(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) zip_content = get_test_zip_file1() result = conn.create_function( FunctionName="testFunction", @@ -818,7 +842,7 @@ def test_get_function_created_with_zipfile(): @mock_lambda def test_add_function_permission(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) zip_content = get_test_zip_file1() conn.create_function( FunctionName="testFunction", @@ -849,7 +873,7 @@ def test_add_function_permission(): @mock_lambda def test_get_function_policy(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) zip_content = get_test_zip_file1() conn.create_function( FunctionName="testFunction", @@ -884,12 +908,15 @@ def test_get_function_policy(): @mock_lambda @mock_s3 def test_list_versions_by_function(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", @@ -940,12 +967,15 @@ def test_list_versions_by_function(): @mock_lambda @mock_s3 def test_create_function_with_already_exists(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", @@ -977,7 +1007,7 @@ def test_create_function_with_already_exists(): @mock_lambda @mock_s3 def test_list_versions_by_function_for_nonexistent_function(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) versions = conn.list_versions_by_function(FunctionName="testFunction") assert len(versions["Versions"]) == 0 @@ -1326,12 +1356,15 @@ def test_delete_event_source_mapping(): @mock_lambda @mock_s3 def test_update_configuration(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file2() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) fxn = conn.create_function( FunctionName="testFunction", @@ -1374,7 +1407,7 @@ def test_update_configuration(): @mock_lambda def test_update_function_zip(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) zip_content_one = get_test_zip_file1() @@ -1429,13 +1462,16 @@ def test_update_function_zip(): @mock_lambda @mock_s3 def test_update_function_s3(): - s3_conn = boto3.client("s3", "us-west-2") - s3_conn.create_bucket(Bucket="test-bucket") + s3_conn = 
boto3.client("s3", _lambda_region) + s3_conn.create_bucket( + Bucket="test-bucket", + CreateBucketConfiguration={"LocationConstraint": _lambda_region}, + ) zip_content = get_test_zip_file1() s3_conn.put_object(Bucket="test-bucket", Key="test.zip", Body=zip_content) - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) fxn = conn.create_function( FunctionName="testFunctionS3", @@ -1516,7 +1552,7 @@ def test_create_function_with_unknown_arn(): def create_invalid_lambda(role): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) zip_content = get_test_zip_file1() with assert_raises(ClientError) as err: conn.create_function( @@ -1535,7 +1571,7 @@ def create_invalid_lambda(role): def get_role_name(): with mock_iam(): - iam = boto3.client("iam", region_name="us-west-2") + iam = boto3.client("iam", region_name=_lambda_region) try: return iam.get_role(RoleName="my-role")["Role"]["Arn"] except ClientError: diff --git a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py index 84f7a8b86..3ee517ce8 100644 --- a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py +++ b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py @@ -21,7 +21,10 @@ def test_get_resources_s3(): # Create 4 buckets for i in range(1, 5): i_str = str(i) - s3_client.create_bucket(Bucket="test_bucket" + i_str) + s3_client.create_bucket( + Bucket="test_bucket" + i_str, + CreateBucketConfiguration={"LocationConstraint": "eu-central-1"}, + ) s3_client.put_bucket_tagging( Bucket="test_bucket" + i_str, Tagging={"TagSet": [{"Key": "key" + i_str, "Value": "value" + i_str}]}, diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index afea4d55e..33b4299a6 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1546,7 +1546,8 @@ def test_bucket_create_force_us_east_1(): s3 = boto3.resource("s3", region_name=DEFAULT_REGION_NAME) with assert_raises(ClientError) as exc: s3.create_bucket( - Bucket="blah", CreateBucketConfiguration={"LocationConstraint": DEFAULT_REGION_NAME} + Bucket="blah", + CreateBucketConfiguration={"LocationConstraint": DEFAULT_REGION_NAME}, ) exc.exception.response["Error"]["Code"].should.equal("InvalidLocationConstraint") From d5a36752d76f67036583705e17516c3337403dc6 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 2 Feb 2020 12:44:26 +0000 Subject: [PATCH 077/125] Add S3 LocationConstraint to CF tests --- tests/test_cloudformation/test_cloudformation_stack_crud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud.py b/tests/test_cloudformation/test_cloudformation_stack_crud.py index 75f705ea7..3d1b2ab8c 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud.py @@ -143,7 +143,7 @@ def test_create_stack_with_notification_arn(): @mock_s3_deprecated def test_create_stack_from_s3_url(): s3_conn = boto.s3.connect_to_region("us-west-1") - bucket = s3_conn.create_bucket("foobar") + bucket = s3_conn.create_bucket("foobar", location="us-west-1") key = boto.s3.key.Key(bucket) key.key = "template-key" key.set_contents_from_string(dummy_template_json) From ceb16b00a7dcfe36a9255c30aa4c7ab17317f3d6 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 2 Feb 2020 12:45:57 +0000 Subject: [PATCH 078/125] S3 LocationConstraint test can only be run in 
non-ServerMode --- tests/test_s3/test_s3.py | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 33b4299a6..56cbe547b 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1118,17 +1118,22 @@ def test_bucket_location_nondefault(): ) -@mock_s3 -def test_s3_location_should_error_outside_useast1(): - s3 = boto3.client("s3", region_name="eu-west-1") +# Test uses current Region to determine whether to throw an error +# Region is retrieved based on current URL +# URL will always be localhost in Server Mode, so can't run it there +if not settings.TEST_SERVER_MODE: - bucket_name = "asdfasdfsdfdsfasda" + @mock_s3 + def test_s3_location_should_error_outside_useast1(): + s3 = boto3.client("s3", region_name="eu-west-1") - with assert_raises(ClientError) as e: - s3.create_bucket(Bucket=bucket_name) - e.exception.response["Error"]["Message"].should.equal( - "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to." - ) + bucket_name = "asdfasdfsdfdsfasda" + + with assert_raises(ClientError) as e: + s3.create_bucket(Bucket=bucket_name) + e.exception.response["Error"]["Message"].should.equal( + "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to." + ) @mock_s3_deprecated From e21ddb7abc2efb83b1fa2cefee8ee9bd9427b111 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 2 Feb 2020 14:25:44 +0000 Subject: [PATCH 079/125] Use var instead of hardcoded string for region --- tests/test_awslambda/test_lambda.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 446856f60..4f587cdd8 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -153,7 +153,7 @@ def test_invoke_event_function(): @mock_lambda def test_invoke_dryrun_function(): - conn = boto3.client("lambda", "us-west-2") + conn = boto3.client("lambda", _lambda_region) conn.create_function( FunctionName="testFunction", Runtime="python2.7", From c36371e235a92a45dbd49aefca3cbd8d24c76fdd Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Sun, 2 Feb 2020 12:47:54 -0300 Subject: [PATCH 080/125] Add failing test for database creation with iam --- tests/test_rds2/test_rds2.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 9a5a73678..6dc42fbc1 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -1689,3 +1689,18 @@ def test_create_parameter_group_with_tags(): ResourceName="arn:aws:rds:us-west-2:1234567890:pg:test" ) result["TagList"].should.equal([{"Value": "bar", "Key": "foo"}]) + + +@mock_rds2 +def test_create_database_with_iam_authentication(): + conn = boto3.client("rds", region_name="us-west-2") + + database = conn.create_db_instance( + DBInstanceIdentifier="rds", + DBInstanceClass="db.t1.micro", + Engine="postgres", + EnableIAMDatabaseAuthentication=True, + ) + + db_instance = database["DBInstance"] + db_instance["IAMDatabaseAuthenticationEnabled"].should.equal(True) From ec66670315750fd1c60df369c8e12a05f0b4498e Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Sun, 2 Feb 2020 12:50:46 -0300 Subject: [PATCH 081/125] Add enable_iam_database_authentication parameter in RDS2Response --- moto/rds2/responses.py | 1 + 1 file changed, 1 insertion(+) diff --git 
a/moto/rds2/responses.py b/moto/rds2/responses.py index 7c815b2d5..cdffdd40e 100644 --- a/moto/rds2/responses.py +++ b/moto/rds2/responses.py @@ -27,6 +27,7 @@ class RDS2Response(BaseResponse): "db_subnet_group_name": self._get_param("DBSubnetGroupName"), "engine": self._get_param("Engine"), "engine_version": self._get_param("EngineVersion"), + "enable_iam_database_authentication": self._get_bool_param("EnableIAMDatabaseAuthentication"), "license_model": self._get_param("LicenseModel"), "iops": self._get_int_param("Iops"), "kms_key_id": self._get_param("KmsKeyId"), From dfd21187e143e58f412d03a48c05529600f99cbe Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Sun, 2 Feb 2020 12:55:05 -0300 Subject: [PATCH 082/125] Change iam_database_authentication_enabled to enable_iam_database_authentication in accordance with aws docs --- moto/rds2/models.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index d2aa24a20..68acef0a0 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -130,7 +130,9 @@ class Database(BaseModel): if not self.option_group_name and self.engine in self.default_option_groups: self.option_group_name = self.default_option_groups[self.engine] self.character_set_name = kwargs.get("character_set_name", None) - self.iam_database_authentication_enabled = False + self.enable_iam_database_authentication = kwargs.get( + "enable_iam_database_authentication", False + ) self.dbi_resource_id = "db-M5ENSHXFPU6XHZ4G4ZEI5QIO2U" self.tags = kwargs.get("tags", []) From 51e787fba6b24af7284394126ae5f1ced96a4f35 Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Sun, 2 Feb 2020 12:57:16 -0300 Subject: [PATCH 083/125] Add enable_iam_database_authentication in 'to_xml' method --- moto/rds2/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index 68acef0a0..aae708cdd 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -216,7 +216,7 @@ class Database(BaseModel): <ReadReplicaSourceDBInstanceIdentifier>{{ database.source_db_identifier }}</ReadReplicaSourceDBInstanceIdentifier> {% endif %} <Engine>{{ database.engine }}</Engine> - <IAMDatabaseAuthenticationEnabled>{{database.iam_database_authentication_enabled }}</IAMDatabaseAuthenticationEnabled> + <IAMDatabaseAuthenticationEnabled>{{database.enable_iam_database_authentication|lower }}</IAMDatabaseAuthenticationEnabled> <LicenseModel>{{ database.license_model }}</LicenseModel> <EngineVersion>{{ database.engine_version }}</EngineVersion> From eb0687eeaa3fb544c139c0dec43b2b6b828dbaa8 Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Sun, 2 Feb 2020 13:08:13 -0300 Subject: [PATCH 084/125] Add failing test for EnableIAMDatabaseAuthentication snapshot --- tests/test_rds2/test_rds2.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 6dc42fbc1..7e6670e9b 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -1704,3 +1704,21 @@ def test_create_database_with_iam_authentication(): db_instance = database["DBInstance"] db_instance["IAMDatabaseAuthenticationEnabled"].should.equal(True) + + +@mock_rds2 +def test_create_db_snapshot_with_iam_authentication(): + conn = boto3.client("rds", region_name="us-west-2") + + conn.create_db_instance( + DBInstanceIdentifier="rds", + DBInstanceClass="db.t1.micro", + Engine="postgres", + EnableIAMDatabaseAuthentication=True, + ) + + snapshot = conn.create_db_snapshot( + DBInstanceIdentifier="rds", DBSnapshotIdentifier="snapshot" + ).get("DBSnapshot") + + snapshot.get("IAMDatabaseAuthenticationEnabled").should.equal(True) From 06e4cafd20aa5140d89a952a97b35c8833999194 Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti 
Date: Sun, 2 Feb 2020 13:10:04 -0300 Subject: [PATCH 085/125] Add enable_iam_database_authentication variable into snapshot 'to_xml' --- moto/rds2/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index aae708cdd..963af1c63 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -544,7 +544,7 @@ class Snapshot(BaseModel): <KmsKeyId>{{ database.kms_key_id }}</KmsKeyId> <DBSnapshotArn>{{ snapshot.snapshot_arn }}</DBSnapshotArn> - <IAMDatabaseAuthenticationEnabled>false</IAMDatabaseAuthenticationEnabled> + <IAMDatabaseAuthenticationEnabled>{{ database.enable_iam_database_authentication|lower }}</IAMDatabaseAuthenticationEnabled> """ ) return template.render(snapshot=self, database=self.database) From 9f8388e4021f082b2199d698d56da941c551dd53 Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Sun, 2 Feb 2020 13:19:50 -0300 Subject: [PATCH 086/125] Change test name in favor of abbreviation --- tests/test_rds2/test_rds2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 7e6670e9b..e93ff43e9 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -1692,7 +1692,7 @@ def test_create_parameter_group_with_tags(): @mock_rds2 -def test_create_database_with_iam_authentication(): +def test_create_db_with_iam_authentication(): conn = boto3.client("rds", region_name="us-west-2") database = conn.create_db_instance( From f0509276d892258aeee1f869b4ac1f08120feaa5 Mon Sep 17 00:00:00 2001 From: Guilherme Martins Crocetti Date: Sun, 2 Feb 2020 13:46:01 -0300 Subject: [PATCH 087/125] Apply black in responses.py --- moto/rds2/responses.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/moto/rds2/responses.py b/moto/rds2/responses.py index cdffdd40e..b63e9f8b8 100644 --- a/moto/rds2/responses.py +++ b/moto/rds2/responses.py @@ -27,7 +27,9 @@ class RDS2Response(BaseResponse): "db_subnet_group_name": self._get_param("DBSubnetGroupName"), "engine": self._get_param("Engine"), "engine_version": self._get_param("EngineVersion"), - "enable_iam_database_authentication": self._get_bool_param("EnableIAMDatabaseAuthentication"), + "enable_iam_database_authentication": self._get_bool_param( + "EnableIAMDatabaseAuthentication" + ), "license_model": self._get_param("LicenseModel"), "iops": self._get_int_param("Iops"), "kms_key_id": self._get_param("KmsKeyId"), From c9995412b525769303c24171c7d9c54dd6b5098a Mon Sep 17 00:00:00 2001 From: Jon Beilke Date: Mon, 3 Feb 2020 10:21:22 -0600 Subject: [PATCH 088/125] add support for apigateway fields with default values including apiKeySource, endpointConfiguration, and tags --- moto/apigateway/models.py | 29 +++++- moto/apigateway/responses.py | 43 ++++++++- tests/test_apigateway/test_apigateway.py | 117 ++++++++++++++++++++++- 3 files changed, 184 insertions(+), 5 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index fd2fb7064..234d6636c 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -394,12 +394,17 @@ class UsagePlanKey(BaseModel, dict): class RestAPI(BaseModel): - def __init__(self, id, region_name, name, description): + def __init__(self, id, region_name, name, description, **kwargs): self.id = id self.region_name = region_name self.name = name self.description = description self.create_date = int(time.time()) + self.api_key_source = kwargs.get("api_key_source") or "HEADER" + self.endpoint_configuration = kwargs.get("endpoint_configuration") or { "types": ["EDGE"] } + self.tags = kwargs.get("tags") or {} self.deployments = {} self.stages = {} @@ -416,6 +421,9 @@ class RestAPI(BaseModel): "name": 
self.name, "description": self.description, "createdDate": int(time.time()), + "apiKeySource": self.api_key_source, + "endpointConfiguration": self.endpoint_configuration, + "tags": self.tags, } def add_child(self, path, parent_id=None): @@ -529,9 +537,24 @@ class APIGatewayBackend(BaseBackend): self.__dict__ = {} self.__init__(region_name) - def create_rest_api(self, name, description): + def create_rest_api( + self, + name, + description, + api_key_source=None, + endpoint_configuration=None, + tags=None, + ): api_id = create_id() - rest_api = RestAPI(api_id, self.region_name, name, description) + rest_api = RestAPI( + api_id, + self.region_name, + name, + description, + api_key_source=api_key_source, + endpoint_configuration=endpoint_configuration, + tags=tags, + ) self.apis[api_id] = rest_api return rest_api diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index c4c7b403e..e10d670c5 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -12,6 +12,9 @@ from .exceptions import ( ApiKeyAlreadyExists, ) +API_KEY_SOURCES = ["AUTHORIZER", "HEADER"] +ENDPOINT_CONFIGURATION_TYPES = ["PRIVATE", "EDGE", "REGIONAL"] + class APIGatewayResponse(BaseResponse): def error(self, type_, message, status=400): @@ -45,7 +48,45 @@ class APIGatewayResponse(BaseResponse): elif self.method == "POST": name = self._get_param("name") description = self._get_param("description") - rest_api = self.backend.create_rest_api(name, description) + api_key_source = self._get_param("apiKeySource") + endpoint_configuration = self._get_param("endpointConfiguration") + tags = self._get_param("tags") + + # Param validation + if api_key_source and api_key_source not in API_KEY_SOURCES: + return self.error( + "ValidationException", + ( + "1 validation error detected: " + "Value '{api_key_source}' at 'createRestApiInput.apiKeySource' failed " + "to satisfy constraint: Member must satisfy enum value set: " + "[AUTHORIZER, HEADER]" + ).format(api_key_source=api_key_source), + ) + + if endpoint_configuration and "types" in endpoint_configuration: + invalid_types = list( + set(endpoint_configuration["types"]) + - set(ENDPOINT_CONFIGURATION_TYPES) + ) + if invalid_types: + return self.error( + "ValidationException", + ( + "1 validation error detected: Value '{endpoint_type}' " + "at 'createRestApiInput.endpointConfiguration.types' failed " + "to satisfy constraint: Member must satisfy enum value set: " + "[PRIVATE, EDGE, REGIONAL]" + ).format(endpoint_type=invalid_types[0]), + ) + + rest_api = self.backend.create_rest_api( + name, + description, + api_key_source=api_key_source, + endpoint_configuration=endpoint_configuration, + tags=tags, + ) return 200, {}, json.dumps(rest_api.to_dict()) def restapis_individual(self, request, full_url, headers): diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 59c0c07f6..37bcc97f7 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -26,7 +26,14 @@ def test_create_and_get_rest_api(): response.pop("ResponseMetadata") response.pop("createdDate") response.should.equal( - {"id": api_id, "name": "my_api", "description": "this is my api"} + { + "id": api_id, + "name": "my_api", + "description": "this is my api", + "apiKeySource": "HEADER", + "endpointConfiguration": {"types": ["EDGE"]}, + "tags": {}, + } ) @@ -47,6 +54,114 @@ def test_list_and_delete_apis(): len(response["items"]).should.equal(1) +@mock_apigateway +def test_create_rest_api_with_tags(): + 
client = boto3.client("apigateway", region_name="us-west-2") + + response = client.create_rest_api( + name="my_api", description="this is my api", tags={"MY_TAG1": "MY_VALUE1"} + ) + api_id = response["id"] + + response = client.get_rest_api(restApiId=api_id) + + assert "tags" in response + response["tags"].should.equal({"MY_TAG1": "MY_VALUE1"}) + + +@mock_apigateway +def test_create_rest_api_invalid_apikeysource(): + client = boto3.client("apigateway", region_name="us-west-2") + + with assert_raises(ClientError) as ex: + client.create_rest_api( + name="my_api", + description="this is my api", + apiKeySource="not a valid api key source", + ) + ex.exception.response["Error"]["Code"].should.equal("ValidationException") + + +@mock_apigateway +def test_create_rest_api_valid_apikeysources(): + client = boto3.client("apigateway", region_name="us-west-2") + + # 1. test creating rest api with HEADER apiKeySource + response = client.create_rest_api( + name="my_api", description="this is my api", apiKeySource="HEADER", + ) + api_id = response["id"] + + response = client.get_rest_api(restApiId=api_id) + response["apiKeySource"].should.equal("HEADER") + + # 2. test creating rest api with AUTHORIZER apiKeySource + response = client.create_rest_api( + name="my_api2", description="this is my api", apiKeySource="AUTHORIZER", + ) + api_id = response["id"] + + response = client.get_rest_api(restApiId=api_id) + response["apiKeySource"].should.equal("AUTHORIZER") + + +@mock_apigateway +def test_create_rest_api_invalid_endpointconfiguration(): + client = boto3.client("apigateway", region_name="us-west-2") + + with assert_raises(ClientError) as ex: + client.create_rest_api( + name="my_api", + description="this is my api", + endpointConfiguration={"types": ["INVALID"]}, + ) + ex.exception.response["Error"]["Code"].should.equal("ValidationException") + + +@mock_apigateway +def test_create_rest_api_valid_endpointconfigurations(): + client = boto3.client("apigateway", region_name="us-west-2") + + # 1. test creating rest api with PRIVATE endpointConfiguration + response = client.create_rest_api( + name="my_api", + description="this is my api", + endpointConfiguration={"types": ["PRIVATE"]}, + ) + api_id = response["id"] + + response = client.get_rest_api(restApiId=api_id) + response["endpointConfiguration"].should.equal( + {"types": ["PRIVATE"],} + ) + + # 2. test creating rest api with REGIONAL endpointConfiguration + response = client.create_rest_api( + name="my_api2", + description="this is my api", + endpointConfiguration={"types": ["REGIONAL"]}, + ) + api_id = response["id"] + + response = client.get_rest_api(restApiId=api_id) + response["endpointConfiguration"].should.equal( + {"types": ["REGIONAL"],} + ) + + # 3. test creating rest api with EDGE endpointConfiguration + response = client.create_rest_api( + name="my_api3", + description="this is my api", + endpointConfiguration={"types": ["EDGE"]}, + ) + api_id = response["id"] + + response = client.get_rest_api(restApiId=api_id) + response["endpointConfiguration"].should.equal( + {"types": ["EDGE"],} + ) + + @mock_apigateway def test_create_resource__validate_name(): client = boto3.client("apigateway", region_name="us-west-2") From 6d64b12b4117b4b85af92f7aeca8a78e49fa9bc8 Mon Sep 17 00:00:00 2001 From: rossjones Date: Tue, 4 Feb 2020 10:02:43 +0000 Subject: [PATCH 089/125] Remove ResourceWarnings when loading AMIS and INSTANCE_TYPES When loading AMIS and INSTANCE_TYPES in moto.ec2.models a file handle is potentially leaked when loading the JSON. 
This results in a ResourceWarning which is a bit of unnecessary noise. Rather than passing an open() call directly to json.load(), this instead uses a context manager in a small private helper function. This fixes https://github.com/spulec/moto/issues/2620 --- moto/ec2/models.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 93a350914..a0c886087 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -139,17 +139,22 @@ from .utils import ( rsa_public_key_fingerprint, ) -INSTANCE_TYPES = json.load( - open(resource_filename(__name__, "resources/instance_types.json"), "r") + def _load_resource(filename): with open(filename, "r") as f: return json.load(f) + +INSTANCE_TYPES = _load_resource( resource_filename(__name__, "resources/instance_types.json") ) -AMIS = json.load( - open( - os.environ.get("MOTO_AMIS_PATH") - or resource_filename(__name__, "resources/amis.json"), - "r", - ) + +AMIS = _load_resource( os.environ.get("MOTO_AMIS_PATH") or resource_filename(__name__, "resources/amis.json"), ) + OWNER_ID = "111122223333" From 4f0c06ca5322274e9d7c1744f86115b354755e84 Mon Sep 17 00:00:00 2001 From: Jay Udey Date: Tue, 4 Feb 2020 14:04:45 -0600 Subject: [PATCH 090/125] handle map or list parameters --- moto/ses/models.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/moto/ses/models.py b/moto/ses/models.py index eacdd8458..4b6ce52c8 100644 --- a/moto/ses/models.py +++ b/moto/ses/models.py @@ -148,11 +148,15 @@ class SESBackend(BaseBackend): def __type_of_message__(self, destinations): """Checks the destination for any special address that could indicate delivery, complaint or bounce like in SES simulator""" - alladdress = ( - destinations.get("ToAddresses", []) - + destinations.get("CcAddresses", []) - + destinations.get("BccAddresses", []) - ) + if isinstance(destinations, list): + alladdress = destinations + else: + alladdress = ( + destinations.get("ToAddresses", []) + + destinations.get("CcAddresses", []) + + destinations.get("BccAddresses", []) + ) + for addr in alladdress: if SESFeedback.SUCCESS_ADDR in addr: return SESFeedback.DELIVERY From bb64258a8f2fd994d40405f76440c47f12012ef9 Mon Sep 17 00:00:00 2001 From: Mike Grima Date: Tue, 4 Feb 2020 17:06:55 -0800 Subject: [PATCH 091/125] Fixed issue with Lambda invoke via ARN - Fixed an issue where Lambda invocations via an ARN were hitting real AWS. 
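For reference, a condensed, self-contained sketch of the call shape this fixes. The role and zip setup below are illustrative stand-ins for the suite's get_role_name() and get_test_zip_file1() helpers and are not part of this patch:

    import io
    import json
    import zipfile

    import boto3
    from moto import mock_iam, mock_lambda
    from moto.awslambda.models import ACCOUNT_ID

    @mock_iam
    @mock_lambda
    def invoke_by_arn():
        # Stand-in role; the real tests create one via get_role_name().
        iam = boto3.client("iam", region_name="us-west-2")
        role_arn = iam.create_role(
            RoleName="my-role", AssumeRolePolicyDocument="some policy"
        )["Role"]["Arn"]

        # Stand-in zip; the real tests use get_test_zip_file1().
        buf = io.BytesIO()
        with zipfile.ZipFile(buf, "w") as zf:
            zf.writestr(
                "lambda_function.py",
                "def lambda_handler(event, context):\n    return event\n",
            )

        conn = boto3.client("lambda", region_name="us-west-2")
        conn.create_function(
            FunctionName="testFunction",
            Runtime="python2.7",
            Role=role_arn,
            Handler="lambda_function.lambda_handler",
            Code={"ZipFile": buf.getvalue()},
        )
        # Invoke by the full ARN instead of the bare name; on the wire the
        # colons are percent-encoded, hence the unquote() in responses.py.
        return conn.invoke(
            FunctionName="arn:aws:lambda:us-west-2:{}:function:testFunction".format(
                ACCOUNT_ID
            ),
            InvocationType="RequestResponse",
            Payload=json.dumps({"msg": "hello"}),
        )

Before this change the percent-encoded ARN never matched the [\w_-]+ URL pattern, so the request was never intercepted and fell through to the real endpoint.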
--- moto/awslambda/responses.py | 3 ++- moto/awslambda/urls.py | 1 + tests/test_awslambda/test_lambda.py | 37 +++++++++++++++++++++++++++++ 3 files changed, 40 insertions(+), 1 deletion(-) diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index e1713ce52..bac670b8e 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -176,7 +176,8 @@ class LambdaResponse(BaseResponse): def _invoke(self, request, full_url): response_headers = {} - function_name = self.path.rsplit("/", 2)[-2] + # URL Decode in case it's an ARN: + function_name = unquote(self.path.rsplit("/", 2)[-2]) qualifier = self._get_param("qualifier") response_header, payload = self.lambda_backend.invoke( diff --git a/moto/awslambda/urls.py b/moto/awslambda/urls.py index 6c9b736a6..c25e58dba 100644 --- a/moto/awslambda/urls.py +++ b/moto/awslambda/urls.py @@ -12,6 +12,7 @@ url_paths = { r"{0}/(?P<api_version>[^/]+)/event-source-mappings/?$": response.event_source_mappings, r"{0}/(?P<api_version>[^/]+)/event-source-mappings/(?P<UUID>[\w_-]+)/?$": response.event_source_mapping, r"{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/invocations/?$": response.invoke, + r"{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>.+)/invocations/?$": response.invoke, r"{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/invoke-async/?$": response.invoke_async, r"{0}/(?P<api_version>[^/]+)/tags/(?P<resource_arn>.+)": response.tag, r"{0}/(?P<api_version>[^/]+)/functions/(?P<function_name>[\w_-]+)/policy/(?P<statement_id>[\w_-]+)$": response.policy, diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index dfd6431e7..4db13d220 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -124,6 +124,43 @@ def test_invoke_requestresponse_function(): json.loads(payload).should.equal(in_data) +@mock_lambda +def test_invoke_requestresponse_function_with_arn(): + from moto.awslambda.models import ACCOUNT_ID + + conn = boto3.client("lambda", "us-west-2") + conn.create_function( + FunctionName="testFunction", + Runtime="python2.7", + Role=get_role_name(), + Handler="lambda_function.lambda_handler", + Code={"ZipFile": get_test_zip_file1()}, + Description="test lambda function", + Timeout=3, + MemorySize=128, + Publish=True, + ) + + in_data = {"msg": "So long and thanks for all the fish"} + success_result = conn.invoke( + FunctionName="arn:aws:lambda:us-west-2:{}:function:testFunction".format( + ACCOUNT_ID + ), + InvocationType="RequestResponse", + Payload=json.dumps(in_data), + ) + + success_result["StatusCode"].should.equal(202) + result_obj = json.loads( + base64.b64decode(success_result["LogResult"]).decode("utf-8") + ) + + result_obj.should.equal(in_data) + + payload = success_result["Payload"].read().decode("utf-8") + json.loads(payload).should.equal(in_data) + + @mock_lambda def test_invoke_event_function(): conn = boto3.client("lambda", "us-west-2") From 4bae0339c2f09b84639c64a7f7776bbc03aa87e5 Mon Sep 17 00:00:00 2001 From: Ivan Dromigny Date: Wed, 5 Feb 2020 12:03:24 +0100 Subject: [PATCH 092/125] Add Filter parameter for cognitoidp list_users() --- moto/cognitoidp/responses.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/moto/cognitoidp/responses.py b/moto/cognitoidp/responses.py index 80247b076..a170b7541 100644 --- a/moto/cognitoidp/responses.py +++ b/moto/cognitoidp/responses.py @@ -279,9 +279,13 @@ class CognitoIdpResponse(BaseResponse): user_pool_id = self._get_param("UserPoolId") limit = self._get_param("Limit") token = self._get_param("PaginationToken") + filt = self._get_param("Filter") users, token = cognitoidp_backends[self.region].list_users( user_pool_id, limit=limit, 
pagination_token=token ) + if filt: + name, value = filt.replace('"', '').split('=') + users = [user for user in users for attribute in user.attributes if attribute['Name'] == name and attribute['Value'] == value] response = {"Users": [user.to_json(extended=True) for user in users]} if token: response["PaginationToken"] = str(token) From 8115dd2d1b0b6b89a8ceba50d8a7edc309f49e52 Mon Sep 17 00:00:00 2001 From: Ivan Dromigny Date: Wed, 5 Feb 2020 12:03:33 +0100 Subject: [PATCH 093/125] Add test --- tests/test_cognitoidp/test_cognitoidp.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index 6a13683f0..27a6841f4 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -958,6 +958,15 @@ def test_list_users(): result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should.equal(username) + username_bis = str(uuid.uuid4()) + conn.admin_create_user( + UserPoolId=user_pool_id, Username=username_bis, + UserAttributes=[{'Name': 'phone_number', 'Value': '+33666666666'}] + ) + result = conn.list_users(UserPoolId=user_pool_id, Filter='phone_number="+33666666666') + result["Users"].should.have.length_of(1) + result["Users"][0]["Username"].should.equal(username_bis) + @mock_cognitoidp def test_list_users_returns_limit_items(): From d8d057711dcf88a526b7bb454fa21e6050f122b4 Mon Sep 17 00:00:00 2001 From: Ivan Dromigny Date: Wed, 5 Feb 2020 14:19:08 +0100 Subject: [PATCH 094/125] Change from black linter --- moto/cognitoidp/responses.py | 9 +++++++-- tests/test_cognitoidp/test_cognitoidp.py | 9 ++++++--- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/moto/cognitoidp/responses.py b/moto/cognitoidp/responses.py index a170b7541..fa3b7b0b5 100644 --- a/moto/cognitoidp/responses.py +++ b/moto/cognitoidp/responses.py @@ -284,8 +284,13 @@ class CognitoIdpResponse(BaseResponse): user_pool_id, limit=limit, pagination_token=token ) if filt: - name, value = filt.replace('"', '').split('=') - users = [user for user in users for attribute in user.attributes if attribute['Name'] == name and attribute['Value'] == value] + name, value = filt.replace('"', "").split("=") + users = [ + user + for user in users + for attribute in user.attributes + if attribute["Name"] == name and attribute["Value"] == value + ] response = {"Users": [user.to_json(extended=True) for user in users]} if token: response["PaginationToken"] = str(token) diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index 27a6841f4..2f7ed11e5 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -960,10 +960,13 @@ def test_list_users(): username_bis = str(uuid.uuid4()) conn.admin_create_user( - UserPoolId=user_pool_id, Username=username_bis, - UserAttributes=[{'Name': 'phone_number', 'Value': '+33666666666'}] + UserPoolId=user_pool_id, + Username=username_bis, + UserAttributes=[{"Name": "phone_number", "Value": "+33666666666"}], + ) + result = conn.list_users( + UserPoolId=user_pool_id, Filter='phone_number="+33666666666' ) - result = conn.list_users(UserPoolId=user_pool_id, Filter='phone_number="+33666666666') result["Users"].should.have.length_of(1) result["Users"][0]["Username"].should.equal(username_bis) From 1321943d60eaab533396fa32d30a831b34b4474e Mon Sep 17 00:00:00 2001 From: Jay Udey Date: Wed, 5 Feb 2020 09:03:45 -0600 Subject: [PATCH 095/125] add test verifying solution --- 
tests/test_ses/test_ses_sns_boto3.py | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) diff --git a/tests/test_ses/test_ses_sns_boto3.py b/tests/test_ses/test_ses_sns_boto3.py index fc58d88aa..43d4000bf 100644 --- a/tests/test_ses/test_ses_sns_boto3.py +++ b/tests/test_ses/test_ses_sns_boto3.py @@ -40,6 +40,8 @@ def __setup_feedback_env__( ) # Verify SES domain ses_conn.verify_domain_identity(Domain=domain) + # Specify email address to allow for raw e-mails to be processed + ses_conn.verify_email_identity(EmailAddress="test@example.com") # Setup SES notification topic if expected_msg is not None: ses_conn.set_identity_notification_topic( @@ -47,7 +49,7 @@ def __setup_feedback_env__( ) -def __test_sns_feedback__(addr, expected_msg): +def __test_sns_feedback__(addr, expected_msg, raw_email=False): region_name = "us-east-1" ses_conn = boto3.client("ses", region_name=region_name) sns_conn = boto3.client("sns", region_name=region_name) @@ -73,7 +75,18 @@ def __test_sns_feedback__(addr, expected_msg): "Body": {"Text": {"Data": "test body"}}, }, ) - ses_conn.send_email(**kwargs) + if raw_email: + kwargs.pop("Message") + kwargs.pop("Destination") + kwargs.update( + { + "Destinations": [addr + "@" + domain], + "RawMessage": {"Data": bytearray("raw_email", "utf-8")}, + } + ) + ses_conn.send_raw_email(**kwargs) + else: + ses_conn.send_email(**kwargs) # Wait for messages in the queues queue = sqs_conn.get_queue_by_name(QueueName=queue) @@ -112,3 +125,12 @@ def test_sns_feedback_complaint(): @mock_ses def test_sns_feedback_delivery(): __test_sns_feedback__(SESFeedback.SUCCESS_ADDR, SESFeedback.DELIVERY) + + +@mock_sqs +@mock_sns +@mock_ses +def test_sns_feedback_delivery_raw_email(): + __test_sns_feedback__( + SESFeedback.SUCCESS_ADDR, SESFeedback.DELIVERY, raw_email=True + ) From 414f8086b0210ca6522183c14a3ab6a188689766 Mon Sep 17 00:00:00 2001 From: Brady Date: Wed, 5 Feb 2020 10:30:59 -0500 Subject: [PATCH 096/125] use sure for unit test assertions --- tests/test_utilities/test_tagging_service.py | 109 ++++++++++--------- 1 file changed, 57 insertions(+), 52 deletions(-) diff --git a/tests/test_utilities/test_tagging_service.py b/tests/test_utilities/test_tagging_service.py index 1cd820a19..0d7db3e25 100644 --- a/tests/test_utilities/test_tagging_service.py +++ b/tests/test_utilities/test_tagging_service.py @@ -1,59 +1,64 @@ -import unittest +import sure from moto.utilities.tagging_service import TaggingService -class TestTaggingService(unittest.TestCase): - def test_list_empty(self): - svc = TaggingService() - result = svc.list_tags_for_resource("test") - self.assertEqual(result, {"Tags": []}) +def test_list_empty(): + svc = TaggingService() + result = svc.list_tags_for_resource("test") - def test_create_tag(self): - svc = TaggingService("TheTags", "TagKey", "TagValue") - tags = [{"TagKey": "key_key", "TagValue": "value_value"}] - svc.tag_resource("arn", tags) - actual = svc.list_tags_for_resource("arn") - expected = {"TheTags": [{"TagKey": "key_key", "TagValue": "value_value"}]} - self.assertDictEqual(expected, actual) - - def test_create_tag_without_value(self): - svc = TaggingService() - tags = [{"Key": "key_key"}] - svc.tag_resource("arn", tags) - actual = svc.list_tags_for_resource("arn") - expected = {"Tags": [{"Key": "key_key", "Value": None}]} - self.assertDictEqual(expected, actual) - - def test_delete_tag_using_names(self): - svc = TaggingService() - tags = [{"Key": "key_key", "Value": "value_value"}] - svc.tag_resource("arn", tags) - 
svc.untag_resource_using_names("arn", ["key_key"]) - result = svc.list_tags_for_resource("arn") - self.assertEqual(result, {"Tags": []}) - - def test_list_empty_delete(self): - svc = TaggingService() - svc.untag_resource_using_names("arn", ["key_key"]) - result = svc.list_tags_for_resource("arn") - self.assertEqual(result, {"Tags": []}) - - def test_delete_tag_using_tags(self): - svc = TaggingService() - tags = [{"Key": "key_key", "Value": "value_value"}] - svc.tag_resource("arn", tags) - svc.untag_resource_using_tags("arn", tags) - result = svc.list_tags_for_resource("arn") - self.assertEqual(result, {"Tags": []}) - - def test_extract_tag_names(self): - svc = TaggingService() - tags = [{"Key": "key1", "Value": "value1"}, {"Key": "key2", "Value": "value2"}] - actual = svc.extract_tag_names(tags) - expected = ["key1", "key2"] - self.assertEqual(expected, actual) + {"Tags": []}.should.be.equal(result) -if __name__ == "__main__": - unittest.main() +def test_create_tag(): + svc = TaggingService("TheTags", "TagKey", "TagValue") + tags = [{"TagKey": "key_key", "TagValue": "value_value"}] + svc.tag_resource("arn", tags) + actual = svc.list_tags_for_resource("arn") + expected = {"TheTags": [{"TagKey": "key_key", "TagValue": "value_value"}]} + + expected.should.be.equal(actual) + +def test_create_tag_without_value(): + svc = TaggingService() + tags = [{"Key": "key_key"}] + svc.tag_resource("arn", tags) + actual = svc.list_tags_for_resource("arn") + expected = {"Tags": [{"Key": "key_key", "Value": None}]} + + expected.should.be.equal(actual) + +def test_delete_tag_using_names(): + svc = TaggingService() + tags = [{"Key": "key_key", "Value": "value_value"}] + svc.tag_resource("arn", tags) + svc.untag_resource_using_names("arn", ["key_key"]) + result = svc.list_tags_for_resource("arn") + + {"Tags": []}.should.be.equal(result) + +def test_list_empty_delete(): + svc = TaggingService() + svc.untag_resource_using_names("arn", ["key_key"]) + result = svc.list_tags_for_resource("arn") + + {"Tags": []}.should.be.equal(result) + +def test_delete_tag_using_tags(): + svc = TaggingService() + tags = [{"Key": "key_key", "Value": "value_value"}] + svc.tag_resource("arn", tags) + svc.untag_resource_using_tags("arn", tags) + result = svc.list_tags_for_resource("arn") + + {"Tags": []}.should.be.equal(result) + + +def test_extract_tag_names(): + svc = TaggingService() + tags = [{"Key": "key1", "Value": "value1"}, {"Key": "key2", "Value": "value2"}] + actual = svc.extract_tag_names(tags) + expected = ["key1", "key2"] + + expected.should.be.equal(actual) + From c95254a2843fac342e702f7708cce63274a053d0 Mon Sep 17 00:00:00 2001 From: Brady Date: Wed, 5 Feb 2020 11:58:52 -0500 Subject: [PATCH 097/125] delete tags when their resource is deleted --- moto/events/models.py | 2 ++ moto/utilities/tagging_service.py | 3 +++ tests/test_utilities/test_tagging_service.py | 17 ++++++++++++++++- 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/moto/events/models.py b/moto/events/models.py index 695cfb17a..82723ac6c 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -143,6 +143,8 @@ class EventsBackend(BaseBackend): def delete_rule(self, name): self.rules_order.pop(self.rules_order.index(name)) + arn = self.rules.get(name).arn + self.tagger.delete_all_tags_for_resource(arn) return self.rules.pop(name) is not None def describe_rule(self, name): diff --git a/moto/utilities/tagging_service.py b/moto/utilities/tagging_service.py index 8c7a86f1d..c56fd2306 100644 --- a/moto/utilities/tagging_service.py +++ 
b/moto/utilities/tagging_service.py @@ -12,6 +12,9 @@ class TaggingService: result.append({self.keyName: k, self.valueName: v}) return {self.tagName: result} + def delete_all_tags_for_resource(self, arn): + del self.tags[arn] + def tag_resource(self, arn, tags): if arn not in self.tags: self.tags[arn] = {} diff --git a/tests/test_utilities/test_tagging_service.py b/tests/test_utilities/test_tagging_service.py index 0d7db3e25..249e903fe 100644 --- a/tests/test_utilities/test_tagging_service.py +++ b/tests/test_utilities/test_tagging_service.py @@ -19,6 +19,7 @@ def test_create_tag(): expected.should.be.equal(actual) + def test_create_tag_without_value(): svc = TaggingService() tags = [{"Key": "key_key"}] @@ -28,6 +29,7 @@ def test_create_tag_without_value(): expected.should.be.equal(actual) + def test_delete_tag_using_names(): svc = TaggingService() tags = [{"Key": "key_key", "Value": "value_value"}] @@ -37,6 +39,19 @@ def test_delete_tag_using_names(): {"Tags": []}.should.be.equal(result) + +def test_delete_all_tags_for_resource(): + svc = TaggingService() + tags = [{"Key": "key_key", "Value": "value_value"}] + tags2 = [{"Key": "key_key2", "Value": "value_value2"}] + svc.tag_resource("arn", tags) + svc.tag_resource("arn", tags2) + svc.delete_all_tags_for_resource("arn") + result = svc.list_tags_for_resource("arn") + + {"Tags": []}.should.be.equal(result) + + def test_list_empty_delete(): svc = TaggingService() svc.untag_resource_using_names("arn", ["key_key"]) @@ -44,6 +59,7 @@ def test_list_empty_delete(): {"Tags": []}.should.be.equal(result) + def test_delete_tag_using_tags(): svc = TaggingService() tags = [{"Key": "key_key", "Value": "value_value"}] @@ -61,4 +77,3 @@ def test_extract_tag_names(): expected = ["key1", "key2"] expected.should.be.equal(actual) - From 5b5510218156ada78990432bf3d07157c68e611d Mon Sep 17 00:00:00 2001 From: Brady Date: Wed, 5 Feb 2020 15:30:34 -0500 Subject: [PATCH 098/125] fix test case --- moto/events/models.py | 3 ++- moto/utilities/tagging_service.py | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/moto/events/models.py b/moto/events/models.py index 82723ac6c..a80b86daa 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -144,7 +144,8 @@ class EventsBackend(BaseBackend): def delete_rule(self, name): self.rules_order.pop(self.rules_order.index(name)) arn = self.rules.get(name).arn - self.tagger.delete_all_tags_for_resource(arn) + if self.tagger.has_tags(arn): + self.tagger.delete_all_tags_for_resource(arn) return self.rules.pop(name) is not None def describe_rule(self, name): diff --git a/moto/utilities/tagging_service.py b/moto/utilities/tagging_service.py index c56fd2306..89b857277 100644 --- a/moto/utilities/tagging_service.py +++ b/moto/utilities/tagging_service.py @@ -15,6 +15,9 @@ class TaggingService: def delete_all_tags_for_resource(self, arn): del self.tags[arn] + def has_tags(self, arn): + return arn in self.tags + def tag_resource(self, arn, tags): if arn not in self.tags: self.tags[arn] = {} From ecdedf30c87fdd321d910374972ec1808bc1b7a1 Mon Sep 17 00:00:00 2001 From: Brady Date: Wed, 5 Feb 2020 16:31:33 -0500 Subject: [PATCH 099/125] force build... 
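For context, the two commits above add has_tags() and delete_all_tags_for_resource() so that deleting a rule also drops its tags. A minimal sketch of that lifecycle, using only names taken from the diffs above:

    from moto.utilities.tagging_service import TaggingService

    svc = TaggingService()
    svc.tag_resource("arn", [{"Key": "key_key", "Value": "value_value"}])
    assert svc.has_tags("arn")

    # Mirrors what EventsBackend.delete_rule now does for the rule's ARN:
    svc.delete_all_tags_for_resource("arn")
    assert not svc.has_tags("arn")
    assert svc.list_tags_for_resource("arn") == {"Tags": []}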
--- tests/test_events/test_events.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index 4fb3b4029..4ecb2d882 100644 --- a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -484,22 +484,22 @@ def test_rule_tagging_happy(): @mock_events def test_rule_tagging_sad(): - b = EventsBackend("us-west-2") + back_end = EventsBackend("us-west-2") try: - b.tag_resource("unknown", []) + back_end.tag_resource("unknown", []) raise "tag_resource should fail if ResourceARN is not known" except JsonRESTError: pass try: - b.untag_resource("unknown", []) + back_end.untag_resource("unknown", []) raise "untag_resource should fail if ResourceARN is not known" except JsonRESTError: pass try: - b.list_tags_for_resource("unknown") + back_end.list_tags_for_resource("unknown") raise "list_tags_for_resource should fail if ResourceARN is not known" except JsonRESTError: pass From 14ebf29a61119649b00469ab6400948d603cb0e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Tomak?= Date: Thu, 6 Feb 2020 11:49:41 +0100 Subject: [PATCH 100/125] Add UpdateOrganizationalUnit endpoint to Organizations API --- moto/organizations/models.py | 5 +++++ moto/organizations/responses.py | 5 +++++ .../test_organizations_boto3.py | 17 +++++++++++++++++ 3 files changed, 27 insertions(+) diff --git a/moto/organizations/models.py b/moto/organizations/models.py index 42e4dd00a..9be129fa7 100644 --- a/moto/organizations/models.py +++ b/moto/organizations/models.py @@ -222,6 +222,11 @@ class OrganizationsBackend(BaseBackend): self.attach_policy(PolicyId=utils.DEFAULT_POLICY_ID, TargetId=new_ou.id) return new_ou.describe() + def update_organizational_unit(self, **kwargs): + ou = self.get_organizational_unit_by_id(kwargs["OrganizationalUnitId"]) + ou.name = kwargs["Name"] + return ou.describe() + def get_organizational_unit_by_id(self, ou_id): ou = next((ou for ou in self.ou if ou.id == ou_id), None) if ou is None: diff --git a/moto/organizations/responses.py b/moto/organizations/responses.py index 7c42eb4ec..ba7dd4453 100644 --- a/moto/organizations/responses.py +++ b/moto/organizations/responses.py @@ -36,6 +36,11 @@ class OrganizationsResponse(BaseResponse): self.organizations_backend.create_organizational_unit(**self.request_params) ) + def update_organizational_unit(self): + return json.dumps( + self.organizations_backend.update_organizational_unit(**self.request_params) + ) + def describe_organizational_unit(self): return json.dumps( self.organizations_backend.describe_organizational_unit( diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py index dd79ae787..ab3ddf671 100644 --- a/tests/test_organizations/test_organizations_boto3.py +++ b/tests/test_organizations/test_organizations_boto3.py @@ -713,3 +713,20 @@ def test_untag_resource_errors(): ex.response["Error"]["Message"].should.equal( "You provided a value that does not match the required pattern." 
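# A note on test_rule_tagging_sad above: `raise "some message"` is not valid
# Python 3, since raising a non-exception object is itself a TypeError. The
# test therefore still fails when the backend does not raise, just with an
# unhelpful error instead of the intended message. A sketch of the same checks
# written with assert_raises (imports per the modules this series already
# uses; the original test also carries a @mock_events decorator):
from nose.tools import assert_raises
from moto.core.exceptions import JsonRESTError
from moto.events.models import EventsBackend

def test_rule_tagging_sad():
    back_end = EventsBackend("us-west-2")
    for call in (
        lambda: back_end.tag_resource("unknown", []),
        lambda: back_end.untag_resource("unknown", []),
        lambda: back_end.list_tags_for_resource("unknown"),
    ):
        # every operation must reject an unknown ResourceARN
        with assert_raises(JsonRESTError):
            call()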
) + + +@mock_organizations +def test_update_organizational_unit(): + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] + ou_name = "ou01" + response = client.create_organizational_unit(ParentId=root_id, Name=ou_name) + validate_organizational_unit(org, response) + response["OrganizationalUnit"]["Name"].should.equal(ou_name) + new_ou_name = "ou02" + response = client.update_organizational_unit( + OrganizationalUnitId=response["OrganizationalUnit"]["Id"], Name=new_ou_name + ) + validate_organizational_unit(org, response) + response["OrganizationalUnit"]["Name"].should.equal(new_ou_name) From fc9eab25919eea0759e1b3146ad111532d1ddfa0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Tomak?= Date: Thu, 6 Feb 2020 12:38:37 +0100 Subject: [PATCH 101/125] Raise DuplicateOrganizationalUnitException Calling UpdateOrganizationalUnit with name that already exists should raise proper error. --- moto/organizations/exceptions.py | 10 +++++++++ moto/organizations/models.py | 8 ++++++- .../test_organizations_boto3.py | 21 +++++++++++++++++++ 3 files changed, 38 insertions(+), 1 deletion(-) diff --git a/moto/organizations/exceptions.py b/moto/organizations/exceptions.py index 01b98da7e..b40908862 100644 --- a/moto/organizations/exceptions.py +++ b/moto/organizations/exceptions.py @@ -10,3 +10,13 @@ class InvalidInputException(JsonRESTError): "InvalidInputException", "You provided a value that does not match the required pattern.", ) + + +class DuplicateOrganizationalUnitException(JsonRESTError): + code = 400 + + def __init__(self): + super(DuplicateOrganizationalUnitException, self).__init__( + "DuplicateOrganizationalUnitException", + "An OU with the same name already exists.", + ) diff --git a/moto/organizations/models.py b/moto/organizations/models.py index 9be129fa7..0db069f9a 100644 --- a/moto/organizations/models.py +++ b/moto/organizations/models.py @@ -8,7 +8,10 @@ from moto.core import BaseBackend, BaseModel from moto.core.exceptions import RESTError from moto.core.utils import unix_time from moto.organizations import utils -from moto.organizations.exceptions import InvalidInputException +from moto.organizations.exceptions import ( + InvalidInputException, + DuplicateOrganizationalUnitException, +) class FakeOrganization(BaseModel): @@ -223,6 +226,9 @@ class OrganizationsBackend(BaseBackend): return new_ou.describe() def update_organizational_unit(self, **kwargs): + for ou in self.ou: + if ou.name == kwargs["Name"]: + raise DuplicateOrganizationalUnitException ou = self.get_organizational_unit_by_id(kwargs["OrganizationalUnitId"]) ou.name = kwargs["Name"] return ou.describe() diff --git a/tests/test_organizations/test_organizations_boto3.py b/tests/test_organizations/test_organizations_boto3.py index ab3ddf671..876e83712 100644 --- a/tests/test_organizations/test_organizations_boto3.py +++ b/tests/test_organizations/test_organizations_boto3.py @@ -730,3 +730,24 @@ def test_update_organizational_unit(): ) validate_organizational_unit(org, response) response["OrganizationalUnit"]["Name"].should.equal(new_ou_name) + + +@mock_organizations +def test_update_organizational_unit_duplicate_error(): + client = boto3.client("organizations", region_name="us-east-1") + org = client.create_organization(FeatureSet="ALL")["Organization"] + root_id = client.list_roots()["Roots"][0]["Id"] + ou_name = "ou01" + response = client.create_organizational_unit(ParentId=root_id, 
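# As written, update_organizational_unit compares kwargs["Name"] against every
# OU, including the one being updated, so "renaming" an OU to its current name
# also raises DuplicateOrganizationalUnitException; the test around this point
# relies on exactly that. If the intent were to flag only a collision with a
# *different* OU, a sketch of that variant (a hypothetical refinement, not
# what the patch implements):
def update_organizational_unit(self, **kwargs):
    ou_id = kwargs["OrganizationalUnitId"]
    for ou in self.ou:
        # skip the OU being renamed; only another OU can be a duplicate
        if ou.name == kwargs["Name"] and ou.id != ou_id:
            raise DuplicateOrganizationalUnitException
    ou = self.get_organizational_unit_by_id(ou_id)
    ou.name = kwargs["Name"]
    return ou.describe()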
Name=ou_name) + validate_organizational_unit(org, response) + response["OrganizationalUnit"]["Name"].should.equal(ou_name) + with assert_raises(ClientError) as e: + client.update_organizational_unit( + OrganizationalUnitId=response["OrganizationalUnit"]["Id"], Name=ou_name + ) + exc = e.exception + exc.operation_name.should.equal("UpdateOrganizationalUnit") + exc.response["Error"]["Code"].should.contain("DuplicateOrganizationalUnitException") + exc.response["Error"]["Message"].should.equal( + "An OU with the same name already exists." + ) From 5d050444915ab0b8798e9895f99e8f6bbdbccd6c Mon Sep 17 00:00:00 2001 From: gruebel Date: Thu, 6 Feb 2020 17:57:00 +0100 Subject: [PATCH 102/125] Add CustomerMasterKeySpec parameter handling --- moto/kms/models.py | 43 ++++++++++++++++++++++++++++----- moto/kms/responses.py | 3 ++- tests/test_kms/test_kms.py | 47 ++++++++++++++++++++++++++++++++++++ tests/test_kms/test_utils.py | 8 +++--- 4 files changed, 90 insertions(+), 11 deletions(-) diff --git a/moto/kms/models.py b/moto/kms/models.py index cceb96342..1015aa72a 100644 --- a/moto/kms/models.py +++ b/moto/kms/models.py @@ -15,7 +15,7 @@ from .utils import decrypt, encrypt, generate_key_id, generate_master_key class Key(BaseModel): - def __init__(self, policy, key_usage, description, tags, region): + def __init__(self, policy, key_usage, customer_master_key_spec, description, tags, region): self.id = generate_key_id() self.policy = policy self.key_usage = key_usage @@ -30,9 +30,7 @@ class Key(BaseModel): self.key_material = generate_master_key() self.origin = "AWS_KMS" self.key_manager = "CUSTOMER" - self.customer_master_key_spec = "SYMMETRIC_DEFAULT" - self.encryption_algorithms = ["SYMMETRIC_DEFAULT"] - self.signing_algorithms = None + self.customer_master_key_spec = customer_master_key_spec or "SYMMETRIC_DEFAULT" @property def physical_resource_id(self): @@ -44,6 +42,38 @@ class Key(BaseModel): self.region, self.account_id, self.id ) + @property + def encryption_algorithms(self): + if self.key_usage == "SIGN_VERIFY": + return None + elif self.customer_master_key_spec == "SYMMETRIC_DEFAULT": + return ["SYMMETRIC_DEFAULT"] + else: + return [ + "RSAES_OAEP_SHA_1", + "RSAES_OAEP_SHA_256" + ] + + @property + def signing_algorithms(self): + if self.key_usage == "ENCRYPT_DECRYPT": + return None + elif self.customer_master_key_spec in ["ECC_NIST_P256", "ECC_SECG_P256K1"]: + return ["ECDSA_SHA_256"] + elif self.customer_master_key_spec == "ECC_NIST_P384": + return ["ECDSA_SHA_384"] + elif self.customer_master_key_spec == "ECC_NIST_P521": + return ["ECDSA_SHA_512"] + else: + return [ + "RSASSA_PKCS1_V1_5_SHA_256", + "RSASSA_PKCS1_V1_5_SHA_384", + "RSASSA_PKCS1_V1_5_SHA_512", + "RSASSA_PSS_SHA_256", + "RSASSA_PSS_SHA_384", + "RSASSA_PSS_SHA_512" + ] + def to_dict(self): key_dict = { "KeyMetadata": { @@ -81,6 +111,7 @@ class Key(BaseModel): key = kms_backend.create_key( policy=properties["KeyPolicy"], key_usage="ENCRYPT_DECRYPT", + customer_master_key_spec="SYMMETRIC_DEFAULT", description=properties["Description"], tags=properties.get("Tags"), region=region_name, @@ -102,8 +133,8 @@ class KmsBackend(BaseBackend): self.keys = {} self.key_to_aliases = defaultdict(set) - def create_key(self, policy, key_usage, description, tags, region): - key = Key(policy, key_usage, description, tags, region) + def create_key(self, policy, key_usage, customer_master_key_spec, description, tags, region): + key = Key(policy, key_usage, customer_master_key_spec, description, tags, region) self.keys[key.id] = key return key diff 
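The two new properties on Key encode a fixed table: KeyUsage decides which of the two algorithm lists is None, and CustomerMasterKeySpec selects the entries. The same mapping restated as data, for reference only (this is equivalent to the diff above, not additional moto code):

    ECC_SIGNING_ALGORITHMS = {
        "ECC_NIST_P256": ["ECDSA_SHA_256"],
        "ECC_SECG_P256K1": ["ECDSA_SHA_256"],
        "ECC_NIST_P384": ["ECDSA_SHA_384"],
        "ECC_NIST_P521": ["ECDSA_SHA_512"],
        # any other non-symmetric spec (RSA_*) signs with the six
        # RSASSA_PKCS1_V1_5 / RSASSA_PSS SHA-256/384/512 algorithms
    }

    def encryption_algorithms(key_usage, spec):
        if key_usage == "SIGN_VERIFY":
            return None  # signing keys advertise SigningAlgorithms instead
        if spec == "SYMMETRIC_DEFAULT":
            return ["SYMMETRIC_DEFAULT"]
        return ["RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256"]  # RSA_* specs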
--git a/moto/kms/responses.py b/moto/kms/responses.py index d3a9726e1..15b990bbb 100644 --- a/moto/kms/responses.py +++ b/moto/kms/responses.py @@ -118,11 +118,12 @@ class KmsResponse(BaseResponse): """https://docs.aws.amazon.com/kms/latest/APIReference/API_CreateKey.html""" policy = self.parameters.get("Policy") key_usage = self.parameters.get("KeyUsage") + customer_master_key_spec = self.parameters.get("CustomerMasterKeySpec") description = self.parameters.get("Description") tags = self.parameters.get("Tags") key = self.kms_backend.create_key( - policy, key_usage, description, tags, self.region + policy, key_usage, customer_master_key_spec, description, tags, self.region ) return json.dumps(key.to_dict()) diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index 8c2843ee4..c5a49b974 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -64,6 +64,53 @@ def test_create_key(): key["KeyMetadata"]["Origin"].should.equal("AWS_KMS") key["KeyMetadata"].should_not.have.key("SigningAlgorithms") + key = conn.create_key( + KeyUsage = "ENCRYPT_DECRYPT", + CustomerMasterKeySpec = 'RSA_2048', + ) + + sorted(key["KeyMetadata"]["EncryptionAlgorithms"]).should.equal(["RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256"]) + key["KeyMetadata"].should_not.have.key("SigningAlgorithms") + + key = conn.create_key( + KeyUsage = "SIGN_VERIFY", + CustomerMasterKeySpec = 'RSA_2048', + ) + + key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") + sorted(key["KeyMetadata"]["SigningAlgorithms"]).should.equal([ + "RSASSA_PKCS1_V1_5_SHA_256", + "RSASSA_PKCS1_V1_5_SHA_384", + "RSASSA_PKCS1_V1_5_SHA_512", + "RSASSA_PSS_SHA_256", + "RSASSA_PSS_SHA_384", + "RSASSA_PSS_SHA_512" + ]) + + key = conn.create_key( + KeyUsage = "SIGN_VERIFY", + CustomerMasterKeySpec = 'ECC_SECG_P256K1', + ) + + key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") + key["KeyMetadata"]["SigningAlgorithms"].should.equal(["ECDSA_SHA_256"]) + + key = conn.create_key( + KeyUsage = "SIGN_VERIFY", + CustomerMasterKeySpec = 'ECC_NIST_P384', + ) + + key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") + key["KeyMetadata"]["SigningAlgorithms"].should.equal(["ECDSA_SHA_384"]) + + key = conn.create_key( + KeyUsage = "SIGN_VERIFY", + CustomerMasterKeySpec = 'ECC_NIST_P521', + ) + + key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") + key["KeyMetadata"]["SigningAlgorithms"].should.equal(["ECDSA_SHA_512"]) + @mock_kms_deprecated def test_describe_key(): diff --git a/tests/test_kms/test_utils.py b/tests/test_kms/test_utils.py index f5478e0ef..4c84ed127 100644 --- a/tests/test_kms/test_utils.py +++ b/tests/test_kms/test_utils.py @@ -102,7 +102,7 @@ def test_deserialize_ciphertext_blob(raw, serialized): @parameterized(((ec[0],) for ec in ENCRYPTION_CONTEXT_VECTORS)) def test_encrypt_decrypt_cycle(encryption_context): plaintext = b"some secret plaintext" - master_key = Key("nop", "nop", "nop", [], "nop") + master_key = Key("nop", "nop", "nop", "nop", [], "nop") master_key_map = {master_key.id: master_key} ciphertext_blob = encrypt( @@ -133,7 +133,7 @@ def test_encrypt_unknown_key_id(): def test_decrypt_invalid_ciphertext_format(): - master_key = Key("nop", "nop", "nop", [], "nop") + master_key = Key("nop", "nop", "nop", "nop", [], "nop") master_key_map = {master_key.id: master_key} with assert_raises(InvalidCiphertextException): @@ -153,7 +153,7 @@ def test_decrypt_unknwown_key_id(): def test_decrypt_invalid_ciphertext(): - master_key = Key("nop", "nop", "nop", [], "nop") + master_key = Key("nop", 
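# Worth noting in the test_utils churn just below: adding
# customer_master_key_spec as a positional parameter forces an extra "nop"
# into every Key(...) call site. Passing keyword arguments instead would
# localize future signature changes; a sketch of that call-site style
# (hypothetical usage of the same constructor, parameter names taken from
# moto/kms/models.py in this patch):
master_key = Key(
    policy="nop",
    key_usage="nop",
    customer_master_key_spec="SYMMETRIC_DEFAULT",
    description="nop",
    tags=[],
    region="nop",
)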
"nop", "nop", "nop", [], "nop") master_key_map = {master_key.id: master_key} ciphertext_blob = ( master_key.id.encode("utf-8") + b"123456789012" @@ -171,7 +171,7 @@ def test_decrypt_invalid_ciphertext(): def test_decrypt_invalid_encryption_context(): plaintext = b"some secret plaintext" - master_key = Key("nop", "nop", "nop", [], "nop") + master_key = Key("nop", "nop", "nop", "nop", [], "nop") master_key_map = {master_key.id: master_key} ciphertext_blob = encrypt( From b4c9b76ca958223f54c6b8cc22b85bc100f48c18 Mon Sep 17 00:00:00 2001 From: Terry Griffin <“griffint61@users.noreply.github.com”> Date: Thu, 6 Feb 2020 15:26:20 -0800 Subject: [PATCH 103/125] Added 'x-amzn-ErrorType' in return header from lambda:get_function for missing function --- moto/awslambda/responses.py | 2 +- tests/test_awslambda/test_lambda.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index bac670b8e..3152ea6f6 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -295,7 +295,7 @@ class LambdaResponse(BaseResponse): code["Configuration"]["FunctionArn"] += ":$LATEST" return 200, {}, json.dumps(code) else: - return 404, {}, "{}" + return 404, {"x-amzn-ErrorType": "ResourceNotFoundException"}, "{}" def _get_aws_region(self, full_url): region = self.region_regex.search(full_url) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 4db13d220..f1265ce71 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -78,7 +78,7 @@ def lambda_handler(event, context): def get_test_zip_file4(): pfunc = """ -def lambda_handler(event, context): +def lambda_handler(event, context): raise Exception('I failed!') """ return _process_lambda(pfunc) @@ -455,7 +455,7 @@ def test_get_function(): ) # Test get function when can't find function name - with assert_raises(ClientError): + with assert_raises(conn.exceptions.ResourceNotFoundException): conn.get_function(FunctionName="junk", Qualifier="$LATEST") From 4833419499c1310ebd2a0012b4b7ba842146ae41 Mon Sep 17 00:00:00 2001 From: gruebel Date: Fri, 7 Feb 2020 15:38:37 +0100 Subject: [PATCH 104/125] Fix CreationDate handling --- moto/kms/models.py | 32 ++++++++++---------- tests/test_kms/test_kms.py | 61 +++++++++++++++++++++++++------------- 2 files changed, 56 insertions(+), 37 deletions(-) diff --git a/moto/kms/models.py b/moto/kms/models.py index 1015aa72a..ff5d0a356 100644 --- a/moto/kms/models.py +++ b/moto/kms/models.py @@ -7,7 +7,7 @@ from datetime import datetime, timedelta from boto3 import Session from moto.core import BaseBackend, BaseModel -from moto.core.utils import iso_8601_datetime_without_milliseconds +from moto.core.utils import unix_time from moto.iam.models import ACCOUNT_ID @@ -15,8 +15,11 @@ from .utils import decrypt, encrypt, generate_key_id, generate_master_key class Key(BaseModel): - def __init__(self, policy, key_usage, customer_master_key_spec, description, tags, region): + def __init__( + self, policy, key_usage, customer_master_key_spec, description, tags, region + ): self.id = generate_key_id() + self.creation_date = unix_time() self.policy = policy self.key_usage = key_usage self.key_state = "Enabled" @@ -49,10 +52,7 @@ class Key(BaseModel): elif self.customer_master_key_spec == "SYMMETRIC_DEFAULT": return ["SYMMETRIC_DEFAULT"] else: - return [ - "RSAES_OAEP_SHA_1", - "RSAES_OAEP_SHA_256" - ] + return ["RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256"] @property def 
signing_algorithms(self): @@ -71,7 +71,7 @@ class Key(BaseModel): "RSASSA_PKCS1_V1_5_SHA_512", "RSASSA_PSS_SHA_256", "RSASSA_PSS_SHA_384", - "RSASSA_PSS_SHA_512" + "RSASSA_PSS_SHA_512", ] def to_dict(self): @@ -79,7 +79,7 @@ class Key(BaseModel): "KeyMetadata": { "AWSAccountId": self.account_id, "Arn": self.arn, - "CreationDate": iso_8601_datetime_without_milliseconds(datetime.now()), + "CreationDate": self.creation_date, "CustomerMasterKeySpec": self.customer_master_key_spec, "Description": self.description, "Enabled": self.enabled, @@ -93,9 +93,7 @@ class Key(BaseModel): } } if self.key_state == "PendingDeletion": - key_dict["KeyMetadata"][ - "DeletionDate" - ] = iso_8601_datetime_without_milliseconds(self.deletion_date) + key_dict["KeyMetadata"]["DeletionDate"] = unix_time(self.deletion_date) return key_dict def delete(self, region_name): @@ -133,8 +131,12 @@ class KmsBackend(BaseBackend): self.keys = {} self.key_to_aliases = defaultdict(set) - def create_key(self, policy, key_usage, customer_master_key_spec, description, tags, region): - key = Key(policy, key_usage, customer_master_key_spec, description, tags, region) + def create_key( + self, policy, key_usage, customer_master_key_spec, description, tags, region + ): + key = Key( + policy, key_usage, customer_master_key_spec, description, tags, region + ) self.keys[key.id] = key return key @@ -258,9 +260,7 @@ class KmsBackend(BaseBackend): self.keys[key_id].deletion_date = datetime.now() + timedelta( days=pending_window_in_days ) - return iso_8601_datetime_without_milliseconds( - self.keys[key_id].deletion_date - ) + return unix_time(self.keys[key_id].deletion_date) def encrypt(self, key_id, plaintext, encryption_context): key_id = self.any_id_to_key_id(key_id) diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index c5a49b974..c924af76d 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -65,47 +65,44 @@ def test_create_key(): key["KeyMetadata"].should_not.have.key("SigningAlgorithms") key = conn.create_key( - KeyUsage = "ENCRYPT_DECRYPT", - CustomerMasterKeySpec = 'RSA_2048', + KeyUsage="ENCRYPT_DECRYPT", CustomerMasterKeySpec="RSA_2048", ) - sorted(key["KeyMetadata"]["EncryptionAlgorithms"]).should.equal(["RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256"]) + sorted(key["KeyMetadata"]["EncryptionAlgorithms"]).should.equal( + ["RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256"] + ) key["KeyMetadata"].should_not.have.key("SigningAlgorithms") - key = conn.create_key( - KeyUsage = "SIGN_VERIFY", - CustomerMasterKeySpec = 'RSA_2048', - ) + key = conn.create_key(KeyUsage="SIGN_VERIFY", CustomerMasterKeySpec="RSA_2048",) key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") - sorted(key["KeyMetadata"]["SigningAlgorithms"]).should.equal([ - "RSASSA_PKCS1_V1_5_SHA_256", - "RSASSA_PKCS1_V1_5_SHA_384", - "RSASSA_PKCS1_V1_5_SHA_512", - "RSASSA_PSS_SHA_256", - "RSASSA_PSS_SHA_384", - "RSASSA_PSS_SHA_512" - ]) + sorted(key["KeyMetadata"]["SigningAlgorithms"]).should.equal( + [ + "RSASSA_PKCS1_V1_5_SHA_256", + "RSASSA_PKCS1_V1_5_SHA_384", + "RSASSA_PKCS1_V1_5_SHA_512", + "RSASSA_PSS_SHA_256", + "RSASSA_PSS_SHA_384", + "RSASSA_PSS_SHA_512", + ] + ) key = conn.create_key( - KeyUsage = "SIGN_VERIFY", - CustomerMasterKeySpec = 'ECC_SECG_P256K1', + KeyUsage="SIGN_VERIFY", CustomerMasterKeySpec="ECC_SECG_P256K1", ) key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") key["KeyMetadata"]["SigningAlgorithms"].should.equal(["ECDSA_SHA_256"]) key = conn.create_key( - KeyUsage = "SIGN_VERIFY", - CustomerMasterKeySpec 
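# PATCH 104 fixes two things at once. First, creation_date is captured once in
# Key.__init__; previously to_dict() re-stamped datetime.now() on every call,
# so CreationDate drifted between describe_key calls. Second, timestamps are
# serialized with unix_time rather than as ISO-8601 strings: KMS models them
# as epoch seconds, and botocore converts those back into datetime objects on
# the client, which is what lets the tests assert
# CreationDate.should.be.a(datetime). Sketch of the serialization, using
# unix_time as this diff does:
from datetime import datetime
from moto.core.utils import unix_time

creation_date = unix_time(datetime(2015, 1, 1, 12, 0))  # seconds since epoch
assert isinstance(creation_date, float)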
= 'ECC_NIST_P384', + KeyUsage="SIGN_VERIFY", CustomerMasterKeySpec="ECC_NIST_P384", ) key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") key["KeyMetadata"]["SigningAlgorithms"].should.equal(["ECDSA_SHA_384"]) key = conn.create_key( - KeyUsage = "SIGN_VERIFY", - CustomerMasterKeySpec = 'ECC_NIST_P521', + KeyUsage="SIGN_VERIFY", CustomerMasterKeySpec="ECC_NIST_P521", ) key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") @@ -125,6 +122,28 @@ def test_describe_key(): key["KeyMetadata"]["KeyUsage"].should.equal("ENCRYPT_DECRYPT") +@mock_kms +def test_boto3_describe_key(): + client = boto3.client("kms", region_name="us-east-1") + response = client.create_key(Description="my key", KeyUsage="ENCRYPT_DECRYPT",) + key_id = response["KeyMetadata"]["KeyId"] + + response = client.describe_key(KeyId=key_id) + + response["KeyMetadata"]["AWSAccountId"].should.equal("123456789012") + response["KeyMetadata"]["CreationDate"].should.be.a(datetime) + response["KeyMetadata"]["CustomerMasterKeySpec"].should.equal("SYMMETRIC_DEFAULT") + response["KeyMetadata"]["Description"].should.equal("my key") + response["KeyMetadata"]["Enabled"].should.be.ok + response["KeyMetadata"]["EncryptionAlgorithms"].should.equal(["SYMMETRIC_DEFAULT"]) + response["KeyMetadata"]["KeyId"].should_not.be.empty + response["KeyMetadata"]["KeyManager"].should.equal("CUSTOMER") + response["KeyMetadata"]["KeyState"].should.equal("Enabled") + response["KeyMetadata"]["KeyUsage"].should.equal("ENCRYPT_DECRYPT") + response["KeyMetadata"]["Origin"].should.equal("AWS_KMS") + response["KeyMetadata"].should_not.have.key("SigningAlgorithms") + + @mock_kms_deprecated def test_describe_key_via_alias(): conn = boto.kms.connect_to_region("us-west-2") From ec56351416d080ed07506153c1929a1f182c6d96 Mon Sep 17 00:00:00 2001 From: gruebel Date: Fri, 7 Feb 2020 16:28:23 +0100 Subject: [PATCH 105/125] Move boto3 tests to separate file --- tests/test_kms/test_kms.py | 623 +----------------------------- tests/test_kms/test_kms_boto3.py | 638 +++++++++++++++++++++++++++++++ 2 files changed, 639 insertions(+), 622 deletions(-) create mode 100644 tests/test_kms/test_kms_boto3.py diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index c924af76d..9ce324373 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -1,25 +1,18 @@ # -*- coding: utf-8 -*- from __future__ import unicode_literals -from datetime import date -from datetime import datetime -from dateutil.tz import tzutc import base64 -import os import re -import boto3 import boto.kms -import botocore.exceptions import six import sure # noqa from boto.exception import JSONResponseError from boto.kms.exceptions import AlreadyExistsException, NotFoundException -from freezegun import freeze_time from nose.tools import assert_raises from parameterized import parameterized from moto.kms.exceptions import NotFoundException as MotoNotFoundException -from moto import mock_kms, mock_kms_deprecated +from moto import mock_kms_deprecated PLAINTEXT_VECTORS = ( (b"some encodeable plaintext",), @@ -35,80 +28,6 @@ def _get_encoded_value(plaintext): return plaintext.encode("utf-8") -@mock_kms -def test_create_key(): - conn = boto3.client("kms", region_name="us-east-1") - with freeze_time("2015-01-01 00:00:00"): - key = conn.create_key( - Policy="my policy", - Description="my key", - KeyUsage="ENCRYPT_DECRYPT", - Tags=[{"TagKey": "project", "TagValue": "moto"}], - ) - - key["KeyMetadata"]["Arn"].should.equal( - "arn:aws:kms:us-east-1:123456789012:key/{}".format( - 
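# The boto3 create_key/describe_key tests assert KeyMetadata one field at a
# time. For the static fields, a dict-subset comparison is a more compact
# equivalent; a sketch with the same expectations (a style alternative only,
# with `response` as in test_boto3_describe_key above):
metadata = response["KeyMetadata"]
expected = {
    "AWSAccountId": "123456789012",
    "CustomerMasterKeySpec": "SYMMETRIC_DEFAULT",
    "Description": "my key",
    "Enabled": True,
    "KeyManager": "CUSTOMER",
    "KeyState": "Enabled",
    "KeyUsage": "ENCRYPT_DECRYPT",
    "Origin": "AWS_KMS",
}
assert {k: metadata[k] for k in expected} == expected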
key["KeyMetadata"]["KeyId"] - ) - ) - key["KeyMetadata"]["AWSAccountId"].should.equal("123456789012") - key["KeyMetadata"]["CreationDate"].should.be.a(datetime) - key["KeyMetadata"]["CustomerMasterKeySpec"].should.equal("SYMMETRIC_DEFAULT") - key["KeyMetadata"]["Description"].should.equal("my key") - key["KeyMetadata"]["Enabled"].should.be.ok - key["KeyMetadata"]["EncryptionAlgorithms"].should.equal(["SYMMETRIC_DEFAULT"]) - key["KeyMetadata"]["KeyId"].should_not.be.empty - key["KeyMetadata"]["KeyManager"].should.equal("CUSTOMER") - key["KeyMetadata"]["KeyState"].should.equal("Enabled") - key["KeyMetadata"]["KeyUsage"].should.equal("ENCRYPT_DECRYPT") - key["KeyMetadata"]["Origin"].should.equal("AWS_KMS") - key["KeyMetadata"].should_not.have.key("SigningAlgorithms") - - key = conn.create_key( - KeyUsage="ENCRYPT_DECRYPT", CustomerMasterKeySpec="RSA_2048", - ) - - sorted(key["KeyMetadata"]["EncryptionAlgorithms"]).should.equal( - ["RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256"] - ) - key["KeyMetadata"].should_not.have.key("SigningAlgorithms") - - key = conn.create_key(KeyUsage="SIGN_VERIFY", CustomerMasterKeySpec="RSA_2048",) - - key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") - sorted(key["KeyMetadata"]["SigningAlgorithms"]).should.equal( - [ - "RSASSA_PKCS1_V1_5_SHA_256", - "RSASSA_PKCS1_V1_5_SHA_384", - "RSASSA_PKCS1_V1_5_SHA_512", - "RSASSA_PSS_SHA_256", - "RSASSA_PSS_SHA_384", - "RSASSA_PSS_SHA_512", - ] - ) - - key = conn.create_key( - KeyUsage="SIGN_VERIFY", CustomerMasterKeySpec="ECC_SECG_P256K1", - ) - - key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") - key["KeyMetadata"]["SigningAlgorithms"].should.equal(["ECDSA_SHA_256"]) - - key = conn.create_key( - KeyUsage="SIGN_VERIFY", CustomerMasterKeySpec="ECC_NIST_P384", - ) - - key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") - key["KeyMetadata"]["SigningAlgorithms"].should.equal(["ECDSA_SHA_384"]) - - key = conn.create_key( - KeyUsage="SIGN_VERIFY", CustomerMasterKeySpec="ECC_NIST_P521", - ) - - key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") - key["KeyMetadata"]["SigningAlgorithms"].should.equal(["ECDSA_SHA_512"]) - - @mock_kms_deprecated def test_describe_key(): conn = boto.kms.connect_to_region("us-west-2") @@ -122,28 +41,6 @@ def test_describe_key(): key["KeyMetadata"]["KeyUsage"].should.equal("ENCRYPT_DECRYPT") -@mock_kms -def test_boto3_describe_key(): - client = boto3.client("kms", region_name="us-east-1") - response = client.create_key(Description="my key", KeyUsage="ENCRYPT_DECRYPT",) - key_id = response["KeyMetadata"]["KeyId"] - - response = client.describe_key(KeyId=key_id) - - response["KeyMetadata"]["AWSAccountId"].should.equal("123456789012") - response["KeyMetadata"]["CreationDate"].should.be.a(datetime) - response["KeyMetadata"]["CustomerMasterKeySpec"].should.equal("SYMMETRIC_DEFAULT") - response["KeyMetadata"]["Description"].should.equal("my key") - response["KeyMetadata"]["Enabled"].should.be.ok - response["KeyMetadata"]["EncryptionAlgorithms"].should.equal(["SYMMETRIC_DEFAULT"]) - response["KeyMetadata"]["KeyId"].should_not.be.empty - response["KeyMetadata"]["KeyManager"].should.equal("CUSTOMER") - response["KeyMetadata"]["KeyState"].should.equal("Enabled") - response["KeyMetadata"]["KeyUsage"].should.equal("ENCRYPT_DECRYPT") - response["KeyMetadata"]["Origin"].should.equal("AWS_KMS") - response["KeyMetadata"].should_not.have.key("SigningAlgorithms") - - @mock_kms_deprecated def test_describe_key_via_alias(): conn = boto.kms.connect_to_region("us-west-2") @@ -175,22 
+72,6 @@ def test_describe_key_via_alias_not_found(): ) -@parameterized( - ( - ("alias/does-not-exist",), - ("arn:aws:kms:us-east-1:012345678912:alias/does-not-exist",), - ("invalid",), - ) -) -@mock_kms -def test_describe_key_via_alias_invalid_alias(key_id): - client = boto3.client("kms", region_name="us-east-1") - client.create_key(Description="key") - - with assert_raises(client.exceptions.NotFoundException): - client.describe_key(KeyId=key_id) - - @mock_kms_deprecated def test_describe_key_via_arn(): conn = boto.kms.connect_to_region("us-west-2") @@ -318,71 +199,6 @@ def test_generate_data_key(): response["KeyId"].should.equal(key_arn) -@mock_kms -def test_boto3_generate_data_key(): - kms = boto3.client("kms", region_name="us-west-2") - - key = kms.create_key() - key_id = key["KeyMetadata"]["KeyId"] - key_arn = key["KeyMetadata"]["Arn"] - - response = kms.generate_data_key(KeyId=key_id, NumberOfBytes=32) - - # CiphertextBlob must NOT be base64-encoded - with assert_raises(Exception): - base64.b64decode(response["CiphertextBlob"], validate=True) - # Plaintext must NOT be base64-encoded - with assert_raises(Exception): - base64.b64decode(response["Plaintext"], validate=True) - - response["KeyId"].should.equal(key_arn) - - -@parameterized(PLAINTEXT_VECTORS) -@mock_kms -def test_encrypt(plaintext): - client = boto3.client("kms", region_name="us-west-2") - - key = client.create_key(Description="key") - key_id = key["KeyMetadata"]["KeyId"] - key_arn = key["KeyMetadata"]["Arn"] - - response = client.encrypt(KeyId=key_id, Plaintext=plaintext) - response["CiphertextBlob"].should_not.equal(plaintext) - - # CiphertextBlob must NOT be base64-encoded - with assert_raises(Exception): - base64.b64decode(response["CiphertextBlob"], validate=True) - - response["KeyId"].should.equal(key_arn) - - -@parameterized(PLAINTEXT_VECTORS) -@mock_kms -def test_decrypt(plaintext): - client = boto3.client("kms", region_name="us-west-2") - - key = client.create_key(Description="key") - key_id = key["KeyMetadata"]["KeyId"] - key_arn = key["KeyMetadata"]["Arn"] - - encrypt_response = client.encrypt(KeyId=key_id, Plaintext=plaintext) - - client.create_key(Description="key") - # CiphertextBlob must NOT be base64-encoded - with assert_raises(Exception): - base64.b64decode(encrypt_response["CiphertextBlob"], validate=True) - - decrypt_response = client.decrypt(CiphertextBlob=encrypt_response["CiphertextBlob"]) - - # Plaintext must NOT be base64-encoded - with assert_raises(Exception): - base64.b64decode(decrypt_response["Plaintext"], validate=True) - - decrypt_response["Plaintext"].should.equal(_get_encoded_value(plaintext)) - decrypt_response["KeyId"].should.equal(key_arn) - - @mock_kms_deprecated def test_disable_key_rotation_with_missing_key(): conn = boto.kms.connect_to_region("us-west-2") @@ -853,25 +669,6 @@ def test__list_aliases(): len(aliases).should.equal(7) -@parameterized( - ( - ("not-a-uuid",), - ("alias/DoesNotExist",), - ("arn:aws:kms:us-east-1:012345678912:alias/DoesNotExist",), - ("d25652e4-d2d2-49f7-929a-671ccda580c6",), - ( - "arn:aws:kms:us-east-1:012345678912:key/d25652e4-d2d2-49f7-929a-671ccda580c6", - ), - ) -) -@mock_kms -def test_invalid_key_ids(key_id): - client = boto3.client("kms", region_name="us-east-1") - - with assert_raises(client.exceptions.NotFoundException): - client.generate_data_key(KeyId=key_id, NumberOfBytes=5) - - @mock_kms_deprecated def test__assert_default_policy(): from moto.kms.responses import _assert_default_policy @@ -882,421 +679,3 @@ def test__assert_default_policy(): 
_assert_default_policy.when.called_with("default").should_not.throw( MotoNotFoundException ) - - -@parameterized(PLAINTEXT_VECTORS) -@mock_kms -def test_kms_encrypt_boto3(plaintext): - client = boto3.client("kms", region_name="us-east-1") - key = client.create_key(Description="key") - response = client.encrypt(KeyId=key["KeyMetadata"]["KeyId"], Plaintext=plaintext) - - response = client.decrypt(CiphertextBlob=response["CiphertextBlob"]) - response["Plaintext"].should.equal(_get_encoded_value(plaintext)) - - -@mock_kms -def test_disable_key(): - client = boto3.client("kms", region_name="us-east-1") - key = client.create_key(Description="disable-key") - client.disable_key(KeyId=key["KeyMetadata"]["KeyId"]) - - result = client.describe_key(KeyId=key["KeyMetadata"]["KeyId"]) - assert result["KeyMetadata"]["Enabled"] == False - assert result["KeyMetadata"]["KeyState"] == "Disabled" - - -@mock_kms -def test_enable_key(): - client = boto3.client("kms", region_name="us-east-1") - key = client.create_key(Description="enable-key") - client.disable_key(KeyId=key["KeyMetadata"]["KeyId"]) - client.enable_key(KeyId=key["KeyMetadata"]["KeyId"]) - - result = client.describe_key(KeyId=key["KeyMetadata"]["KeyId"]) - assert result["KeyMetadata"]["Enabled"] == True - assert result["KeyMetadata"]["KeyState"] == "Enabled" - - -@mock_kms -def test_schedule_key_deletion(): - client = boto3.client("kms", region_name="us-east-1") - key = client.create_key(Description="schedule-key-deletion") - if os.environ.get("TEST_SERVER_MODE", "false").lower() == "false": - with freeze_time("2015-01-01 12:00:00"): - response = client.schedule_key_deletion(KeyId=key["KeyMetadata"]["KeyId"]) - assert response["KeyId"] == key["KeyMetadata"]["KeyId"] - assert response["DeletionDate"] == datetime( - 2015, 1, 31, 12, 0, tzinfo=tzutc() - ) - else: - # Can't manipulate time in server mode - response = client.schedule_key_deletion(KeyId=key["KeyMetadata"]["KeyId"]) - assert response["KeyId"] == key["KeyMetadata"]["KeyId"] - - result = client.describe_key(KeyId=key["KeyMetadata"]["KeyId"]) - assert result["KeyMetadata"]["Enabled"] == False - assert result["KeyMetadata"]["KeyState"] == "PendingDeletion" - assert "DeletionDate" in result["KeyMetadata"] - - -@mock_kms -def test_schedule_key_deletion_custom(): - client = boto3.client("kms", region_name="us-east-1") - key = client.create_key(Description="schedule-key-deletion") - if os.environ.get("TEST_SERVER_MODE", "false").lower() == "false": - with freeze_time("2015-01-01 12:00:00"): - response = client.schedule_key_deletion( - KeyId=key["KeyMetadata"]["KeyId"], PendingWindowInDays=7 - ) - assert response["KeyId"] == key["KeyMetadata"]["KeyId"] - assert response["DeletionDate"] == datetime( - 2015, 1, 8, 12, 0, tzinfo=tzutc() - ) - else: - # Can't manipulate time in server mode - response = client.schedule_key_deletion( - KeyId=key["KeyMetadata"]["KeyId"], PendingWindowInDays=7 - ) - assert response["KeyId"] == key["KeyMetadata"]["KeyId"] - - result = client.describe_key(KeyId=key["KeyMetadata"]["KeyId"]) - assert result["KeyMetadata"]["Enabled"] == False - assert result["KeyMetadata"]["KeyState"] == "PendingDeletion" - assert "DeletionDate" in result["KeyMetadata"] - - -@mock_kms -def test_cancel_key_deletion(): - client = boto3.client("kms", region_name="us-east-1") - key = client.create_key(Description="cancel-key-deletion") - client.schedule_key_deletion(KeyId=key["KeyMetadata"]["KeyId"]) - response = client.cancel_key_deletion(KeyId=key["KeyMetadata"]["KeyId"]) - assert 
response["KeyId"] == key["KeyMetadata"]["KeyId"] - - result = client.describe_key(KeyId=key["KeyMetadata"]["KeyId"]) - assert result["KeyMetadata"]["Enabled"] == False - assert result["KeyMetadata"]["KeyState"] == "Disabled" - assert "DeletionDate" not in result["KeyMetadata"] - - -@mock_kms -def test_update_key_description(): - client = boto3.client("kms", region_name="us-east-1") - key = client.create_key(Description="old_description") - key_id = key["KeyMetadata"]["KeyId"] - - result = client.update_key_description(KeyId=key_id, Description="new_description") - assert "ResponseMetadata" in result - - -@mock_kms -def test_tag_resource(): - client = boto3.client("kms", region_name="us-east-1") - key = client.create_key(Description="cancel-key-deletion") - response = client.schedule_key_deletion(KeyId=key["KeyMetadata"]["KeyId"]) - - keyid = response["KeyId"] - response = client.tag_resource( - KeyId=keyid, Tags=[{"TagKey": "string", "TagValue": "string"}] - ) - - # Shouldn't have any data, just header - assert len(response.keys()) == 1 - - -@mock_kms -def test_list_resource_tags(): - client = boto3.client("kms", region_name="us-east-1") - key = client.create_key(Description="cancel-key-deletion") - response = client.schedule_key_deletion(KeyId=key["KeyMetadata"]["KeyId"]) - - keyid = response["KeyId"] - response = client.tag_resource( - KeyId=keyid, Tags=[{"TagKey": "string", "TagValue": "string"}] - ) - - response = client.list_resource_tags(KeyId=keyid) - assert response["Tags"][0]["TagKey"] == "string" - assert response["Tags"][0]["TagValue"] == "string" - - -@parameterized( - ( - (dict(KeySpec="AES_256"), 32), - (dict(KeySpec="AES_128"), 16), - (dict(NumberOfBytes=64), 64), - (dict(NumberOfBytes=1), 1), - (dict(NumberOfBytes=1024), 1024), - ) -) -@mock_kms -def test_generate_data_key_sizes(kwargs, expected_key_length): - client = boto3.client("kms", region_name="us-east-1") - key = client.create_key(Description="generate-data-key-size") - - response = client.generate_data_key(KeyId=key["KeyMetadata"]["KeyId"], **kwargs) - - assert len(response["Plaintext"]) == expected_key_length - - -@mock_kms -def test_generate_data_key_decrypt(): - client = boto3.client("kms", region_name="us-east-1") - key = client.create_key(Description="generate-data-key-decrypt") - - resp1 = client.generate_data_key( - KeyId=key["KeyMetadata"]["KeyId"], KeySpec="AES_256" - ) - resp2 = client.decrypt(CiphertextBlob=resp1["CiphertextBlob"]) - - assert resp1["Plaintext"] == resp2["Plaintext"] - - -@parameterized( - ( - (dict(KeySpec="AES_257"),), - (dict(KeySpec="AES_128", NumberOfBytes=16),), - (dict(NumberOfBytes=2048),), - (dict(NumberOfBytes=0),), - (dict(),), - ) -) -@mock_kms -def test_generate_data_key_invalid_size_params(kwargs): - client = boto3.client("kms", region_name="us-east-1") - key = client.create_key(Description="generate-data-key-size") - - with assert_raises( - (botocore.exceptions.ClientError, botocore.exceptions.ParamValidationError) - ) as err: - client.generate_data_key(KeyId=key["KeyMetadata"]["KeyId"], **kwargs) - - -@parameterized( - ( - ("alias/DoesNotExist",), - ("arn:aws:kms:us-east-1:012345678912:alias/DoesNotExist",), - ("d25652e4-d2d2-49f7-929a-671ccda580c6",), - ( - "arn:aws:kms:us-east-1:012345678912:key/d25652e4-d2d2-49f7-929a-671ccda580c6", - ), - ) -) -@mock_kms -def test_generate_data_key_invalid_key(key_id): - client = boto3.client("kms", region_name="us-east-1") - - with assert_raises(client.exceptions.NotFoundException): - client.generate_data_key(KeyId=key_id, 
KeySpec="AES_256") - - -@parameterized( - ( - ("alias/DoesExist", False), - ("arn:aws:kms:us-east-1:012345678912:alias/DoesExist", False), - ("", True), - ("arn:aws:kms:us-east-1:012345678912:key/", True), - ) -) -@mock_kms -def test_generate_data_key_all_valid_key_ids(prefix, append_key_id): - client = boto3.client("kms", region_name="us-east-1") - key = client.create_key() - key_id = key["KeyMetadata"]["KeyId"] - client.create_alias(AliasName="alias/DoesExist", TargetKeyId=key_id) - - target_id = prefix - if append_key_id: - target_id += key_id - - client.generate_data_key(KeyId=key_id, NumberOfBytes=32) - - -@mock_kms -def test_generate_data_key_without_plaintext_decrypt(): - client = boto3.client("kms", region_name="us-east-1") - key = client.create_key(Description="generate-data-key-decrypt") - - resp1 = client.generate_data_key_without_plaintext( - KeyId=key["KeyMetadata"]["KeyId"], KeySpec="AES_256" - ) - - assert "Plaintext" not in resp1 - - -@parameterized(PLAINTEXT_VECTORS) -@mock_kms -def test_re_encrypt_decrypt(plaintext): - client = boto3.client("kms", region_name="us-west-2") - - key_1 = client.create_key(Description="key 1") - key_1_id = key_1["KeyMetadata"]["KeyId"] - key_1_arn = key_1["KeyMetadata"]["Arn"] - key_2 = client.create_key(Description="key 2") - key_2_id = key_2["KeyMetadata"]["KeyId"] - key_2_arn = key_2["KeyMetadata"]["Arn"] - - encrypt_response = client.encrypt( - KeyId=key_1_id, Plaintext=plaintext, EncryptionContext={"encryption": "context"} - ) - - re_encrypt_response = client.re_encrypt( - CiphertextBlob=encrypt_response["CiphertextBlob"], - SourceEncryptionContext={"encryption": "context"}, - DestinationKeyId=key_2_id, - DestinationEncryptionContext={"another": "context"}, - ) - - # CiphertextBlob must NOT be base64-encoded - with assert_raises(Exception): - base64.b64decode(re_encrypt_response["CiphertextBlob"], validate=True) - - re_encrypt_response["SourceKeyId"].should.equal(key_1_arn) - re_encrypt_response["KeyId"].should.equal(key_2_arn) - - decrypt_response_1 = client.decrypt( - CiphertextBlob=encrypt_response["CiphertextBlob"], - EncryptionContext={"encryption": "context"}, - ) - decrypt_response_1["Plaintext"].should.equal(_get_encoded_value(plaintext)) - decrypt_response_1["KeyId"].should.equal(key_1_arn) - - decrypt_response_2 = client.decrypt( - CiphertextBlob=re_encrypt_response["CiphertextBlob"], - EncryptionContext={"another": "context"}, - ) - decrypt_response_2["Plaintext"].should.equal(_get_encoded_value(plaintext)) - decrypt_response_2["KeyId"].should.equal(key_2_arn) - - decrypt_response_1["Plaintext"].should.equal(decrypt_response_2["Plaintext"]) - - -@mock_kms -def test_re_encrypt_to_invalid_destination(): - client = boto3.client("kms", region_name="us-west-2") - - key = client.create_key(Description="key 1") - key_id = key["KeyMetadata"]["KeyId"] - - encrypt_response = client.encrypt(KeyId=key_id, Plaintext=b"some plaintext") - - with assert_raises(client.exceptions.NotFoundException): - client.re_encrypt( - CiphertextBlob=encrypt_response["CiphertextBlob"], - DestinationKeyId="alias/DoesNotExist", - ) - - -@parameterized(((12,), (44,), (91,), (1,), (1024,))) -@mock_kms -def test_generate_random(number_of_bytes): - client = boto3.client("kms", region_name="us-west-2") - - response = client.generate_random(NumberOfBytes=number_of_bytes) - - response["Plaintext"].should.be.a(bytes) - len(response["Plaintext"]).should.equal(number_of_bytes) - - -@parameterized( - ( - (2048, botocore.exceptions.ClientError), - (1025, 
botocore.exceptions.ClientError), - (0, botocore.exceptions.ParamValidationError), - (-1, botocore.exceptions.ParamValidationError), - (-1024, botocore.exceptions.ParamValidationError), - ) -) -@mock_kms -def test_generate_random_invalid_number_of_bytes(number_of_bytes, error_type): - client = boto3.client("kms", region_name="us-west-2") - - with assert_raises(error_type): - client.generate_random(NumberOfBytes=number_of_bytes) - - -@mock_kms -def test_enable_key_rotation_key_not_found(): - client = boto3.client("kms", region_name="us-east-1") - - with assert_raises(client.exceptions.NotFoundException): - client.enable_key_rotation(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") - - -@mock_kms -def test_disable_key_rotation_key_not_found(): - client = boto3.client("kms", region_name="us-east-1") - - with assert_raises(client.exceptions.NotFoundException): - client.disable_key_rotation(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") - - -@mock_kms -def test_enable_key_key_not_found(): - client = boto3.client("kms", region_name="us-east-1") - - with assert_raises(client.exceptions.NotFoundException): - client.enable_key(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") - - -@mock_kms -def test_disable_key_key_not_found(): - client = boto3.client("kms", region_name="us-east-1") - - with assert_raises(client.exceptions.NotFoundException): - client.disable_key(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") - - -@mock_kms -def test_cancel_key_deletion_key_not_found(): - client = boto3.client("kms", region_name="us-east-1") - - with assert_raises(client.exceptions.NotFoundException): - client.cancel_key_deletion(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") - - -@mock_kms -def test_schedule_key_deletion_key_not_found(): - client = boto3.client("kms", region_name="us-east-1") - - with assert_raises(client.exceptions.NotFoundException): - client.schedule_key_deletion(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") - - -@mock_kms -def test_get_key_rotation_status_key_not_found(): - client = boto3.client("kms", region_name="us-east-1") - - with assert_raises(client.exceptions.NotFoundException): - client.get_key_rotation_status(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") - - -@mock_kms -def test_get_key_policy_key_not_found(): - client = boto3.client("kms", region_name="us-east-1") - - with assert_raises(client.exceptions.NotFoundException): - client.get_key_policy( - KeyId="12366f9b-1230-123d-123e-123e6ae60c02", PolicyName="default" - ) - - -@mock_kms -def test_list_key_policies_key_not_found(): - client = boto3.client("kms", region_name="us-east-1") - - with assert_raises(client.exceptions.NotFoundException): - client.list_key_policies(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") - - -@mock_kms -def test_put_key_policy_key_not_found(): - client = boto3.client("kms", region_name="us-east-1") - - with assert_raises(client.exceptions.NotFoundException): - client.put_key_policy( - KeyId="00000000-0000-0000-0000-000000000000", - PolicyName="default", - Policy="new policy", - ) diff --git a/tests/test_kms/test_kms_boto3.py b/tests/test_kms/test_kms_boto3.py new file mode 100644 index 000000000..c125c0557 --- /dev/null +++ b/tests/test_kms/test_kms_boto3.py @@ -0,0 +1,638 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals +from datetime import datetime +from dateutil.tz import tzutc +import base64 +import os + +import boto3 +import botocore.exceptions +import six +import sure # noqa +from freezegun import freeze_time +from nose.tools import assert_raises +from parameterized import parameterized 
+ +from moto import mock_kms + +PLAINTEXT_VECTORS = ( + (b"some encodeable plaintext",), + (b"some unencodeable plaintext \xec\x8a\xcf\xb6r\xe9\xb5\xeb\xff\xa23\x16",), + ("some unicode characters ø˚∆øˆˆ∆ßçøˆˆçßøˆ¨¥",), +) + + +def _get_encoded_value(plaintext): + if isinstance(plaintext, six.binary_type): + return plaintext + + return plaintext.encode("utf-8") + + +@mock_kms +def test_create_key(): + conn = boto3.client("kms", region_name="us-east-1") + key = conn.create_key( + Policy="my policy", + Description="my key", + KeyUsage="ENCRYPT_DECRYPT", + Tags=[{"TagKey": "project", "TagValue": "moto"}], + ) + + key["KeyMetadata"]["Arn"].should.equal( + "arn:aws:kms:us-east-1:123456789012:key/{}".format(key["KeyMetadata"]["KeyId"]) + ) + key["KeyMetadata"]["AWSAccountId"].should.equal("123456789012") + key["KeyMetadata"]["CreationDate"].should.be.a(datetime) + key["KeyMetadata"]["CustomerMasterKeySpec"].should.equal("SYMMETRIC_DEFAULT") + key["KeyMetadata"]["Description"].should.equal("my key") + key["KeyMetadata"]["Enabled"].should.be.ok + key["KeyMetadata"]["EncryptionAlgorithms"].should.equal(["SYMMETRIC_DEFAULT"]) + key["KeyMetadata"]["KeyId"].should_not.be.empty + key["KeyMetadata"]["KeyManager"].should.equal("CUSTOMER") + key["KeyMetadata"]["KeyState"].should.equal("Enabled") + key["KeyMetadata"]["KeyUsage"].should.equal("ENCRYPT_DECRYPT") + key["KeyMetadata"]["Origin"].should.equal("AWS_KMS") + key["KeyMetadata"].should_not.have.key("SigningAlgorithms") + + key = conn.create_key(KeyUsage="ENCRYPT_DECRYPT", CustomerMasterKeySpec="RSA_2048",) + + sorted(key["KeyMetadata"]["EncryptionAlgorithms"]).should.equal( + ["RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256"] + ) + key["KeyMetadata"].should_not.have.key("SigningAlgorithms") + + key = conn.create_key(KeyUsage="SIGN_VERIFY", CustomerMasterKeySpec="RSA_2048",) + + key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") + sorted(key["KeyMetadata"]["SigningAlgorithms"]).should.equal( + [ + "RSASSA_PKCS1_V1_5_SHA_256", + "RSASSA_PKCS1_V1_5_SHA_384", + "RSASSA_PKCS1_V1_5_SHA_512", + "RSASSA_PSS_SHA_256", + "RSASSA_PSS_SHA_384", + "RSASSA_PSS_SHA_512", + ] + ) + + key = conn.create_key( + KeyUsage="SIGN_VERIFY", CustomerMasterKeySpec="ECC_SECG_P256K1", + ) + + key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") + key["KeyMetadata"]["SigningAlgorithms"].should.equal(["ECDSA_SHA_256"]) + + key = conn.create_key( + KeyUsage="SIGN_VERIFY", CustomerMasterKeySpec="ECC_NIST_P384", + ) + + key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") + key["KeyMetadata"]["SigningAlgorithms"].should.equal(["ECDSA_SHA_384"]) + + key = conn.create_key( + KeyUsage="SIGN_VERIFY", CustomerMasterKeySpec="ECC_NIST_P521", + ) + + key["KeyMetadata"].should_not.have.key("EncryptionAlgorithms") + key["KeyMetadata"]["SigningAlgorithms"].should.equal(["ECDSA_SHA_512"]) + + +@mock_kms +def test_describe_key(): + client = boto3.client("kms", region_name="us-east-1") + response = client.create_key(Description="my key", KeyUsage="ENCRYPT_DECRYPT",) + key_id = response["KeyMetadata"]["KeyId"] + + response = client.describe_key(KeyId=key_id) + + response["KeyMetadata"]["AWSAccountId"].should.equal("123456789012") + response["KeyMetadata"]["CreationDate"].should.be.a(datetime) + response["KeyMetadata"]["CustomerMasterKeySpec"].should.equal("SYMMETRIC_DEFAULT") + response["KeyMetadata"]["Description"].should.equal("my key") + response["KeyMetadata"]["Enabled"].should.be.ok + response["KeyMetadata"]["EncryptionAlgorithms"].should.equal(["SYMMETRIC_DEFAULT"]) + 
response["KeyMetadata"]["KeyId"].should_not.be.empty + response["KeyMetadata"]["KeyManager"].should.equal("CUSTOMER") + response["KeyMetadata"]["KeyState"].should.equal("Enabled") + response["KeyMetadata"]["KeyUsage"].should.equal("ENCRYPT_DECRYPT") + response["KeyMetadata"]["Origin"].should.equal("AWS_KMS") + response["KeyMetadata"].should_not.have.key("SigningAlgorithms") + + +@parameterized( + ( + ("alias/does-not-exist",), + ("arn:aws:kms:us-east-1:012345678912:alias/does-not-exist",), + ("invalid",), + ) +) +@mock_kms +def test_describe_key_via_alias_invalid_alias(key_id): + client = boto3.client("kms", region_name="us-east-1") + client.create_key(Description="key") + + with assert_raises(client.exceptions.NotFoundException): + client.describe_key(KeyId=key_id) + + +@mock_kms +def test_generate_data_key(): + kms = boto3.client("kms", region_name="us-west-2") + + key = kms.create_key() + key_id = key["KeyMetadata"]["KeyId"] + key_arn = key["KeyMetadata"]["Arn"] + + response = kms.generate_data_key(KeyId=key_id, NumberOfBytes=32) + + # CiphertextBlob must NOT be base64-encoded + with assert_raises(Exception): + base64.b64decode(response["CiphertextBlob"], validate=True) + # Plaintext must NOT be base64-encoded + with assert_raises(Exception): + base64.b64decode(response["Plaintext"], validate=True) + + response["KeyId"].should.equal(key_arn) + + +@parameterized(PLAINTEXT_VECTORS) +@mock_kms +def test_encrypt(plaintext): + client = boto3.client("kms", region_name="us-west-2") + + key = client.create_key(Description="key") + key_id = key["KeyMetadata"]["KeyId"] + key_arn = key["KeyMetadata"]["Arn"] + + response = client.encrypt(KeyId=key_id, Plaintext=plaintext) + response["CiphertextBlob"].should_not.equal(plaintext) + + # CiphertextBlob must NOT be base64-encoded + with assert_raises(Exception): + base64.b64decode(response["CiphertextBlob"], validate=True) + + response["KeyId"].should.equal(key_arn) + + +@parameterized(PLAINTEXT_VECTORS) +@mock_kms +def test_decrypt(plaintext): + client = boto3.client("kms", region_name="us-west-2") + + key = client.create_key(Description="key") + key_id = key["KeyMetadata"]["KeyId"] + key_arn = key["KeyMetadata"]["Arn"] + + encrypt_response = client.encrypt(KeyId=key_id, Plaintext=plaintext) + + client.create_key(Description="key") + # CiphertextBlob must NOT be base64-encoded + with assert_raises(Exception): + base64.b64decode(encrypt_response["CiphertextBlob"], validate=True) + + decrypt_response = client.decrypt(CiphertextBlob=encrypt_response["CiphertextBlob"]) + + # Plaintext must NOT be base64-encoded + with assert_raises(Exception): + base64.b64decode(decrypt_response["Plaintext"], validate=True) + + decrypt_response["Plaintext"].should.equal(_get_encoded_value(plaintext)) + decrypt_response["KeyId"].should.equal(key_arn) + + +@parameterized( + ( + ("not-a-uuid",), + ("alias/DoesNotExist",), + ("arn:aws:kms:us-east-1:012345678912:alias/DoesNotExist",), + ("d25652e4-d2d2-49f7-929a-671ccda580c6",), + ( + "arn:aws:kms:us-east-1:012345678912:key/d25652e4-d2d2-49f7-929a-671ccda580c6", + ), + ) +) +@mock_kms +def test_invalid_key_ids(key_id): + client = boto3.client("kms", region_name="us-east-1") + + with assert_raises(client.exceptions.NotFoundException): + client.generate_data_key(KeyId=key_id, NumberOfBytes=5) + + +@parameterized(PLAINTEXT_VECTORS) +@mock_kms +def test_kms_encrypt(plaintext): + client = boto3.client("kms", region_name="us-east-1") + key = client.create_key(Description="key") + response = 
client.encrypt(KeyId=key["KeyMetadata"]["KeyId"], Plaintext=plaintext) + + response = client.decrypt(CiphertextBlob=response["CiphertextBlob"]) + response["Plaintext"].should.equal(_get_encoded_value(plaintext)) + + +@mock_kms +def test_disable_key(): + client = boto3.client("kms", region_name="us-east-1") + key = client.create_key(Description="disable-key") + client.disable_key(KeyId=key["KeyMetadata"]["KeyId"]) + + result = client.describe_key(KeyId=key["KeyMetadata"]["KeyId"]) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == "Disabled" + + +@mock_kms +def test_enable_key(): + client = boto3.client("kms", region_name="us-east-1") + key = client.create_key(Description="enable-key") + client.disable_key(KeyId=key["KeyMetadata"]["KeyId"]) + client.enable_key(KeyId=key["KeyMetadata"]["KeyId"]) + + result = client.describe_key(KeyId=key["KeyMetadata"]["KeyId"]) + assert result["KeyMetadata"]["Enabled"] == True + assert result["KeyMetadata"]["KeyState"] == "Enabled" + + +@mock_kms +def test_schedule_key_deletion(): + client = boto3.client("kms", region_name="us-east-1") + key = client.create_key(Description="schedule-key-deletion") + if os.environ.get("TEST_SERVER_MODE", "false").lower() == "false": + with freeze_time("2015-01-01 12:00:00"): + response = client.schedule_key_deletion(KeyId=key["KeyMetadata"]["KeyId"]) + assert response["KeyId"] == key["KeyMetadata"]["KeyId"] + assert response["DeletionDate"] == datetime( + 2015, 1, 31, 12, 0, tzinfo=tzutc() + ) + else: + # Can't manipulate time in server mode + response = client.schedule_key_deletion(KeyId=key["KeyMetadata"]["KeyId"]) + assert response["KeyId"] == key["KeyMetadata"]["KeyId"] + + result = client.describe_key(KeyId=key["KeyMetadata"]["KeyId"]) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == "PendingDeletion" + assert "DeletionDate" in result["KeyMetadata"] + + +@mock_kms +def test_schedule_key_deletion_custom(): + client = boto3.client("kms", region_name="us-east-1") + key = client.create_key(Description="schedule-key-deletion") + if os.environ.get("TEST_SERVER_MODE", "false").lower() == "false": + with freeze_time("2015-01-01 12:00:00"): + response = client.schedule_key_deletion( + KeyId=key["KeyMetadata"]["KeyId"], PendingWindowInDays=7 + ) + assert response["KeyId"] == key["KeyMetadata"]["KeyId"] + assert response["DeletionDate"] == datetime( + 2015, 1, 8, 12, 0, tzinfo=tzutc() + ) + else: + # Can't manipulate time in server mode + response = client.schedule_key_deletion( + KeyId=key["KeyMetadata"]["KeyId"], PendingWindowInDays=7 + ) + assert response["KeyId"] == key["KeyMetadata"]["KeyId"] + + result = client.describe_key(KeyId=key["KeyMetadata"]["KeyId"]) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == "PendingDeletion" + assert "DeletionDate" in result["KeyMetadata"] + + +@mock_kms +def test_cancel_key_deletion(): + client = boto3.client("kms", region_name="us-east-1") + key = client.create_key(Description="cancel-key-deletion") + client.schedule_key_deletion(KeyId=key["KeyMetadata"]["KeyId"]) + response = client.cancel_key_deletion(KeyId=key["KeyMetadata"]["KeyId"]) + assert response["KeyId"] == key["KeyMetadata"]["KeyId"] + + result = client.describe_key(KeyId=key["KeyMetadata"]["KeyId"]) + assert result["KeyMetadata"]["Enabled"] == False + assert result["KeyMetadata"]["KeyState"] == "Disabled" + assert "DeletionDate" not in result["KeyMetadata"] + + +@mock_kms +def 
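# The TEST_SERVER_MODE branches in the deletion tests exist because
# freeze_time only patches the clock of the process running the tests; in
# server mode moto runs in a separate process with a real clock, so
# DeletionDate cannot be pinned to a fixed value. The pattern, distilled
# (sketch):
import os
from freezegun import freeze_time

if os.environ.get("TEST_SERVER_MODE", "false").lower() == "false":
    with freeze_time("2015-01-01 12:00:00"):
        pass  # in-process: time-derived fields are deterministic, assert them
else:
    pass  # server mode: assert only fields that do not depend on "now"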
test_update_key_description(): + client = boto3.client("kms", region_name="us-east-1") + key = client.create_key(Description="old_description") + key_id = key["KeyMetadata"]["KeyId"] + + result = client.update_key_description(KeyId=key_id, Description="new_description") + assert "ResponseMetadata" in result + + +@mock_kms +def test_tag_resource(): + client = boto3.client("kms", region_name="us-east-1") + key = client.create_key(Description="cancel-key-deletion") + response = client.schedule_key_deletion(KeyId=key["KeyMetadata"]["KeyId"]) + + keyid = response["KeyId"] + response = client.tag_resource( + KeyId=keyid, Tags=[{"TagKey": "string", "TagValue": "string"}] + ) + + # Shouldn't have any data, just header + assert len(response.keys()) == 1 + + +@mock_kms +def test_list_resource_tags(): + client = boto3.client("kms", region_name="us-east-1") + key = client.create_key(Description="cancel-key-deletion") + response = client.schedule_key_deletion(KeyId=key["KeyMetadata"]["KeyId"]) + + keyid = response["KeyId"] + response = client.tag_resource( + KeyId=keyid, Tags=[{"TagKey": "string", "TagValue": "string"}] + ) + + response = client.list_resource_tags(KeyId=keyid) + assert response["Tags"][0]["TagKey"] == "string" + assert response["Tags"][0]["TagValue"] == "string" + + +@parameterized( + ( + (dict(KeySpec="AES_256"), 32), + (dict(KeySpec="AES_128"), 16), + (dict(NumberOfBytes=64), 64), + (dict(NumberOfBytes=1), 1), + (dict(NumberOfBytes=1024), 1024), + ) +) +@mock_kms +def test_generate_data_key_sizes(kwargs, expected_key_length): + client = boto3.client("kms", region_name="us-east-1") + key = client.create_key(Description="generate-data-key-size") + + response = client.generate_data_key(KeyId=key["KeyMetadata"]["KeyId"], **kwargs) + + assert len(response["Plaintext"]) == expected_key_length + + +@mock_kms +def test_generate_data_key_decrypt(): + client = boto3.client("kms", region_name="us-east-1") + key = client.create_key(Description="generate-data-key-decrypt") + + resp1 = client.generate_data_key( + KeyId=key["KeyMetadata"]["KeyId"], KeySpec="AES_256" + ) + resp2 = client.decrypt(CiphertextBlob=resp1["CiphertextBlob"]) + + assert resp1["Plaintext"] == resp2["Plaintext"] + + +@parameterized( + ( + (dict(KeySpec="AES_257"),), + (dict(KeySpec="AES_128", NumberOfBytes=16),), + (dict(NumberOfBytes=2048),), + (dict(NumberOfBytes=0),), + (dict(),), + ) +) +@mock_kms +def test_generate_data_key_invalid_size_params(kwargs): + client = boto3.client("kms", region_name="us-east-1") + key = client.create_key(Description="generate-data-key-size") + + with assert_raises( + (botocore.exceptions.ClientError, botocore.exceptions.ParamValidationError) + ) as err: + client.generate_data_key(KeyId=key["KeyMetadata"]["KeyId"], **kwargs) + + +@parameterized( + ( + ("alias/DoesNotExist",), + ("arn:aws:kms:us-east-1:012345678912:alias/DoesNotExist",), + ("d25652e4-d2d2-49f7-929a-671ccda580c6",), + ( + "arn:aws:kms:us-east-1:012345678912:key/d25652e4-d2d2-49f7-929a-671ccda580c6", + ), + ) +) +@mock_kms +def test_generate_data_key_invalid_key(key_id): + client = boto3.client("kms", region_name="us-east-1") + + with assert_raises(client.exceptions.NotFoundException): + client.generate_data_key(KeyId=key_id, KeySpec="AES_256") + + +@parameterized( + ( + ("alias/DoesExist", False), + ("arn:aws:kms:us-east-1:012345678912:alias/DoesExist", False), + ("", True), + ("arn:aws:kms:us-east-1:012345678912:key/", True), + ) +) +@mock_kms +def test_generate_data_key_all_valid_key_ids(prefix, append_key_id): + client = 
boto3.client("kms", region_name="us-east-1") + key = client.create_key() + key_id = key["KeyMetadata"]["KeyId"] + client.create_alias(AliasName="alias/DoesExist", TargetKeyId=key_id) + + target_id = prefix + if append_key_id: + target_id += key_id + + client.generate_data_key(KeyId=key_id, NumberOfBytes=32) + + +@mock_kms +def test_generate_data_key_without_plaintext_decrypt(): + client = boto3.client("kms", region_name="us-east-1") + key = client.create_key(Description="generate-data-key-decrypt") + + resp1 = client.generate_data_key_without_plaintext( + KeyId=key["KeyMetadata"]["KeyId"], KeySpec="AES_256" + ) + + assert "Plaintext" not in resp1 + + +@parameterized(PLAINTEXT_VECTORS) +@mock_kms +def test_re_encrypt_decrypt(plaintext): + client = boto3.client("kms", region_name="us-west-2") + + key_1 = client.create_key(Description="key 1") + key_1_id = key_1["KeyMetadata"]["KeyId"] + key_1_arn = key_1["KeyMetadata"]["Arn"] + key_2 = client.create_key(Description="key 2") + key_2_id = key_2["KeyMetadata"]["KeyId"] + key_2_arn = key_2["KeyMetadata"]["Arn"] + + encrypt_response = client.encrypt( + KeyId=key_1_id, Plaintext=plaintext, EncryptionContext={"encryption": "context"} + ) + + re_encrypt_response = client.re_encrypt( + CiphertextBlob=encrypt_response["CiphertextBlob"], + SourceEncryptionContext={"encryption": "context"}, + DestinationKeyId=key_2_id, + DestinationEncryptionContext={"another": "context"}, + ) + + # CiphertextBlob must NOT be base64-encoded + with assert_raises(Exception): + base64.b64decode(re_encrypt_response["CiphertextBlob"], validate=True) + + re_encrypt_response["SourceKeyId"].should.equal(key_1_arn) + re_encrypt_response["KeyId"].should.equal(key_2_arn) + + decrypt_response_1 = client.decrypt( + CiphertextBlob=encrypt_response["CiphertextBlob"], + EncryptionContext={"encryption": "context"}, + ) + decrypt_response_1["Plaintext"].should.equal(_get_encoded_value(plaintext)) + decrypt_response_1["KeyId"].should.equal(key_1_arn) + + decrypt_response_2 = client.decrypt( + CiphertextBlob=re_encrypt_response["CiphertextBlob"], + EncryptionContext={"another": "context"}, + ) + decrypt_response_2["Plaintext"].should.equal(_get_encoded_value(plaintext)) + decrypt_response_2["KeyId"].should.equal(key_2_arn) + + decrypt_response_1["Plaintext"].should.equal(decrypt_response_2["Plaintext"]) + + +@mock_kms +def test_re_encrypt_to_invalid_destination(): + client = boto3.client("kms", region_name="us-west-2") + + key = client.create_key(Description="key 1") + key_id = key["KeyMetadata"]["KeyId"] + + encrypt_response = client.encrypt(KeyId=key_id, Plaintext=b"some plaintext") + + with assert_raises(client.exceptions.NotFoundException): + client.re_encrypt( + CiphertextBlob=encrypt_response["CiphertextBlob"], + DestinationKeyId="alias/DoesNotExist", + ) + + +@parameterized(((12,), (44,), (91,), (1,), (1024,))) +@mock_kms +def test_generate_random(number_of_bytes): + client = boto3.client("kms", region_name="us-west-2") + + response = client.generate_random(NumberOfBytes=number_of_bytes) + + response["Plaintext"].should.be.a(bytes) + len(response["Plaintext"]).should.equal(number_of_bytes) + + +@parameterized( + ( + (2048, botocore.exceptions.ClientError), + (1025, botocore.exceptions.ClientError), + (0, botocore.exceptions.ParamValidationError), + (-1, botocore.exceptions.ParamValidationError), + (-1024, botocore.exceptions.ParamValidationError), + ) +) +@mock_kms +def test_generate_random_invalid_number_of_bytes(number_of_bytes, error_type): + client = boto3.client("kms", 
region_name="us-west-2") + + with assert_raises(error_type): + client.generate_random(NumberOfBytes=number_of_bytes) + + +@mock_kms +def test_enable_key_rotation_key_not_found(): + client = boto3.client("kms", region_name="us-east-1") + + with assert_raises(client.exceptions.NotFoundException): + client.enable_key_rotation(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") + + +@mock_kms +def test_disable_key_rotation_key_not_found(): + client = boto3.client("kms", region_name="us-east-1") + + with assert_raises(client.exceptions.NotFoundException): + client.disable_key_rotation(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") + + +@mock_kms +def test_enable_key_key_not_found(): + client = boto3.client("kms", region_name="us-east-1") + + with assert_raises(client.exceptions.NotFoundException): + client.enable_key(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") + + +@mock_kms +def test_disable_key_key_not_found(): + client = boto3.client("kms", region_name="us-east-1") + + with assert_raises(client.exceptions.NotFoundException): + client.disable_key(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") + + +@mock_kms +def test_cancel_key_deletion_key_not_found(): + client = boto3.client("kms", region_name="us-east-1") + + with assert_raises(client.exceptions.NotFoundException): + client.cancel_key_deletion(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") + + +@mock_kms +def test_schedule_key_deletion_key_not_found(): + client = boto3.client("kms", region_name="us-east-1") + + with assert_raises(client.exceptions.NotFoundException): + client.schedule_key_deletion(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") + + +@mock_kms +def test_get_key_rotation_status_key_not_found(): + client = boto3.client("kms", region_name="us-east-1") + + with assert_raises(client.exceptions.NotFoundException): + client.get_key_rotation_status(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") + + +@mock_kms +def test_get_key_policy_key_not_found(): + client = boto3.client("kms", region_name="us-east-1") + + with assert_raises(client.exceptions.NotFoundException): + client.get_key_policy( + KeyId="12366f9b-1230-123d-123e-123e6ae60c02", PolicyName="default" + ) + + +@mock_kms +def test_list_key_policies_key_not_found(): + client = boto3.client("kms", region_name="us-east-1") + + with assert_raises(client.exceptions.NotFoundException): + client.list_key_policies(KeyId="12366f9b-1230-123d-123e-123e6ae60c02") + + +@mock_kms +def test_put_key_policy_key_not_found(): + client = boto3.client("kms", region_name="us-east-1") + + with assert_raises(client.exceptions.NotFoundException): + client.put_key_policy( + KeyId="00000000-0000-0000-0000-000000000000", + PolicyName="default", + Policy="new policy", + ) From 0b7e990bbfed5e5ba8fd80570519dcd81d0a7e04 Mon Sep 17 00:00:00 2001 From: jmsanders <10291790+jmsanders@users.noreply.github.com> Date: Fri, 7 Feb 2020 15:50:08 -0600 Subject: [PATCH 106/125] Limit SQS list_queues response to 1000 queues The maximum number of queues that the ListQueues API can return is 1000: https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ListQueues.html --- moto/sqs/models.py | 2 +- tests/test_sqs/test_sqs.py | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 8fbe90108..a54d91c43 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -567,7 +567,7 @@ class SQSBackend(BaseBackend): for name, q in self.queues.items(): if prefix_re.search(name): qs.append(q) - return qs + return qs[:1000] def get_queue(self, 
queue_name): queue = self.queues.get(queue_name) diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 93d388117..f2ab8c37c 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -1759,3 +1759,23 @@ def test_receive_message_for_queue_with_receive_message_wait_time_seconds_set(): ) queue.receive_messages() + + +@mock_sqs +def test_list_queues_limits_to_1000_queues(): + client = boto3.client("sqs", region_name="us-east-1") + + for i in range(1001): + client.create_queue(QueueName="test-queue-{0}".format(i)) + + client.list_queues()["QueueUrls"].should.have.length_of(1000) + client.list_queues(QueueNamePrefix="test-queue")["QueueUrls"].should.have.length_of( + 1000 + ) + + resource = boto3.resource("sqs", region_name="us-east-1") + + list(resource.queues.all()).should.have.length_of(1000) + list(resource.queues.filter(QueueNamePrefix="test-queue")).should.have.length_of( + 1000 + ) From d4caf14b61ef1ba1a0bf3998b4773acd67eafb7b Mon Sep 17 00:00:00 2001 From: Nikita Antonenkov Date: Sat, 1 Feb 2020 22:00:15 +0100 Subject: [PATCH 107/125] Fixed UnboundLocalError in dynamodb2.query when no filters are passed --- moto/dynamodb2/responses.py | 7 +++++++ tests/test_dynamodb2/test_dynamodb.py | 21 +++++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index c9f3529a9..d3767c3fd 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -508,6 +508,13 @@ class DynamoHandler(BaseResponse): # 'KeyConditions': {u'forum_name': {u'ComparisonOperator': u'EQ', u'AttributeValueList': [{u'S': u'the-key'}]}} key_conditions = self.body.get("KeyConditions") query_filters = self.body.get("QueryFilter") + + if not (key_conditions or query_filters): + return self.error( + "com.amazonaws.dynamodb.v20111205#ValidationException", + "Either KeyConditions or QueryFilter should be present", + ) + if key_conditions: ( hash_key_name, diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 2e3f9fdbb..ec01889ae 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -3721,3 +3721,24 @@ def test_allow_update_to_item_with_different_type(): table.get_item(Key={"job_id": "b"})["Item"]["job_details"][ "job_name" ].should.be.equal({"nested": "yes"}) + + +@mock_dynamodb2 +def test_query_catches_when_no_filters(): + dynamo = boto3.resource("dynamodb", region_name="eu-central-1") + dynamo.create_table( + AttributeDefinitions=[{"AttributeName": "job_id", "AttributeType": "S"}], + TableName="origin-rbu-dev", + KeySchema=[{"AttributeName": "job_id", "KeyType": "HASH"}], + ProvisionedThroughput={"ReadCapacityUnits": 1, "WriteCapacityUnits": 1}, + ) + table = dynamo.Table("origin-rbu-dev") + + with assert_raises(ClientError) as ex: + table.query(TableName="original-rbu-dev") + + ex.exception.response["Error"]["Code"].should.equal("ValidationException") + ex.exception.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400) + ex.exception.response["Error"]["Message"].should.equal( + "Either KeyConditions or QueryFilter should be present" + ) From 0ac92969362f47c215e3a3309586d3ce950de28e Mon Sep 17 00:00:00 2001 From: Nikita Antonenkov Date: Sat, 1 Feb 2020 22:05:05 +0100 Subject: [PATCH 108/125] Update .gitignore --- .gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 0282e3caf..fb9bd51de 100644 --- a/.gitignore +++ b/.gitignore @@ -20,3 +20,5 @@ env/ .vscode/ tests/file.tmp 
.eggs/ +.mypy_cache/ +*.tmp From df031d0f33749454ad2612f5c58ffb2b1042625d Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sat, 8 Feb 2020 10:58:31 +0000 Subject: [PATCH 109/125] #2732 - Created AMI should have AccountID as the OwnerID --- moto/ec2/models.py | 14 ++++---------- tests/test_ec2/test_amis.py | 16 +++++++++++++++- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index a0c886087..166d8e646 100644 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -27,6 +27,7 @@ from moto.core.utils import ( iso_8601_datetime_with_milliseconds, camelcase_to_underscores, ) +from moto.iam.models import ACCOUNT_ID from .exceptions import ( CidrLimitExceeded, DependencyViolationError, @@ -155,7 +156,7 @@ AMIS = _load_resource( ) -OWNER_ID = "111122223333" +OWNER_ID = ACCOUNT_ID def utc_date_and_time(): @@ -1341,7 +1342,7 @@ class AmiBackend(object): source_ami=None, name=name, description=description, - owner_id=context.get_current_user() if context else OWNER_ID, + owner_id=OWNER_ID, ) self.amis[ami_id] = ami return ami @@ -1392,14 +1393,7 @@ class AmiBackend(object): # Limit by owner ids if owners: # support filtering by Owners=['self'] - owners = list( - map( - lambda o: context.get_current_user() - if context and o == "self" - else o, - owners, - ) - ) + owners = list(map(lambda o: OWNER_ID if o == "self" else o, owners,)) images = [ami for ami in images if ami.owner_id in owners] # Generic filters diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index f65352c7c..ad432bb78 100644 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -12,6 +12,7 @@ import sure # noqa from moto import mock_ec2_deprecated, mock_ec2 from moto.ec2.models import AMIS, OWNER_ID +from moto.iam.models import ACCOUNT_ID from tests.helpers import requires_boto_gte @@ -251,6 +252,19 @@ def test_ami_pulls_attributes_from_instance(): image.kernel_id.should.equal("test-kernel") +@mock_ec2_deprecated +def test_ami_uses_account_id_if_valid_access_key_is_supplied(): + access_key = "AKIAXXXXXXXXXXXXXXXX" + conn = boto.connect_ec2(access_key, "the_secret") + reservation = conn.run_instances("ami-1234abcd") + instance = reservation.instances[0] + instance.modify_attribute("kernel", "test-kernel") + + image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") + images = conn.get_all_images(owners=["self"]) + [(ami.id, ami.owner_id) for ami in images].should.equal([(image_id, ACCOUNT_ID)]) + + @mock_ec2_deprecated def test_ami_filters(): conn = boto.connect_ec2("the_key", "the_secret") @@ -773,7 +787,7 @@ def test_ami_filter_wildcard(): instance.create_image(Name="not-matching-image") my_images = ec2_client.describe_images( - Owners=["111122223333"], Filters=[{"Name": "name", "Values": ["test*"]}] + Owners=[ACCOUNT_ID], Filters=[{"Name": "name", "Values": ["test*"]}] )["Images"] my_images.should.have.length_of(1) From e91f1309d103b66fb7ccf049c6f7d9b09891a122 Mon Sep 17 00:00:00 2001 From: Luka Bratos Date: Sat, 8 Feb 2020 17:49:54 +0000 Subject: [PATCH 110/125] Update docs --- docs/docs/getting_started.rst | 39 ++++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 17 deletions(-) diff --git a/docs/docs/getting_started.rst b/docs/docs/getting_started.rst index d52e76235..ffe37f3a0 100644 --- a/docs/docs/getting_started.rst +++ b/docs/docs/getting_started.rst @@ -24,8 +24,7 @@ For example, we have the following code we want to test: .. 
sourcecode:: python - import boto - from boto.s3.key import Key + import boto3 class MyModel(object): def __init__(self, name, value): @@ -33,11 +32,8 @@ For example, we have the following code we want to test: self.value = value def save(self): - conn = boto.connect_s3() - bucket = conn.get_bucket('mybucket') - k = Key(bucket) - k.key = self.name - k.set_contents_from_string(self.value) + s3 = boto3.client('s3', region_name='us-east-1') + s3.put_object(Bucket='mybucket', Key=self.name, Body=self.value) There are several ways to do this, but you should keep in mind that Moto creates a full, blank environment. @@ -48,20 +44,23 @@ With a decorator wrapping, all the calls to S3 are automatically mocked out. .. sourcecode:: python - import boto + import boto3 from moto import mock_s3 from mymodule import MyModel @mock_s3 def test_my_model_save(): - conn = boto.connect_s3() + conn = boto3.resource('s3', region_name='us-east-1') # We need to create the bucket since this is all in Moto's 'virtual' AWS account - conn.create_bucket('mybucket') + conn.create_bucket(Bucket='mybucket') model_instance = MyModel('steve', 'is awesome') model_instance.save() - assert conn.get_bucket('mybucket').get_key('steve').get_contents_as_string() == 'is awesome' + body = conn.Object('mybucket', 'steve').get()[ + 'Body'].read().decode("utf-8") + + assert body == 'is awesome' Context manager ~~~~~~~~~~~~~~~ @@ -72,13 +71,16 @@ Same as the Decorator, every call inside the ``with`` statement is mocked out. def test_my_model_save(): with mock_s3(): - conn = boto.connect_s3() - conn.create_bucket('mybucket') + conn = boto3.resource('s3', region_name='us-east-1') + conn.create_bucket(Bucket='mybucket') model_instance = MyModel('steve', 'is awesome') model_instance.save() - assert conn.get_bucket('mybucket').get_key('steve').get_contents_as_string() == 'is awesome' + body = conn.Object('mybucket', 'steve').get()[ + 'Body'].read().decode("utf-8") + + assert body == 'is awesome' Raw ~~~ @@ -91,13 +93,16 @@ You can also start and stop the mocking manually. mock = mock_s3() mock.start() - conn = boto.connect_s3() - conn.create_bucket('mybucket') + conn = boto3.resource('s3', region_name='us-east-1') + conn.create_bucket(Bucket='mybucket') model_instance = MyModel('steve', 'is awesome') model_instance.save() - assert conn.get_bucket('mybucket').get_key('steve').get_contents_as_string() == 'is awesome' + body = conn.Object('mybucket', 'steve').get()[ + 'Body'].read().decode("utf-8") + + assert body == 'is awesome' mock.stop() From 936d6863927d0f4c5784889ccac3e74033135e84 Mon Sep 17 00:00:00 2001 From: Bert Blommers Date: Sun, 9 Feb 2020 11:47:02 +0000 Subject: [PATCH 111/125] #2580 - DynamoDB update_item: Allow list_append and if_not_exists-functions in one expression --- moto/dynamodb2/models.py | 17 +++++++++++------ tests/test_dynamodb2/test_dynamodb.py | 25 +++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 6 deletions(-) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 2313a6e41..82c3559ea 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -448,13 +448,18 @@ class Item(BaseModel): if list_append_re: new_value = expression_attribute_values[list_append_re.group(2).strip()] old_list_key = list_append_re.group(1) - # Get the existing value - old_list = self.attrs[old_list_key.split(".")[0]] - if "." 
in old_list_key:
-                    # Value is nested inside a map - find the appropriate child attr
-                    old_list = old_list.child_attr(
-                        ".".join(old_list_key.split(".")[1:])
+                # old_key could be a function itself (if_not_exists)
+                if old_list_key.startswith("if_not_exists"):
+                    old_list = DynamoType(
+                        expression_attribute_values[self._get_default(old_list_key)]
                     )
+                else:
+                    old_list = self.attrs[old_list_key.split(".")[0]]
+                    if "." in old_list_key:
+                        # Value is nested inside a map - find the appropriate child attr
+                        old_list = old_list.child_attr(
+                            ".".join(old_list_key.split(".")[1:])
+                        )
                 if not old_list.is_list():
                     raise ParamValidationError
                 old_list.value.extend([DynamoType(v) for v in new_value["L"]])
diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py
index ec01889ae..fec4c3064 100644
--- a/tests/test_dynamodb2/test_dynamodb.py
+++ b/tests/test_dynamodb2/test_dynamodb.py
@@ -3609,6 +3609,31 @@ def test_update_supports_list_append_maps():
     )


+@mock_dynamodb2
+def test_update_supports_list_append_with_nested_if_not_exists_operation():
+    dynamo = boto3.resource("dynamodb")
+    table_name = "test"
+
+    dynamo.create_table(
+        TableName=table_name,
+        AttributeDefinitions=[{"AttributeName": "Id", "AttributeType": "S"}],
+        KeySchema=[{"AttributeName": "Id", "KeyType": "HASH"}],
+        ProvisionedThroughput={"ReadCapacityUnits": 20, "WriteCapacityUnits": 20},
+    )
+
+    table = dynamo.Table(table_name)
+
+    table.put_item(Item={"Id": "item-id", "nest1": {"nest2": {}}})
+    table.update_item(
+        Key={"Id": "item-id"},
+        UpdateExpression="SET nest1.nest2.event_history = list_append(if_not_exists(nest1.nest2.event_history, :empty_list), :new_value)",
+        ExpressionAttributeValues={":empty_list": [], ":new_value": ["some_value"]},
+    )
+    table.get_item(Key={"Id": "item-id"})["Item"].should.equal(
+        {"Id": "item-id", "nest1": {"nest2": {"event_history": ["some_value"]}}}
+    )
+
+
 @mock_dynamodb2
 def test_update_catches_invalid_list_append_operation():
     client = boto3.client("dynamodb", region_name="us-east-1")
From 2bd93a76fc2e31524d565461a6716a8bbdcc65fd Mon Sep 17 00:00:00 2001
From: Bert Blommers
Date: Sun, 9 Feb 2020 11:58:41 +0000
Subject: [PATCH 112/125] Add region to DDB tests

---
 tests/test_dynamodb2/test_dynamodb.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py
index fec4c3064..180f460c0 100644
--- a/tests/test_dynamodb2/test_dynamodb.py
+++ b/tests/test_dynamodb2/test_dynamodb.py
@@ -3611,7 +3611,7 @@ def test_update_supports_list_append_with_nested_if_not_exists_operation():
-    dynamo = boto3.resource("dynamodb")
+    dynamo = boto3.resource("dynamodb", region_name="us-west-1")
     table_name = "test"

     dynamo.create_table(
From fa3904df2962190fa7636763aa39133af2ef5ea3 Mon Sep 17 00:00:00 2001
From: Antonin
Date: Mon, 10 Feb 2020 18:09:15 +0100
Subject: [PATCH 113/125] MessageAction for cognito admin_create_user is now
 handled

If an attempt is made to resend an invitation, we validate that the user
is indeed already in the pool; otherwise we raise a UserNotFoundException
to match AWS behaviour
---
 moto/cognitoidp/models.py                | 7 +++-
 moto/cognitoidp/responses.py             | 2 +
 tests/test_cognitoidp/test_cognitoidp.py | 49 ++++++++++++++++++++++++
 3 files changed, 56 insertions(+), 2 deletions(-)

diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py
index 96b23a404..810077faf 100644
--- a/moto/cognitoidp/models.py
+++ 
b/moto/cognitoidp/models.py @@ -564,12 +564,15 @@ class CognitoIdpBackend(BaseBackend): user.groups.discard(group) # User - def admin_create_user(self, user_pool_id, username, temporary_password, attributes): + def admin_create_user(self, user_pool_id, username, message_action, temporary_password, attributes): user_pool = self.user_pools.get(user_pool_id) if not user_pool: raise ResourceNotFoundError(user_pool_id) - if username in user_pool.users: + if message_action and message_action == "RESEND": + if username not in user_pool.users: + raise UserNotFoundError(username) + elif username in user_pool.users: raise UsernameExistsException(username) user = CognitoIdpUser( diff --git a/moto/cognitoidp/responses.py b/moto/cognitoidp/responses.py index fa3b7b0b5..6c89c4806 100644 --- a/moto/cognitoidp/responses.py +++ b/moto/cognitoidp/responses.py @@ -259,10 +259,12 @@ class CognitoIdpResponse(BaseResponse): def admin_create_user(self): user_pool_id = self._get_param("UserPoolId") username = self._get_param("Username") + message_action = self._get_param("MessageAction") temporary_password = self._get_param("TemporaryPassword") user = cognitoidp_backends[self.region].admin_create_user( user_pool_id, username, + message_action, temporary_password, self._get_param("UserAttributes", []), ) diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index 2f7ed11e5..d0a462c5c 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -911,6 +911,55 @@ def test_admin_create_existing_user(): caught.should.be.true +@mock_cognitoidp +def test_admin_resend_invitation_existing_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + conn.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + UserAttributes=[{"Name": "thing", "Value": value}], + ) + + caught = False + try: + conn.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + UserAttributes=[{"Name": "thing", "Value": value}], + MessageAction="RESEND", + ) + except conn.exceptions.UsernameExistsException: + caught = True + + caught.should.be.false + + +@mock_cognitoidp +def test_admin_resend_invitation_missing_user(): + conn = boto3.client("cognito-idp", "us-west-2") + + username = str(uuid.uuid4()) + value = str(uuid.uuid4()) + user_pool_id = conn.create_user_pool(PoolName=str(uuid.uuid4()))["UserPool"]["Id"] + + caught = False + try: + conn.admin_create_user( + UserPoolId=user_pool_id, + Username=username, + UserAttributes=[{"Name": "thing", "Value": value}], + MessageAction="RESEND", + ) + except conn.exceptions.UserNotFoundException: + caught = True + + caught.should.be.true + + @mock_cognitoidp def test_admin_get_user(): conn = boto3.client("cognito-idp", "us-west-2") From f70cd0182e413bca58be077e7f1f90e50ec83f62 Mon Sep 17 00:00:00 2001 From: Terry Griffin <“griffint61@users.noreply.github.com”> Date: Mon, 10 Feb 2020 09:18:25 -0800 Subject: [PATCH 114/125] Fixed test_lambda_can_be_deleted_by_cloudformation for new (correct) error code. 
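For reference, a minimal sketch of the behaviour the updated assertion
expects, written against boto3. The function name is hypothetical, and the
client is assumed to be running under moto's @mock_lambda:

    import boto3
    from botocore.exceptions import ClientError

    lmbda = boto3.client("lambda", region_name="us-east-1")
    try:
        lmbda.get_function(FunctionName="already-deleted-fn")  # hypothetical name
    except ClientError as e:
        # The mock now surfaces the service error code instead of the
        # bare HTTP status string "404".
        assert e.response["Error"]["Code"] == "ResourceNotFoundException"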
--- tests/test_awslambda/test_lambda_cloudformation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_awslambda/test_lambda_cloudformation.py b/tests/test_awslambda/test_lambda_cloudformation.py index a5d4d23fd..f57354d69 100644 --- a/tests/test_awslambda/test_lambda_cloudformation.py +++ b/tests/test_awslambda/test_lambda_cloudformation.py @@ -94,7 +94,7 @@ def test_lambda_can_be_deleted_by_cloudformation(): # Verify function was deleted with assert_raises(ClientError) as e: lmbda.get_function(FunctionName=created_fn_name) - e.exception.response["Error"]["Code"].should.equal("404") + e.exception.response["Error"]["Code"].should.equal("ResourceNotFoundException") def create_stack(cf, s3): From 353ad631f088e42b7d171d1dc69d501153020b13 Mon Sep 17 00:00:00 2001 From: Laurie O Date: Sat, 15 Feb 2020 01:18:08 +1000 Subject: [PATCH 115/125] Include closed execution extra info Include 'closeStatus' and 'closeTimestamp' when describing SWF workflow execution using 'describe_workflow_execution' Signed-off-by: Laurie O --- moto/swf/models/workflow_execution.py | 4 +++ .../models/test_workflow_execution.py | 33 +++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/moto/swf/models/workflow_execution.py b/moto/swf/models/workflow_execution.py index 4d91b1f6f..17ce819fb 100644 --- a/moto/swf/models/workflow_execution.py +++ b/moto/swf/models/workflow_execution.py @@ -127,6 +127,10 @@ class WorkflowExecution(BaseModel): "executionInfo": self.to_medium_dict(), "executionConfiguration": {"taskList": {"name": self.task_list}}, } + # info + if self.execution_status == "CLOSED": + hsh["executionInfo"]["closeStatus"] = self.close_status + hsh["executionInfo"]["closeTimestamp"] = self.close_timestamp # configuration for key in self._configuration_keys: attr = camelcase_to_underscores(key) diff --git a/tests/test_swf/models/test_workflow_execution.py b/tests/test_swf/models/test_workflow_execution.py index 6c73a9686..503198f46 100644 --- a/tests/test_swf/models/test_workflow_execution.py +++ b/tests/test_swf/models/test_workflow_execution.py @@ -148,6 +148,39 @@ def test_workflow_execution_full_dict_representation(): ) +def test_closed_workflow_execution_full_dict_representation(): + domain = get_basic_domain() + wf_type = WorkflowType( + "test-workflow", + "v1.0", + task_list="queue", + default_child_policy="ABANDON", + default_execution_start_to_close_timeout="300", + default_task_start_to_close_timeout="300", + ) + wfe = WorkflowExecution(domain, wf_type, "ab1234") + wfe.execution_status = "CLOSED" + wfe.close_status = "CANCELED" + wfe.close_timestamp = 1420066801.123 + + fd = wfe.to_full_dict() + medium_dict = wfe.to_medium_dict() + medium_dict["closeStatus"] = "CANCELED" + medium_dict["closeTimestamp"] = 1420066801.123 + fd["executionInfo"].should.equal(medium_dict) + fd["openCounts"]["openTimers"].should.equal(0) + fd["openCounts"]["openDecisionTasks"].should.equal(0) + fd["openCounts"]["openActivityTasks"].should.equal(0) + fd["executionConfiguration"].should.equal( + { + "childPolicy": "ABANDON", + "executionStartToCloseTimeout": "300", + "taskList": {"name": "queue"}, + "taskStartToCloseTimeout": "300", + } + ) + + def test_workflow_execution_list_dict_representation(): domain = get_basic_domain() wf_type = WorkflowType( From 8a51fbe1c99972ea94b08b5926004cadde4cacf2 Mon Sep 17 00:00:00 2001 From: Jon Beilke Date: Fri, 14 Feb 2020 12:26:27 -0600 Subject: [PATCH 116/125] add default for apiKeyRequired field on API Gateway methods --- moto/apigateway/models.py | 
25 +++++++++++++---- moto/apigateway/responses.py | 7 ++++- tests/test_apigateway/test_apigateway.py | 35 ++++++++++++++++++++++++ 3 files changed, 60 insertions(+), 7 deletions(-) diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index ae7bdfac3..937b9b08c 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -83,14 +83,14 @@ class MethodResponse(BaseModel, dict): class Method(BaseModel, dict): - def __init__(self, method_type, authorization_type): + def __init__(self, method_type, authorization_type, **kwargs): super(Method, self).__init__() self.update( dict( httpMethod=method_type, authorizationType=authorization_type, authorizerId=None, - apiKeyRequired=None, + apiKeyRequired=kwargs.get("api_key_required") or False, requestParameters=None, requestModels=None, methodIntegration=None, @@ -158,8 +158,12 @@ class Resource(BaseModel): ) return response.status_code, response.text - def add_method(self, method_type, authorization_type): - method = Method(method_type=method_type, authorization_type=authorization_type) + def add_method(self, method_type, authorization_type, api_key_required): + method = Method( + method_type=method_type, + authorization_type=authorization_type, + api_key_required=api_key_required, + ) self.resource_methods[method_type] = method return method @@ -594,9 +598,18 @@ class APIGatewayBackend(BaseBackend): resource = self.get_resource(function_id, resource_id) return resource.get_method(method_type) - def create_method(self, function_id, resource_id, method_type, authorization_type): + def create_method( + self, + function_id, + resource_id, + method_type, + authorization_type, + api_key_required=None, + ): resource = self.get_resource(function_id, resource_id) - method = resource.add_method(method_type, authorization_type) + method = resource.add_method( + method_type, authorization_type, api_key_required=api_key_required + ) return method def get_stage(self, function_id, stage_name): diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index e10d670c5..6a22a4708 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -145,8 +145,13 @@ class APIGatewayResponse(BaseResponse): return 200, {}, json.dumps(method) elif self.method == "PUT": authorization_type = self._get_param("authorizationType") + api_key_required = self._get_param("apiKeyRequired") method = self.backend.create_method( - function_id, resource_id, method_type, authorization_type + function_id, + resource_id, + method_type, + authorization_type, + api_key_required, ) return 200, {}, json.dumps(method) diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 496098e8c..c92fc08f4 100644 --- a/tests/test_apigateway/test_apigateway.py +++ b/tests/test_apigateway/test_apigateway.py @@ -286,6 +286,41 @@ def test_create_method(): { "httpMethod": "GET", "authorizationType": "none", + "apiKeyRequired": False, + "ResponseMetadata": {"HTTPStatusCode": 200}, + } + ) + + +@mock_apigateway +def test_create_method_apikeyrequired(): + client = boto3.client("apigateway", region_name="us-west-2") + response = client.create_rest_api(name="my_api", description="this is my api") + api_id = response["id"] + + resources = client.get_resources(restApiId=api_id) + root_id = [resource for resource in resources["items"] if resource["path"] == "/"][ + 0 + ]["id"] + + client.put_method( + restApiId=api_id, + resourceId=root_id, + httpMethod="GET", + authorizationType="none", + apiKeyRequired=True, + ) + + 
response = client.get_method(restApiId=api_id, resourceId=root_id, httpMethod="GET") + + # this is hard to match against, so remove it + response["ResponseMetadata"].pop("HTTPHeaders", None) + response["ResponseMetadata"].pop("RetryAttempts", None) + response.should.equal( + { + "httpMethod": "GET", + "authorizationType": "none", + "apiKeyRequired": True, "ResponseMetadata": {"HTTPStatusCode": 200}, } ) From 92fc39d7bbb40d02aa96f0cc244e74eecd60f23b Mon Sep 17 00:00:00 2001 From: Jon Beilke Date: Mon, 17 Feb 2020 15:08:09 -0600 Subject: [PATCH 117/125] add Arn to cognito user pool model and response --- moto/cognitoidp/models.py | 5 +++++ tests/test_cognitoidp/test_cognitoidp.py | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py index 96b23a404..2394c64ee 100644 --- a/moto/cognitoidp/models.py +++ b/moto/cognitoidp/models.py @@ -14,6 +14,7 @@ from jose import jws from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel +from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID from .exceptions import ( GroupExistsException, NotAuthorizedError, @@ -69,6 +70,9 @@ class CognitoIdpUserPool(BaseModel): def __init__(self, region, name, extended_config): self.region = region self.id = "{}_{}".format(self.region, str(uuid.uuid4().hex)) + self.arn = "arn:aws:cognito-idp:{}:{}:userpool/{}".format( + self.region, DEFAULT_ACCOUNT_ID, self.id + ) self.name = name self.status = None self.extended_config = extended_config or {} @@ -91,6 +95,7 @@ class CognitoIdpUserPool(BaseModel): def _base_json(self): return { "Id": self.id, + "Arn": self.arn, "Name": self.name, "Status": self.status, "CreationDate": time.mktime(self.creation_date.timetuple()), diff --git a/tests/test_cognitoidp/test_cognitoidp.py b/tests/test_cognitoidp/test_cognitoidp.py index 2f7ed11e5..d37cf7d5c 100644 --- a/tests/test_cognitoidp/test_cognitoidp.py +++ b/tests/test_cognitoidp/test_cognitoidp.py @@ -27,6 +27,11 @@ def test_create_user_pool(): result["UserPool"]["Id"].should_not.be.none result["UserPool"]["Id"].should.match(r"[\w-]+_[0-9a-zA-Z]+") + result["UserPool"]["Arn"].should.equal( + "arn:aws:cognito-idp:us-west-2:{}:userpool/{}".format( + ACCOUNT_ID, result["UserPool"]["Id"] + ) + ) result["UserPool"]["Name"].should.equal(name) result["UserPool"]["LambdaConfig"]["PreSignUp"].should.equal(value) From aeb194fc57167c2df11741aa70a14aab758bdd98 Mon Sep 17 00:00:00 2001 From: Steve Pulec Date: Mon, 17 Feb 2020 17:29:00 -0600 Subject: [PATCH 118/125] Update new lambda test to work with updated status codes. CC #2642. --- tests/test_awslambda/test_lambda.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index d26d78fd4..4f0bc5063 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -150,7 +150,7 @@ def test_invoke_requestresponse_function_with_arn(): Payload=json.dumps(in_data), ) - success_result["StatusCode"].should.equal(202) + success_result["StatusCode"].should.equal(200) result_obj = json.loads( base64.b64decode(success_result["LogResult"]).decode("utf-8") ) From 01f3b60c09109f6805ed01e6e4eb27c693a6c30a Mon Sep 17 00:00:00 2001 From: Daniel Wallace Date: Mon, 17 Feb 2020 17:38:53 -0600 Subject: [PATCH 119/125] Allow ports in k8s service urls for s3 mock If there is a port in the host for the request, then this if statement is not tripped. 
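A quick sketch of the effect of the relaxed pattern; the host name below is
illustrative, and only the two regexes are taken from this change:

    import re

    # The old pattern required the host to end exactly with
    # ".svc.cluster.local", so a host that carried a port never matched
    # and the request was not routed to path-based bucket handling.
    old = re.compile(r"^.*\.svc\.cluster\.local$")
    new = re.compile(r"^.*\.svc\.cluster\.local:?\d*$")

    host = "minio.default.svc.cluster.local:9000"  # illustrative
    assert old.match(host) is None
    assert new.match(host) is not None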
--- moto/s3/responses.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 6041201bf..0e68a3116 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -168,7 +168,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): or host.startswith("localhost") or host.startswith("localstack") or re.match(r"^[^.]+$", host) - or re.match(r"^.*\.svc\.cluster\.local$", host) + or re.match(r"^.*\.svc\.cluster\.local:?\d*$", host) ): # Default to path-based buckets for (1) localhost, (2) localstack hosts (e.g. localstack.dev), # (3) local host names that do not contain a "." (e.g., Docker container host names), or From 11b7be0e85053eb60b55142f26693e5b59e37b54 Mon Sep 17 00:00:00 2001 From: Mike Grima Date: Thu, 13 Feb 2020 18:01:44 -0800 Subject: [PATCH 120/125] Implemented S3 Account-level public access block. - Also added AWS Config listing and fetching support - Also fixed Lambda test breakage --- .travis.yml | 3 +- README.md | 10 + moto/config/models.py | 55 ++-- moto/core/models.py | 11 +- moto/s3/config.py | 147 +++++++++++ moto/s3/exceptions.py | 9 + moto/s3/models.py | 43 +++- moto/s3/responses.py | 136 ++++++++-- moto/s3/urls.py | 4 +- tests/test_awslambda/test_lambda.py | 2 +- tests/test_core/test_server.py | 2 +- tests/test_s3/test_s3.py | 375 ++++++++++++++++++++++++++++ 12 files changed, 746 insertions(+), 51 deletions(-) diff --git a/.travis.yml b/.travis.yml index ac9322211..8f218134b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,11 +26,12 @@ install: fi docker run --rm -t --name motoserver -e TEST_SERVER_MODE=true -e AWS_SECRET_ACCESS_KEY=server_secret -e AWS_ACCESS_KEY_ID=server_key -v `pwd`:/moto -p 5000:5000 -v /var/run/docker.sock:/var/run/docker.sock python:${PYTHON_DOCKER_TAG} /moto/travis_moto_server.sh & fi + travis_retry pip install -r requirements-dev.txt travis_retry pip install boto==2.45.0 travis_retry pip install boto3 travis_retry pip install dist/moto*.gz travis_retry pip install coveralls==1.1 - travis_retry pip install -r requirements-dev.txt + travis_retry pip install coverage==4.5.4 if [ "$TEST_SERVER_MODE" = "true" ]; then python wait_for.py diff --git a/README.md b/README.md index f5c45a6b6..6fb942aef 100644 --- a/README.md +++ b/README.md @@ -450,6 +450,16 @@ boto3.resource( ) ``` +### Caveats +The standalone server has some caveats with some services. The following services +require that you update your hosts file for your code to work properly: + +1. `s3-control` + +For the above services, this is required because the hostname is in the form of `AWS_ACCOUNT_ID.localhost`. +As a result, you need to add that entry to your host file for your tests to function properly. 
+ + ## Install diff --git a/moto/config/models.py b/moto/config/models.py index 45dccd1ba..a66576979 100644 --- a/moto/config/models.py +++ b/moto/config/models.py @@ -43,7 +43,7 @@ from moto.config.exceptions import ( ) from moto.core import BaseBackend, BaseModel -from moto.s3.config import s3_config_query +from moto.s3.config import s3_account_public_access_block_query, s3_config_query from moto.core import ACCOUNT_ID as DEFAULT_ACCOUNT_ID @@ -58,7 +58,10 @@ POP_STRINGS = [ DEFAULT_PAGE_SIZE = 100 # Map the Config resource type to a backend: -RESOURCE_MAP = {"AWS::S3::Bucket": s3_config_query} +RESOURCE_MAP = { + "AWS::S3::Bucket": s3_config_query, + "AWS::S3::AccountPublicAccessBlock": s3_account_public_access_block_query, +} def datetime2int(date): @@ -867,16 +870,17 @@ class ConfigBackend(BaseBackend): backend_region=backend_query_region, ) - result = { - "resourceIdentifiers": [ - { - "resourceType": identifier["type"], - "resourceId": identifier["id"], - "resourceName": identifier["name"], - } - for identifier in identifiers - ] - } + resource_identifiers = [] + for identifier in identifiers: + item = {"resourceType": identifier["type"], "resourceId": identifier["id"]} + + # Some resource types lack names: + if identifier.get("name"): + item["resourceName"] = identifier["name"] + + resource_identifiers.append(item) + + result = {"resourceIdentifiers": resource_identifiers} if new_token: result["nextToken"] = new_token @@ -927,18 +931,21 @@ class ConfigBackend(BaseBackend): resource_region=resource_region, ) - result = { - "ResourceIdentifiers": [ - { - "SourceAccountId": DEFAULT_ACCOUNT_ID, - "SourceRegion": identifier["region"], - "ResourceType": identifier["type"], - "ResourceId": identifier["id"], - "ResourceName": identifier["name"], - } - for identifier in identifiers - ] - } + resource_identifiers = [] + for identifier in identifiers: + item = { + "SourceAccountId": DEFAULT_ACCOUNT_ID, + "SourceRegion": identifier["region"], + "ResourceType": identifier["type"], + "ResourceId": identifier["id"], + } + + if identifier.get("name"): + item["ResourceName"] = identifier["name"] + + resource_identifiers.append(item) + + result = {"ResourceIdentifiers": resource_identifiers} if new_token: result["NextToken"] = new_token diff --git a/moto/core/models.py b/moto/core/models.py index 3be3bbd8e..ffb2ffd9f 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -606,12 +606,13 @@ class ConfigQueryModel(object): As such, the proper way to implement is to first obtain a full list of results from all the region backends, and then filter from there. It may be valuable to make this a concatenation of the region and resource name. - :param resource_region: - :param resource_ids: - :param resource_name: - :param limit: - :param next_token: + :param resource_ids: A list of resource IDs + :param resource_name: The individual name of a resource + :param limit: How many per page + :param next_token: The item that will page on :param backend_region: The region for the backend to pull results from. Set to `None` if this is an aggregated query. + :param resource_region: The region for where the resources reside to pull results from. Set to `None` if this is a + non-aggregated query. 
:return: This should return a list of Dicts that have the following fields: [ { diff --git a/moto/s3/config.py b/moto/s3/config.py index 8098addfc..04b4315f3 100644 --- a/moto/s3/config.py +++ b/moto/s3/config.py @@ -1,8 +1,13 @@ +import datetime import json +import time + +from boto3 import Session from moto.core.exceptions import InvalidNextTokenException from moto.core.models import ConfigQueryModel from moto.s3 import s3_backends +from moto.s3.models import get_moto_s3_account_id class S3ConfigQuery(ConfigQueryModel): @@ -118,4 +123,146 @@ class S3ConfigQuery(ConfigQueryModel): return config_data +class S3AccountPublicAccessBlockConfigQuery(ConfigQueryModel): + def list_config_service_resources( + self, + resource_ids, + resource_name, + limit, + next_token, + backend_region=None, + resource_region=None, + ): + # For the Account Public Access Block, they are the same for all regions. The resource ID is the AWS account ID + # There is no resource name -- it should be a blank string "" if provided. + + # The resource name can only ever be None or an empty string: + if resource_name is not None and resource_name != "": + return [], None + + pab = None + account_id = get_moto_s3_account_id() + regions = [region for region in Session().get_available_regions("config")] + + # If a resource ID was passed in, then filter accordingly: + if resource_ids: + for id in resource_ids: + if account_id == id: + pab = self.backends["global"].account_public_access_block + break + + # Otherwise, just grab the one from the backend: + if not resource_ids: + pab = self.backends["global"].account_public_access_block + + # If it's not present, then return nothing + if not pab: + return [], None + + # Filter on regions (and paginate on them as well): + if backend_region: + pab_list = [backend_region] + elif resource_region: + # Invalid region? + if resource_region not in regions: + return [], None + + pab_list = [resource_region] + + # Aggregated query where no regions were supplied so return them all: + else: + pab_list = regions + + # Pagination logic: + sorted_regions = sorted(pab_list) + new_token = None + + # Get the start: + if not next_token: + start = 0 + else: + # Tokens for this moto feature is just the region-name: + # For OTHER non-global resource types, it's the region concatenated with the resource ID. + if next_token not in sorted_regions: + raise InvalidNextTokenException() + + start = sorted_regions.index(next_token) + + # Get the list of items to collect: + pab_list = sorted_regions[start : (start + limit)] + + if len(sorted_regions) > (start + limit): + new_token = sorted_regions[start + limit] + + return ( + [ + { + "type": "AWS::S3::AccountPublicAccessBlock", + "id": account_id, + "region": region, + } + for region in pab_list + ], + new_token, + ) + + def get_config_resource( + self, resource_id, resource_name=None, backend_region=None, resource_region=None + ): + # Do we even have this defined? + if not self.backends["global"].account_public_access_block: + return None + + # Resource name can only ever be "" if it's supplied: + if resource_name is not None and resource_name != "": + return None + + # Are we filtering based on region? + account_id = get_moto_s3_account_id() + regions = [region for region in Session().get_available_regions("config")] + + # Is the resource ID correct?: + if account_id == resource_id: + if backend_region: + pab_region = backend_region + + # Invalid region? 
+ elif resource_region not in regions: + return None + + else: + pab_region = resource_region + + else: + return None + + # Format the PAB to the AWS Config format: + creation_time = datetime.datetime.utcnow() + config_data = { + "version": "1.3", + "accountId": account_id, + "configurationItemCaptureTime": str(creation_time), + "configurationItemStatus": "OK", + "configurationStateId": str( + int(time.mktime(creation_time.timetuple())) + ), # PY2 and 3 compatible + "resourceType": "AWS::S3::AccountPublicAccessBlock", + "resourceId": account_id, + "awsRegion": pab_region, + "availabilityZone": "Not Applicable", + "configuration": self.backends[ + "global" + ].account_public_access_block.to_config_dict(), + "supplementaryConfiguration": {}, + } + + # The 'configuration' field is also a JSON string: + config_data["configuration"] = json.dumps(config_data["configuration"]) + + return config_data + + s3_config_query = S3ConfigQuery(s3_backends) +s3_account_public_access_block_query = S3AccountPublicAccessBlockConfigQuery( + s3_backends +) diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py index bc339772e..e26f384d5 100644 --- a/moto/s3/exceptions.py +++ b/moto/s3/exceptions.py @@ -359,3 +359,12 @@ class InvalidPublicAccessBlockConfiguration(S3ClientError): *args, **kwargs ) + + +class WrongPublicAccessBlockAccountIdError(S3ClientError): + code = 403 + + def __init__(self): + super(WrongPublicAccessBlockAccountIdError, self).__init__( + "AccessDenied", "Access Denied" + ) diff --git a/moto/s3/models.py b/moto/s3/models.py index fe8e908ef..5a665e27e 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -19,7 +19,7 @@ import uuid import six from bisect import insort -from moto.core import BaseBackend, BaseModel +from moto.core import ACCOUNT_ID, BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime from .exceptions import ( BucketAlreadyExists, @@ -37,6 +37,7 @@ from .exceptions import ( CrossLocationLoggingProhibitted, NoSuchPublicAccessBlockConfiguration, InvalidPublicAccessBlockConfiguration, + WrongPublicAccessBlockAccountIdError, ) from .utils import clean_key_name, _VersionedKeyStore @@ -58,6 +59,13 @@ DEFAULT_TEXT_ENCODING = sys.getdefaultencoding() OWNER = "75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a" +def get_moto_s3_account_id(): + """This makes it easy for mocking AWS Account IDs when using AWS Config + -- Simply mock.patch the ACCOUNT_ID here, and Config gets it for free. 
+ """ + return ACCOUNT_ID + + class FakeDeleteMarker(BaseModel): def __init__(self, key): self.key = key @@ -1163,6 +1171,7 @@ class FakeBucket(BaseModel): class S3Backend(BaseBackend): def __init__(self): self.buckets = {} + self.account_public_access_block = None def create_bucket(self, bucket_name, region_name): if bucket_name in self.buckets: @@ -1264,6 +1273,16 @@ class S3Backend(BaseBackend): return bucket.public_access_block + def get_account_public_access_block(self, account_id): + # The account ID should equal the account id that is set for Moto: + if account_id != ACCOUNT_ID: + raise WrongPublicAccessBlockAccountIdError() + + if not self.account_public_access_block: + raise NoSuchPublicAccessBlockConfiguration() + + return self.account_public_access_block + def set_key( self, bucket_name, key_name, value, storage=None, etag=None, multipart=None ): @@ -1356,6 +1375,13 @@ class S3Backend(BaseBackend): bucket = self.get_bucket(bucket_name) bucket.public_access_block = None + def delete_account_public_access_block(self, account_id): + # The account ID should equal the account id that is set for Moto: + if account_id != ACCOUNT_ID: + raise WrongPublicAccessBlockAccountIdError() + + self.account_public_access_block = None + def put_bucket_notification_configuration(self, bucket_name, notification_config): bucket = self.get_bucket(bucket_name) bucket.set_notification_configuration(notification_config) @@ -1384,6 +1410,21 @@ class S3Backend(BaseBackend): pub_block_config.get("RestrictPublicBuckets"), ) + def put_account_public_access_block(self, account_id, pub_block_config): + # The account ID should equal the account id that is set for Moto: + if account_id != ACCOUNT_ID: + raise WrongPublicAccessBlockAccountIdError() + + if not pub_block_config: + raise InvalidPublicAccessBlockConfiguration() + + self.account_public_access_block = PublicAccessBlock( + pub_block_config.get("BlockPublicAcls"), + pub_block_config.get("IgnorePublicAcls"), + pub_block_config.get("BlockPublicPolicy"), + pub_block_config.get("RestrictPublicBuckets"), + ) + def initiate_multipart(self, bucket_name, key_name, metadata): bucket = self.get_bucket(bucket_name) new_multipart = FakeMultipart(key_name, metadata) diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 6041201bf..4cb6e5288 100644 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -4,6 +4,7 @@ import re import sys import six +from botocore.awsrequest import AWSPreparedRequest from moto.core.utils import str_to_rfc_1123_datetime, py2_strip_unicode_keys from six.moves.urllib.parse import parse_qs, urlparse, unquote @@ -123,6 +124,11 @@ ACTION_MAP = { "uploadId": "PutObject", }, }, + "CONTROL": { + "GET": {"publicAccessBlock": "GetPublicAccessBlock"}, + "PUT": {"publicAccessBlock": "PutPublicAccessBlock"}, + "DELETE": {"publicAccessBlock": "DeletePublicAccessBlock"}, + }, } @@ -220,7 +226,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): # Depending on which calling format the client is using, we don't know # if this is a bucket or key request so we have to check if self.subdomain_based_buckets(request): - return self.key_response(request, full_url, headers) + return self.key_or_control_response(request, full_url, headers) else: # Using path-based buckets return self.bucket_response(request, full_url, headers) @@ -287,7 +293,7 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): return self._bucket_response_post(request, body, bucket_name) else: raise NotImplementedError( - "Method {0} 
has not been impelemented in the S3 backend yet".format( + "Method {0} has not been implemented in the S3 backend yet".format( method ) ) @@ -595,6 +601,20 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): pass return False + def _parse_pab_config(self, body): + parsed_xml = xmltodict.parse(body) + parsed_xml["PublicAccessBlockConfiguration"].pop("@xmlns", None) + + # If Python 2, fix the unicode strings: + if sys.version_info[0] < 3: + parsed_xml = { + "PublicAccessBlockConfiguration": py2_strip_unicode_keys( + dict(parsed_xml["PublicAccessBlockConfiguration"]) + ) + } + + return parsed_xml + def _bucket_response_put( self, request, body, region_name, bucket_name, querystring ): @@ -673,19 +693,9 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): raise e elif "publicAccessBlock" in querystring: - parsed_xml = xmltodict.parse(body) - parsed_xml["PublicAccessBlockConfiguration"].pop("@xmlns", None) - - # If Python 2, fix the unicode strings: - if sys.version_info[0] < 3: - parsed_xml = { - "PublicAccessBlockConfiguration": py2_strip_unicode_keys( - dict(parsed_xml["PublicAccessBlockConfiguration"]) - ) - } - + pab_config = self._parse_pab_config(body) self.backend.put_bucket_public_access_block( - bucket_name, parsed_xml["PublicAccessBlockConfiguration"] + bucket_name, pab_config["PublicAccessBlockConfiguration"] ) return "" @@ -870,15 +880,21 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): ) return 206, response_headers, response_content[begin : end + 1] - def key_response(self, request, full_url, headers): + def key_or_control_response(self, request, full_url, headers): + # Key and Control are lumped in because splitting out the regex is too much of a pain :/ self.method = request.method self.path = self._get_path(request) self.headers = request.headers if "host" not in self.headers: self.headers["host"] = urlparse(full_url).netloc response_headers = {} + try: - response = self._key_response(request, full_url, headers) + # Is this an S3 control response? + if isinstance(request, AWSPreparedRequest) and "s3-control" in request.url: + response = self._control_response(request, full_url, headers) + else: + response = self._key_response(request, full_url, headers) except S3ClientError as s3error: response = s3error.code, {}, s3error.description @@ -894,6 +910,94 @@ class ResponseObject(_TemplateEnvironmentMixin, ActionAuthenticatorMixin): ) return status_code, response_headers, response_content + def _control_response(self, request, full_url, headers): + parsed_url = urlparse(full_url) + query = parse_qs(parsed_url.query, keep_blank_values=True) + method = request.method + + if hasattr(request, "body"): + # Boto + body = request.body + if hasattr(body, "read"): + body = body.read() + else: + # Flask server + body = request.data + if body is None: + body = b"" + + if method == "GET": + return self._control_response_get(request, query, headers) + elif method == "PUT": + return self._control_response_put(request, body, query, headers) + elif method == "DELETE": + return self._control_response_delete(request, query, headers) + else: + raise NotImplementedError( + "Method {0} has not been implemented in the S3 backend yet".format( + method + ) + ) + + def _control_response_get(self, request, query, headers): + action = self.path.split("?")[0].split("/")[ + -1 + ] # Gets the action out of the URL sans query params. 
+ self._set_action("CONTROL", "GET", action) + self._authenticate_and_authorize_s3_action() + + response_headers = {} + if "publicAccessBlock" in action: + public_block_config = self.backend.get_account_public_access_block( + headers["x-amz-account-id"] + ) + template = self.response_template(S3_PUBLIC_ACCESS_BLOCK_CONFIGURATION) + return ( + 200, + response_headers, + template.render(public_block_config=public_block_config), + ) + + raise NotImplementedError( + "Method {0} has not been implemented in the S3 backend yet".format(action) + ) + + def _control_response_put(self, request, body, query, headers): + action = self.path.split("?")[0].split("/")[ + -1 + ] # Gets the action out of the URL sans query params. + self._set_action("CONTROL", "PUT", action) + self._authenticate_and_authorize_s3_action() + + response_headers = {} + if "publicAccessBlock" in action: + pab_config = self._parse_pab_config(body) + self.backend.put_account_public_access_block( + headers["x-amz-account-id"], + pab_config["PublicAccessBlockConfiguration"], + ) + return 200, response_headers, "" + + raise NotImplementedError( + "Method {0} has not been implemented in the S3 backend yet".format(action) + ) + + def _control_response_delete(self, request, query, headers): + action = self.path.split("?")[0].split("/")[ + -1 + ] # Gets the action out of the URL sans query params. + self._set_action("CONTROL", "DELETE", action) + self._authenticate_and_authorize_s3_action() + + response_headers = {} + if "publicAccessBlock" in action: + self.backend.delete_account_public_access_block(headers["x-amz-account-id"]) + return 200, response_headers, "" + + raise NotImplementedError( + "Method {0} has not been implemented in the S3 backend yet".format(action) + ) + def _key_response(self, request, full_url, headers): parsed_url = urlparse(full_url) query = parse_qs(parsed_url.query, keep_blank_values=True) diff --git a/moto/s3/urls.py b/moto/s3/urls.py index 7241dbef1..752762184 100644 --- a/moto/s3/urls.py +++ b/moto/s3/urls.py @@ -13,7 +13,7 @@ url_paths = { # subdomain key of path-based bucket "{0}/(?P[^/]+)/?$": S3ResponseInstance.ambiguous_response, # path-based bucket + key - "{0}/(?P[^/]+)/(?P.+)": S3ResponseInstance.key_response, + "{0}/(?P[^/]+)/(?P.+)": S3ResponseInstance.key_or_control_response, # subdomain bucket + key with empty first part of path - "{0}//(?P.*)$": S3ResponseInstance.key_response, + "{0}//(?P.*)$": S3ResponseInstance.key_or_control_response, } diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index d26d78fd4..4f0bc5063 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -150,7 +150,7 @@ def test_invoke_requestresponse_function_with_arn(): Payload=json.dumps(in_data), ) - success_result["StatusCode"].should.equal(202) + success_result["StatusCode"].should.equal(200) result_obj = json.loads( base64.b64decode(success_result["LogResult"]).decode("utf-8") ) diff --git a/tests/test_core/test_server.py b/tests/test_core/test_server.py index 5514223af..205a2ad0f 100644 --- a/tests/test_core/test_server.py +++ b/tests/test_core/test_server.py @@ -46,4 +46,4 @@ def test_domain_dispatched_with_service(): dispatcher = DomainDispatcherApplication(create_backend_app, service="s3") backend_app = dispatcher.get_application({"HTTP_HOST": "s3.us-east1.amazonaws.com"}) keys = set(backend_app.view_functions.keys()) - keys.should.contain("ResponseObject.key_response") + keys.should.contain("ResponseObject.key_or_control_response") diff --git 
diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py
index 56cbe547b..7f750cabd 100644
--- a/tests/test_s3/test_s3.py
+++ b/tests/test_s3/test_s3.py
@@ -5,6 +5,7 @@ import datetime
 import os
 import sys
 
+from boto3 import Session
 from six.moves.urllib.request import urlopen
 from six.moves.urllib.error import HTTPError
 from functools import wraps
@@ -1135,6 +1136,380 @@ if not settings.TEST_SERVER_MODE:
             "The unspecified location constraint is incompatible for the region specific endpoint this request was sent to."
         )
 
+    # None of the s3-control tests can run against the moto server without first modifying
+    # the hosts file on your system, because the endpoint hostname embeds the account ID:
+    # ACCOUNT_ID.s3-control.amazonaws.com <-- that account ID part is the problem. If you want
+    # to make use of the moto server, add a hosts entry for `THE_ACCOUNT_ID_FOR_MOTO.localhost`
+    # and this will work fine.
+
+    @mock_s3
+    def test_get_public_access_block_for_account():
+        from moto.s3.models import ACCOUNT_ID
+
+        client = boto3.client("s3control", region_name="us-west-2")
+
+        # With an invalid account ID:
+        with assert_raises(ClientError) as ce:
+            client.get_public_access_block(AccountId="111111111111")
+        assert ce.exception.response["Error"]["Code"] == "AccessDenied"
+
+        # Without one defined:
+        with assert_raises(ClientError) as ce:
+            client.get_public_access_block(AccountId=ACCOUNT_ID)
+        assert (
+            ce.exception.response["Error"]["Code"]
+            == "NoSuchPublicAccessBlockConfiguration"
+        )
+
+        # Put with an invalid account ID:
+        with assert_raises(ClientError) as ce:
+            client.put_public_access_block(
+                AccountId="111111111111",
+                PublicAccessBlockConfiguration={"BlockPublicAcls": True},
+            )
+        assert ce.exception.response["Error"]["Code"] == "AccessDenied"
+
+        # Put with an invalid PAB:
+        with assert_raises(ClientError) as ce:
+            client.put_public_access_block(
+                AccountId=ACCOUNT_ID, PublicAccessBlockConfiguration={}
+            )
+        assert ce.exception.response["Error"]["Code"] == "InvalidRequest"
+        assert (
+            "Must specify at least one configuration."
+            in ce.exception.response["Error"]["Message"]
+        )
+
+        # Correct PAB:
+        client.put_public_access_block(
+            AccountId=ACCOUNT_ID,
+            PublicAccessBlockConfiguration={
+                "BlockPublicAcls": True,
+                "IgnorePublicAcls": True,
+                "BlockPublicPolicy": True,
+                "RestrictPublicBuckets": True,
+            },
+        )
+
+        # Get the correct PAB (for all regions):
+        for region in Session().get_available_regions("s3control"):
+            region_client = boto3.client("s3control", region_name=region)
+            assert region_client.get_public_access_block(AccountId=ACCOUNT_ID)[
+                "PublicAccessBlockConfiguration"
+            ] == {
+                "BlockPublicAcls": True,
+                "IgnorePublicAcls": True,
+                "BlockPublicPolicy": True,
+                "RestrictPublicBuckets": True,
+            }
+
+        # Delete with an invalid account ID:
+        with assert_raises(ClientError) as ce:
+            client.delete_public_access_block(AccountId="111111111111")
+        assert ce.exception.response["Error"]["Code"] == "AccessDenied"
+
+        # Delete successfully:
+        client.delete_public_access_block(AccountId=ACCOUNT_ID)
+
+        # Confirm that it's deleted:
+        with assert_raises(ClientError) as ce:
+            client.get_public_access_block(AccountId=ACCOUNT_ID)
+        assert (
+            ce.exception.response["Error"]["Code"]
+            == "NoSuchPublicAccessBlockConfiguration"
+        )
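To make the hosts-file caveat above concrete, a client pointed at a locally running moto server could be set up as sketched below. Everything here is an assumption for illustration: moto's default account ID `123456789012`, the default `moto_server` port 5000, and dummy credentials. The hosts entry matters because botocore normally prepends the `AccountId.` host prefix to s3-control requests, so the effective hostname becomes `123456789012.localhost`:

    # Assumed /etc/hosts entry:
    #     127.0.0.1   123456789012.localhost
    import boto3

    client = boto3.client(
        "s3control",
        region_name="us-west-2",
        endpoint_url="http://localhost:5000",  # assumed moto_server address
        aws_access_key_id="testing",
        aws_secret_access_key="testing",
    )
    client.get_public_access_block(AccountId="123456789012")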
+
+    @mock_s3
+    @mock_config
+    def test_config_list_account_pab():
+        from moto.s3.models import ACCOUNT_ID
+
+        client = boto3.client("s3control", region_name="us-west-2")
+        config_client = boto3.client("config", region_name="us-west-2")
+
+        # Create the aggregator:
+        account_aggregation_source = {
+            "AccountIds": [ACCOUNT_ID],
+            "AllAwsRegions": True,
+        }
+        config_client.put_configuration_aggregator(
+            ConfigurationAggregatorName="testing",
+            AccountAggregationSources=[account_aggregation_source],
+        )
+
+        # Without a PAB in place:
+        result = config_client.list_discovered_resources(
+            resourceType="AWS::S3::AccountPublicAccessBlock"
+        )
+        assert not result["resourceIdentifiers"]
+        result = config_client.list_aggregate_discovered_resources(
+            ResourceType="AWS::S3::AccountPublicAccessBlock",
+            ConfigurationAggregatorName="testing",
+        )
+        assert not result["ResourceIdentifiers"]
+
+        # Create a PAB:
+        client.put_public_access_block(
+            AccountId=ACCOUNT_ID,
+            PublicAccessBlockConfiguration={
+                "BlockPublicAcls": True,
+                "IgnorePublicAcls": True,
+                "BlockPublicPolicy": True,
+                "RestrictPublicBuckets": True,
+            },
+        )
+
+        # Test that successful queries work (non-aggregated):
+        result = config_client.list_discovered_resources(
+            resourceType="AWS::S3::AccountPublicAccessBlock"
+        )
+        assert result["resourceIdentifiers"] == [
+            {
+                "resourceType": "AWS::S3::AccountPublicAccessBlock",
+                "resourceId": ACCOUNT_ID,
+            }
+        ]
+        result = config_client.list_discovered_resources(
+            resourceType="AWS::S3::AccountPublicAccessBlock",
+            resourceIds=[ACCOUNT_ID, "nope"],
+        )
+        assert result["resourceIdentifiers"] == [
+            {
+                "resourceType": "AWS::S3::AccountPublicAccessBlock",
+                "resourceId": ACCOUNT_ID,
+            }
+        ]
+        result = config_client.list_discovered_resources(
+            resourceType="AWS::S3::AccountPublicAccessBlock", resourceName=""
+        )
+        assert result["resourceIdentifiers"] == [
+            {
+                "resourceType": "AWS::S3::AccountPublicAccessBlock",
+                "resourceId": ACCOUNT_ID,
+            }
+        ]
+
+        # Test that successful queries work (aggregated):
+        result = config_client.list_aggregate_discovered_resources(
+            ResourceType="AWS::S3::AccountPublicAccessBlock",
+            ConfigurationAggregatorName="testing",
+        )
+        regions = {region for region in Session().get_available_regions("config")}
+        for r in result["ResourceIdentifiers"]:
+            regions.remove(r.pop("SourceRegion"))
+            assert r == {
+                "ResourceType": "AWS::S3::AccountPublicAccessBlock",
+                "SourceAccountId": ACCOUNT_ID,
+                "ResourceId": ACCOUNT_ID,
+            }
+
+        # With filters that match every region, just check that the counts line up (one result per region):
+        regions = {region for region in Session().get_available_regions("config")}
+        result = config_client.list_aggregate_discovered_resources(
+            ResourceType="AWS::S3::AccountPublicAccessBlock",
+            ConfigurationAggregatorName="testing",
+            Filters={"ResourceName": ""},
+        )
+        assert len(regions) == len(result["ResourceIdentifiers"])
+        result = config_client.list_aggregate_discovered_resources(
+            ResourceType="AWS::S3::AccountPublicAccessBlock",
+            ConfigurationAggregatorName="testing",
+            Filters={"ResourceName": "", "ResourceId": ACCOUNT_ID},
+        )
+        assert len(regions) == len(result["ResourceIdentifiers"])
+        result = config_client.list_aggregate_discovered_resources(
+            ResourceType="AWS::S3::AccountPublicAccessBlock",
+            ConfigurationAggregatorName="testing",
+            Filters={
+                "ResourceName": "",
+                "ResourceId": ACCOUNT_ID,
+                "Region": "us-west-2",
+            },
+        )
+        assert (
+            result["ResourceIdentifiers"][0]["SourceRegion"] == "us-west-2"
+            and len(result["ResourceIdentifiers"]) == 1
+        )
+
+        # Test aggregator pagination:
+        result = config_client.list_aggregate_discovered_resources(
+            ResourceType="AWS::S3::AccountPublicAccessBlock",
+            ConfigurationAggregatorName="testing",
+            Limit=1,
+        )
+        regions = sorted(
+            [region for region in Session().get_available_regions("config")]
+        )
+        assert result["ResourceIdentifiers"][0] == {
+            "ResourceType": "AWS::S3::AccountPublicAccessBlock",
+            "SourceAccountId": ACCOUNT_ID,
+            "ResourceId": ACCOUNT_ID,
+            "SourceRegion": regions[0],
+        }
+        assert result["NextToken"] == regions[1]
+
+        # Get the next region:
+        result = config_client.list_aggregate_discovered_resources(
+            ResourceType="AWS::S3::AccountPublicAccessBlock",
+            ConfigurationAggregatorName="testing",
+            Limit=1,
+            NextToken=regions[1],
+        )
+        assert result["ResourceIdentifiers"][0] == {
+            "ResourceType": "AWS::S3::AccountPublicAccessBlock",
+            "SourceAccountId": ACCOUNT_ID,
+            "ResourceId": ACCOUNT_ID,
+            "SourceRegion": regions[1],
+        }
+
+        # Non-aggregated with incorrect info:
+        result = config_client.list_discovered_resources(
+            resourceType="AWS::S3::AccountPublicAccessBlock", resourceName="nope"
+        )
+        assert not result["resourceIdentifiers"]
+        result = config_client.list_discovered_resources(
+            resourceType="AWS::S3::AccountPublicAccessBlock", resourceIds=["nope"]
+        )
+        assert not result["resourceIdentifiers"]
+
+        # Aggregated with incorrect info:
+        result = config_client.list_aggregate_discovered_resources(
+            ResourceType="AWS::S3::AccountPublicAccessBlock",
+            ConfigurationAggregatorName="testing",
+            Filters={"ResourceName": "nope"},
+        )
+        assert not result["ResourceIdentifiers"]
+        result = config_client.list_aggregate_discovered_resources(
+            ResourceType="AWS::S3::AccountPublicAccessBlock",
+            ConfigurationAggregatorName="testing",
+            Filters={"ResourceId": "nope"},
+        )
+        assert not result["ResourceIdentifiers"]
+        result = config_client.list_aggregate_discovered_resources(
+            ResourceType="AWS::S3::AccountPublicAccessBlock",
+            ConfigurationAggregatorName="testing",
+            Filters={"Region": "Nope"},
+        )
+        assert not result["ResourceIdentifiers"]
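The pagination checks above lean on an implementation detail worth spelling out: moto keys the aggregated listing for this resource type by region, so `NextToken` is simply the next region name and each `Limit=1` page holds one region's identifier. Draining the listing is then the usual token loop; a sketch against the same `testing` aggregator (the helper name is ours, not moto's):

    def list_all_account_pabs(config_client):
        # Collect every aggregated PAB identifier, one page (one region) at a time.
        identifiers = []
        kwargs = {
            "ResourceType": "AWS::S3::AccountPublicAccessBlock",
            "ConfigurationAggregatorName": "testing",
            "Limit": 1,
        }
        while True:
            page = config_client.list_aggregate_discovered_resources(**kwargs)
            identifiers.extend(page["ResourceIdentifiers"])
            if "NextToken" not in page:
                return identifiers
            kwargs["NextToken"] = page["NextToken"]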
regions.remove(r.pop("SourceRegion")) + assert r == { + "ResourceType": "AWS::S3::AccountPublicAccessBlock", + "SourceAccountId": ACCOUNT_ID, + "ResourceId": ACCOUNT_ID, + } + + # Just check that the len is the same -- this should be reasonable + regions = {region for region in Session().get_available_regions("config")} + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Filters={"ResourceName": ""}, + ) + assert len(regions) == len(result["ResourceIdentifiers"]) + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Filters={"ResourceName": "", "ResourceId": ACCOUNT_ID}, + ) + assert len(regions) == len(result["ResourceIdentifiers"]) + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Filters={ + "ResourceName": "", + "ResourceId": ACCOUNT_ID, + "Region": "us-west-2", + }, + ) + assert ( + result["ResourceIdentifiers"][0]["SourceRegion"] == "us-west-2" + and len(result["ResourceIdentifiers"]) == 1 + ) + + # Test aggregator pagination: + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Limit=1, + ) + regions = sorted( + [region for region in Session().get_available_regions("config")] + ) + assert result["ResourceIdentifiers"][0] == { + "ResourceType": "AWS::S3::AccountPublicAccessBlock", + "SourceAccountId": ACCOUNT_ID, + "ResourceId": ACCOUNT_ID, + "SourceRegion": regions[0], + } + assert result["NextToken"] == regions[1] + + # Get the next region: + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Limit=1, + NextToken=regions[1], + ) + assert result["ResourceIdentifiers"][0] == { + "ResourceType": "AWS::S3::AccountPublicAccessBlock", + "SourceAccountId": ACCOUNT_ID, + "ResourceId": ACCOUNT_ID, + "SourceRegion": regions[1], + } + + # Non-aggregated with incorrect info: + result = config_client.list_discovered_resources( + resourceType="AWS::S3::AccountPublicAccessBlock", resourceName="nope" + ) + assert not result["resourceIdentifiers"] + result = config_client.list_discovered_resources( + resourceType="AWS::S3::AccountPublicAccessBlock", resourceIds=["nope"] + ) + assert not result["resourceIdentifiers"] + + # Aggregated with incorrect info: + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Filters={"ResourceName": "nope"}, + ) + assert not result["ResourceIdentifiers"] + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Filters={"ResourceId": "nope"}, + ) + assert not result["ResourceIdentifiers"] + result = config_client.list_aggregate_discovered_resources( + ResourceType="AWS::S3::AccountPublicAccessBlock", + ConfigurationAggregatorName="testing", + Filters={"Region": "Nope"}, + ) + assert not result["ResourceIdentifiers"] + + @mock_s3 + @mock_config + def test_config_get_account_pab(): + from moto.s3.models import ACCOUNT_ID + + client = boto3.client("s3control", region_name="us-west-2") + config_client = boto3.client("config", region_name="us-west-2") + + # 
From f111dd7febca9377056d0860e6b481e7797b446e Mon Sep 17 00:00:00 2001
From: Steve Pulec
Date: Mon, 17 Feb 2020 18:21:08 -0600
Subject: [PATCH 121/125] Update sphinx build version.

---
 docs/conf.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/docs/conf.py b/docs/conf.py
index 28a4b4e6b..a902d0ecf 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -56,9 +56,10 @@ author = 'Steve Pulec'
 # built documents.
 #
 # The short X.Y version.
-version = '0.4.10'
+import moto
+version = moto.__version__
 # The full version, including alpha/beta/rc tags.
-release = '0.4.10'
+release = moto.__version__
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
From 356c55f99d8cb2eec66ad9a1b2ae493671555bd3 Mon Sep 17 00:00:00 2001
From: Steve Pulec
Date: Mon, 17 Feb 2020 20:21:18 -0600
Subject: [PATCH 122/125] Fix default resourceMethod for API Gateway. Closes #2750.

---
 moto/apigateway/models.py                | 5 +++--
 tests/test_apigateway/test_apigateway.py | 2 --
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py
index 937b9b08c..dcc38efc9 100644
--- a/moto/apigateway/models.py
+++ b/moto/apigateway/models.py
@@ -117,14 +117,15 @@ class Resource(BaseModel):
         self.api_id = api_id
         self.path_part = path_part
         self.parent_id = parent_id
-        self.resource_methods = {"GET": {}}
+        self.resource_methods = {}
 
     def to_dict(self):
         response = {
             "path": self.get_path(),
             "id": self.id,
-            "resourceMethods": self.resource_methods,
         }
+        if self.resource_methods:
+            response["resourceMethods"] = self.resource_methods
         if self.parent_id:
             response["parentId"] = self.parent_id
             response["pathPart"] = self.path_part
diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py
index c92fc08f4..1b422e875 100644
--- a/tests/test_apigateway/test_apigateway.py
+++ b/tests/test_apigateway/test_apigateway.py
@@ -208,7 +208,6 @@ def test_create_resource():
             "path": "/",
             "id": root_id,
             "ResponseMetadata": {"HTTPStatusCode": 200},
-            "resourceMethods": {"GET": {}},
         }
     )
 
@@ -257,7 +256,6 @@ def test_child_resource():
             "parentId": users_id,
             "id": tags_id,
             "ResponseMetadata": {"HTTPStatusCode": 200},
-            "resourceMethods": {"GET": {}},
         }
     )

From dcd1f0195fbe2a0f30b22515856b55f3eddf518c Mon Sep 17 00:00:00 2001
From: Steve Pulec
Date: Mon, 17 Feb 2020 20:32:19 -0600
Subject: [PATCH 123/125] lint.

---
 tests/test_apigateway/test_apigateway.py | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py
index 1b422e875..8692ccc9b 100644
--- a/tests/test_apigateway/test_apigateway.py
+++ b/tests/test_apigateway/test_apigateway.py
@@ -204,11 +204,7 @@ def test_create_resource():
     root_resource["ResponseMetadata"].pop("HTTPHeaders", None)
     root_resource["ResponseMetadata"].pop("RetryAttempts", None)
     root_resource.should.equal(
-        {
-            "path": "/",
-            "id": root_id,
-            "ResponseMetadata": {"HTTPStatusCode": 200},
-        }
+        {"path": "/", "id": root_id, "ResponseMetadata": {"HTTPStatusCode": 200},}
     )
 
     client.create_resource(restApiId=api_id, parentId=root_id, pathPart="users")
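The observable effect of PATCH 122 is that a freshly created API Gateway resource no longer reports a phantom empty GET method; `resourceMethods` only appears in responses once a method has actually been added. A short sketch of that behaviour against the mocked client (the API name and region are arbitrary):

    import boto3
    from moto import mock_apigateway

    @mock_apigateway
    def check_resource_methods():
        client = boto3.client("apigateway", region_name="us-west-2")
        api_id = client.create_rest_api(name="demo")["id"]
        root_id = client.get_resources(restApiId=api_id)["items"][0]["id"]

        # With no methods defined, the key should now be absent entirely:
        assert "resourceMethods" not in client.get_resource(
            restApiId=api_id, resourceId=root_id
        )

        client.put_method(
            restApiId=api_id,
            resourceId=root_id,
            httpMethod="GET",
            authorizationType="NONE",
        )
        assert "GET" in client.get_resource(restApiId=api_id, resourceId=root_id)[
            "resourceMethods"
        ]

    check_resource_methods()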
From 11e64109eb2ebc0480ab9ce84d6fdf2f177f7c7c Mon Sep 17 00:00:00 2001
From: Steve Pulec
Date: Mon, 17 Feb 2020 20:32:28 -0600
Subject: [PATCH 124/125] Fix s3 test for location constraint.

---
 tests/test_s3/test_s3.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py
index b4badcaf0..2193f8b27 100644
--- a/tests/test_s3/test_s3.py
+++ b/tests/test_s3/test_s3.py
@@ -2145,7 +2145,7 @@ def test_boto3_copy_object_from_unversioned_to_versioned_bucket():
 
 @mock_s3
 def test_boto3_copy_object_with_replacement_tagging():
-    client = boto3.client("s3", region_name="eu-north-1")
+    client = boto3.client("s3", region_name=DEFAULT_REGION_NAME)
     client.create_bucket(Bucket="mybucket")
     client.put_object(
         Bucket="mybucket", Key="original", Body=b"test", Tagging="tag=old"

From cb68204994bf0aea93b35aebd5159949518a76f0 Mon Sep 17 00:00:00 2001
From: Antonin
Date: Tue, 18 Feb 2020 10:50:24 +0100
Subject: [PATCH 125/125] lint

---
 moto/cognitoidp/models.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/moto/cognitoidp/models.py b/moto/cognitoidp/models.py
index 810077faf..2f2f7e870 100644
--- a/moto/cognitoidp/models.py
+++ b/moto/cognitoidp/models.py
@@ -564,7 +564,9 @@ class CognitoIdpBackend(BaseBackend):
         user.groups.discard(group)
 
     # User
-    def admin_create_user(self, user_pool_id, username, message_action, temporary_password, attributes):
+    def admin_create_user(
+        self, user_pool_id, username, message_action, temporary_password, attributes
+    ):
         user_pool = self.user_pools.get(user_pool_id)
         if not user_pool:
             raise ResourceNotFoundError(user_pool_id)