diff --git a/CHANGELOG.md b/CHANGELOG.md index 94819aa8d..069569c5c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,23 @@ Moto Changelog Latest ------ +1.1.24 +----- + + * Implemented Batch + * Fixed regression with moto_server dashboard + * Fixed and closed many outstanding bugs + * Fixed serious performance problem with EC2 reservation listing + * Fixed Route53 list_resource_record_sets + +1.1.23 +----- + + * Implemented X-Ray + * Implemented Autoscaling EC2 attachment + * Implemented Autoscaling Load Balancer methods + * Improved DynamoDB filter expressions + 1.1.22 ----- diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md new file mode 100644 index 000000000..172c03f1a --- /dev/null +++ b/IMPLEMENTATION_COVERAGE.md @@ -0,0 +1,3651 @@ + +## acm - 50% implemented +- [X] add_tags_to_certificate +- [X] delete_certificate +- [ ] describe_certificate +- [X] get_certificate +- [ ] import_certificate +- [ ] list_certificates +- [ ] list_tags_for_certificate +- [X] remove_tags_from_certificate +- [X] request_certificate +- [ ] resend_validation_email + +## apigateway - 18% implemented +- [ ] create_api_key +- [ ] create_authorizer +- [ ] create_base_path_mapping +- [X] create_deployment +- [ ] create_documentation_part +- [ ] create_documentation_version +- [ ] create_domain_name +- [ ] create_model +- [ ] create_request_validator +- [X] create_resource +- [X] create_rest_api +- [X] create_stage +- [ ] create_usage_plan +- [ ] create_usage_plan_key +- [ ] delete_api_key +- [ ] delete_authorizer +- [ ] delete_base_path_mapping +- [ ] delete_client_certificate +- [X] delete_deployment +- [ ] delete_documentation_part +- [ ] delete_documentation_version +- [ ] delete_domain_name +- [ ] delete_gateway_response +- [X] delete_integration +- [X] delete_integration_response +- [ ] delete_method +- [X] delete_method_response +- [ ] delete_model +- [ ] delete_request_validator +- [X] delete_resource +- [X] delete_rest_api +- [ ] delete_stage +- [ ] delete_usage_plan +- [ ] delete_usage_plan_key +- [ ] flush_stage_authorizers_cache +- [ ] flush_stage_cache +- [ ] generate_client_certificate +- [ ] get_account +- [ ] get_api_key +- [ ] get_api_keys +- [ ] get_authorizer +- [ ] get_authorizers +- [ ] get_base_path_mapping +- [ ] get_base_path_mappings +- [ ] get_client_certificate +- [ ] get_client_certificates +- [X] get_deployment +- [X] get_deployments +- [ ] get_documentation_part +- [ ] get_documentation_parts +- [ ] get_documentation_version +- [ ] get_documentation_versions +- [ ] get_domain_name +- [ ] get_domain_names +- [ ] get_export +- [ ] get_gateway_response +- [ ] get_gateway_responses +- [X] get_integration +- [X] get_integration_response +- [X] get_method +- [X] get_method_response +- [ ] get_model +- [ ] get_model_template +- [ ] get_models +- [ ] get_request_validator +- [ ] get_request_validators +- [X] get_resource +- [ ] get_resources +- [X] get_rest_api +- [ ] get_rest_apis +- [ ] get_sdk +- [ ] get_sdk_type +- [ ] get_sdk_types +- [X] get_stage +- [X] get_stages +- [ ] get_usage +- [ ] get_usage_plan +- [ ] get_usage_plan_key +- [ ] get_usage_plan_keys +- [ ] get_usage_plans +- [ ] import_api_keys +- [ ] import_documentation_parts +- [ ] import_rest_api +- [ ] put_gateway_response +- [ ] put_integration +- [ ] put_integration_response +- [ ] put_method +- [ ] put_method_response +- [ ] put_rest_api +- [ ] test_invoke_authorizer +- [ ] test_invoke_method +- [ ] update_account +- [ ] update_api_key +- [ ] update_authorizer +- [ ] update_base_path_mapping +- [ ] 
update_client_certificate +- [ ] update_deployment +- [ ] update_documentation_part +- [ ] update_documentation_version +- [ ] update_domain_name +- [ ] update_gateway_response +- [ ] update_integration +- [ ] update_integration_response +- [ ] update_method +- [ ] update_method_response +- [ ] update_model +- [ ] update_request_validator +- [ ] update_resource +- [ ] update_rest_api +- [X] update_stage +- [ ] update_usage +- [ ] update_usage_plan + +## application-autoscaling - 0% implemented +- [ ] delete_scaling_policy +- [ ] delete_scheduled_action +- [ ] deregister_scalable_target +- [ ] describe_scalable_targets +- [ ] describe_scaling_activities +- [ ] describe_scaling_policies +- [ ] describe_scheduled_actions +- [ ] put_scaling_policy +- [ ] put_scheduled_action +- [ ] register_scalable_target + +## appstream - 0% implemented +- [ ] associate_fleet +- [ ] create_directory_config +- [ ] create_fleet +- [ ] create_image_builder +- [ ] create_image_builder_streaming_url +- [ ] create_stack +- [ ] create_streaming_url +- [ ] delete_directory_config +- [ ] delete_fleet +- [ ] delete_image +- [ ] delete_image_builder +- [ ] delete_stack +- [ ] describe_directory_configs +- [ ] describe_fleets +- [ ] describe_image_builders +- [ ] describe_images +- [ ] describe_sessions +- [ ] describe_stacks +- [ ] disassociate_fleet +- [ ] expire_session +- [ ] list_associated_fleets +- [ ] list_associated_stacks +- [ ] start_fleet +- [ ] start_image_builder +- [ ] stop_fleet +- [ ] stop_image_builder +- [ ] update_directory_config +- [ ] update_fleet +- [ ] update_stack + +## athena - 0% implemented +- [ ] batch_get_named_query +- [ ] batch_get_query_execution +- [ ] create_named_query +- [ ] delete_named_query +- [ ] get_named_query +- [ ] get_query_execution +- [ ] get_query_results +- [ ] list_named_queries +- [ ] list_query_executions +- [ ] start_query_execution +- [ ] stop_query_execution + +## autoscaling - 42% implemented +- [X] attach_instances +- [X] attach_load_balancer_target_groups +- [X] attach_load_balancers +- [ ] complete_lifecycle_action +- [X] create_auto_scaling_group +- [X] create_launch_configuration +- [X] create_or_update_tags +- [X] delete_auto_scaling_group +- [X] delete_launch_configuration +- [ ] delete_lifecycle_hook +- [ ] delete_notification_configuration +- [X] delete_policy +- [ ] delete_scheduled_action +- [ ] delete_tags +- [ ] describe_account_limits +- [ ] describe_adjustment_types +- [X] describe_auto_scaling_groups +- [X] describe_auto_scaling_instances +- [ ] describe_auto_scaling_notification_types +- [X] describe_launch_configurations +- [ ] describe_lifecycle_hook_types +- [ ] describe_lifecycle_hooks +- [X] describe_load_balancer_target_groups +- [X] describe_load_balancers +- [ ] describe_metric_collection_types +- [ ] describe_notification_configurations +- [X] describe_policies +- [ ] describe_scaling_activities +- [ ] describe_scaling_process_types +- [ ] describe_scheduled_actions +- [ ] describe_tags +- [ ] describe_termination_policy_types +- [X] detach_instances +- [X] detach_load_balancer_target_groups +- [X] detach_load_balancers +- [ ] disable_metrics_collection +- [ ] enable_metrics_collection +- [ ] enter_standby +- [X] execute_policy +- [ ] exit_standby +- [ ] put_lifecycle_hook +- [ ] put_notification_configuration +- [ ] put_scaling_policy +- [ ] put_scheduled_update_group_action +- [ ] record_lifecycle_action_heartbeat +- [ ] resume_processes +- [X] set_desired_capacity +- [X] set_instance_health +- [ ] set_instance_protection +- [ ] 
suspend_processes +- [ ] terminate_instance_in_auto_scaling_group +- [X] update_auto_scaling_group + +## batch - 93% implemented +- [ ] cancel_job +- [X] create_compute_environment +- [X] create_job_queue +- [X] delete_compute_environment +- [X] delete_job_queue +- [X] deregister_job_definition +- [X] describe_compute_environments +- [X] describe_job_definitions +- [X] describe_job_queues +- [X] describe_jobs +- [X] list_jobs +- [X] register_job_definition +- [X] submit_job +- [X] terminate_job +- [X] update_compute_environment +- [X] update_job_queue + +## budgets - 0% implemented +- [ ] create_budget +- [ ] create_notification +- [ ] create_subscriber +- [ ] delete_budget +- [ ] delete_notification +- [ ] delete_subscriber +- [ ] describe_budget +- [ ] describe_budgets +- [ ] describe_notifications_for_budget +- [ ] describe_subscribers_for_notification +- [ ] update_budget +- [ ] update_notification +- [ ] update_subscriber + +## clouddirectory - 0% implemented +- [ ] add_facet_to_object +- [ ] apply_schema +- [ ] attach_object +- [ ] attach_policy +- [ ] attach_to_index +- [ ] attach_typed_link +- [ ] batch_read +- [ ] batch_write +- [ ] create_directory +- [ ] create_facet +- [ ] create_index +- [ ] create_object +- [ ] create_schema +- [ ] create_typed_link_facet +- [ ] delete_directory +- [ ] delete_facet +- [ ] delete_object +- [ ] delete_schema +- [ ] delete_typed_link_facet +- [ ] detach_from_index +- [ ] detach_object +- [ ] detach_policy +- [ ] detach_typed_link +- [ ] disable_directory +- [ ] enable_directory +- [ ] get_directory +- [ ] get_facet +- [ ] get_object_information +- [ ] get_schema_as_json +- [ ] get_typed_link_facet_information +- [ ] list_applied_schema_arns +- [ ] list_attached_indices +- [ ] list_development_schema_arns +- [ ] list_directories +- [ ] list_facet_attributes +- [ ] list_facet_names +- [ ] list_incoming_typed_links +- [ ] list_index +- [ ] list_object_attributes +- [ ] list_object_children +- [ ] list_object_parent_paths +- [ ] list_object_parents +- [ ] list_object_policies +- [ ] list_outgoing_typed_links +- [ ] list_policy_attachments +- [ ] list_published_schema_arns +- [ ] list_tags_for_resource +- [ ] list_typed_link_facet_attributes +- [ ] list_typed_link_facet_names +- [ ] lookup_policy +- [ ] publish_schema +- [ ] put_schema_from_json +- [ ] remove_facet_from_object +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_facet +- [ ] update_object_attributes +- [ ] update_schema +- [ ] update_typed_link_facet + +## cloudformation - 17% implemented +- [ ] cancel_update_stack +- [ ] continue_update_rollback +- [ ] create_change_set +- [X] create_stack +- [ ] create_stack_instances +- [ ] create_stack_set +- [ ] delete_change_set +- [X] delete_stack +- [ ] delete_stack_instances +- [ ] delete_stack_set +- [ ] describe_account_limits +- [ ] describe_change_set +- [ ] describe_stack_events +- [ ] describe_stack_instance +- [ ] describe_stack_resource +- [ ] describe_stack_resources +- [ ] describe_stack_set +- [ ] describe_stack_set_operation +- [X] describe_stacks +- [ ] estimate_template_cost +- [ ] execute_change_set +- [ ] get_stack_policy +- [ ] get_template +- [ ] get_template_summary +- [ ] list_change_sets +- [X] list_exports +- [ ] list_imports +- [ ] list_stack_instances +- [X] list_stack_resources +- [ ] list_stack_set_operation_results +- [ ] list_stack_set_operations +- [ ] list_stack_sets +- [X] list_stacks +- [ ] set_stack_policy +- [ ] signal_resource +- [ ] stop_stack_set_operation +- [X] update_stack +- [ ] update_stack_set +- 
[ ] update_termination_protection +- [ ] validate_template + +## cloudfront - 0% implemented +- [ ] create_cloud_front_origin_access_identity +- [ ] create_distribution +- [ ] create_distribution_with_tags +- [ ] create_invalidation +- [ ] create_streaming_distribution +- [ ] create_streaming_distribution_with_tags +- [ ] delete_cloud_front_origin_access_identity +- [ ] delete_distribution +- [ ] delete_service_linked_role +- [ ] delete_streaming_distribution +- [ ] get_cloud_front_origin_access_identity +- [ ] get_cloud_front_origin_access_identity_config +- [ ] get_distribution +- [ ] get_distribution_config +- [ ] get_invalidation +- [ ] get_streaming_distribution +- [ ] get_streaming_distribution_config +- [ ] list_cloud_front_origin_access_identities +- [ ] list_distributions +- [ ] list_distributions_by_web_acl_id +- [ ] list_invalidations +- [ ] list_streaming_distributions +- [ ] list_tags_for_resource +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_cloud_front_origin_access_identity +- [ ] update_distribution +- [ ] update_streaming_distribution + +## cloudhsm - 0% implemented +- [ ] add_tags_to_resource +- [ ] create_hapg +- [ ] create_hsm +- [ ] create_luna_client +- [ ] delete_hapg +- [ ] delete_hsm +- [ ] delete_luna_client +- [ ] describe_hapg +- [ ] describe_hsm +- [ ] describe_luna_client +- [ ] get_config +- [ ] list_available_zones +- [ ] list_hapgs +- [ ] list_hsms +- [ ] list_luna_clients +- [ ] list_tags_for_resource +- [ ] modify_hapg +- [ ] modify_hsm +- [ ] modify_luna_client +- [ ] remove_tags_from_resource + +## cloudhsmv2 - 0% implemented +- [ ] create_cluster +- [ ] create_hsm +- [ ] delete_cluster +- [ ] delete_hsm +- [ ] describe_backups +- [ ] describe_clusters +- [ ] initialize_cluster +- [ ] list_tags +- [ ] tag_resource +- [ ] untag_resource + +## cloudsearch - 0% implemented +- [ ] build_suggesters +- [ ] create_domain +- [ ] define_analysis_scheme +- [ ] define_expression +- [ ] define_index_field +- [ ] define_suggester +- [ ] delete_analysis_scheme +- [ ] delete_domain +- [ ] delete_expression +- [ ] delete_index_field +- [ ] delete_suggester +- [ ] describe_analysis_schemes +- [ ] describe_availability_options +- [ ] describe_domains +- [ ] describe_expressions +- [ ] describe_index_fields +- [ ] describe_scaling_parameters +- [ ] describe_service_access_policies +- [ ] describe_suggesters +- [ ] index_documents +- [ ] list_domain_names +- [ ] update_availability_options +- [ ] update_scaling_parameters +- [ ] update_service_access_policies + +## cloudsearchdomain - 0% implemented +- [ ] search +- [ ] suggest +- [ ] upload_documents + +## cloudtrail - 0% implemented +- [ ] add_tags +- [ ] create_trail +- [ ] delete_trail +- [ ] describe_trails +- [ ] get_event_selectors +- [ ] get_trail_status +- [ ] list_public_keys +- [ ] list_tags +- [ ] lookup_events +- [ ] put_event_selectors +- [ ] remove_tags +- [ ] start_logging +- [ ] stop_logging +- [ ] update_trail + +## cloudwatch - 53% implemented +- [X] delete_alarms +- [X] delete_dashboards +- [ ] describe_alarm_history +- [ ] describe_alarms +- [ ] describe_alarms_for_metric +- [ ] disable_alarm_actions +- [ ] enable_alarm_actions +- [X] get_dashboard +- [ ] get_metric_statistics +- [X] list_dashboards +- [ ] list_metrics +- [X] put_dashboard +- [X] put_metric_alarm +- [X] put_metric_data +- [X] set_alarm_state + +## codebuild - 0% implemented +- [ ] batch_delete_builds +- [ ] batch_get_builds +- [ ] batch_get_projects +- [ ] create_project +- [ ] create_webhook +- [ ] delete_project +- [ ] 
delete_webhook +- [ ] list_builds +- [ ] list_builds_for_project +- [ ] list_curated_environment_images +- [ ] list_projects +- [ ] start_build +- [ ] stop_build +- [ ] update_project + +## codecommit - 0% implemented +- [ ] batch_get_repositories +- [ ] create_branch +- [ ] create_repository +- [ ] delete_branch +- [ ] delete_repository +- [ ] get_blob +- [ ] get_branch +- [ ] get_commit +- [ ] get_differences +- [ ] get_repository +- [ ] get_repository_triggers +- [ ] list_branches +- [ ] list_repositories +- [ ] put_repository_triggers +- [ ] test_repository_triggers +- [ ] update_default_branch +- [ ] update_repository_description +- [ ] update_repository_name + +## codedeploy - 0% implemented +- [ ] add_tags_to_on_premises_instances +- [ ] batch_get_application_revisions +- [ ] batch_get_applications +- [ ] batch_get_deployment_groups +- [ ] batch_get_deployment_instances +- [ ] batch_get_deployments +- [ ] batch_get_on_premises_instances +- [ ] continue_deployment +- [ ] create_application +- [ ] create_deployment +- [ ] create_deployment_config +- [ ] create_deployment_group +- [ ] delete_application +- [ ] delete_deployment_config +- [ ] delete_deployment_group +- [ ] deregister_on_premises_instance +- [ ] get_application +- [ ] get_application_revision +- [ ] get_deployment +- [ ] get_deployment_config +- [ ] get_deployment_group +- [ ] get_deployment_instance +- [ ] get_on_premises_instance +- [ ] list_application_revisions +- [ ] list_applications +- [ ] list_deployment_configs +- [ ] list_deployment_groups +- [ ] list_deployment_instances +- [ ] list_deployments +- [ ] list_git_hub_account_token_names +- [ ] list_on_premises_instances +- [ ] register_application_revision +- [ ] register_on_premises_instance +- [ ] remove_tags_from_on_premises_instances +- [ ] skip_wait_time_for_instance_termination +- [ ] stop_deployment +- [ ] update_application +- [ ] update_deployment_group + +## codepipeline - 0% implemented +- [ ] acknowledge_job +- [ ] acknowledge_third_party_job +- [ ] create_custom_action_type +- [ ] create_pipeline +- [ ] delete_custom_action_type +- [ ] delete_pipeline +- [ ] disable_stage_transition +- [ ] enable_stage_transition +- [ ] get_job_details +- [ ] get_pipeline +- [ ] get_pipeline_execution +- [ ] get_pipeline_state +- [ ] get_third_party_job_details +- [ ] list_action_types +- [ ] list_pipeline_executions +- [ ] list_pipelines +- [ ] poll_for_jobs +- [ ] poll_for_third_party_jobs +- [ ] put_action_revision +- [ ] put_approval_result +- [ ] put_job_failure_result +- [ ] put_job_success_result +- [ ] put_third_party_job_failure_result +- [ ] put_third_party_job_success_result +- [ ] retry_stage_execution +- [ ] start_pipeline_execution +- [ ] update_pipeline + +## codestar - 0% implemented +- [ ] associate_team_member +- [ ] create_project +- [ ] create_user_profile +- [ ] delete_project +- [ ] delete_user_profile +- [ ] describe_project +- [ ] describe_user_profile +- [ ] disassociate_team_member +- [ ] list_projects +- [ ] list_resources +- [ ] list_tags_for_project +- [ ] list_team_members +- [ ] list_user_profiles +- [ ] tag_project +- [ ] untag_project +- [ ] update_project +- [ ] update_team_member +- [ ] update_user_profile + +## cognito-identity - 0% implemented +- [ ] create_identity_pool +- [ ] delete_identities +- [ ] delete_identity_pool +- [ ] describe_identity +- [ ] describe_identity_pool +- [ ] get_credentials_for_identity +- [ ] get_id +- [ ] get_identity_pool_roles +- [ ] get_open_id_token +- [ ] get_open_id_token_for_developer_identity +- 
[ ] list_identities +- [ ] list_identity_pools +- [ ] lookup_developer_identity +- [ ] merge_developer_identities +- [ ] set_identity_pool_roles +- [ ] unlink_developer_identity +- [ ] unlink_identity +- [ ] update_identity_pool + +## cognito-idp - 0% implemented +- [ ] add_custom_attributes +- [ ] admin_add_user_to_group +- [ ] admin_confirm_sign_up +- [ ] admin_create_user +- [ ] admin_delete_user +- [ ] admin_delete_user_attributes +- [ ] admin_disable_provider_for_user +- [ ] admin_disable_user +- [ ] admin_enable_user +- [ ] admin_forget_device +- [ ] admin_get_device +- [ ] admin_get_user +- [ ] admin_initiate_auth +- [ ] admin_link_provider_for_user +- [ ] admin_list_devices +- [ ] admin_list_groups_for_user +- [ ] admin_remove_user_from_group +- [ ] admin_reset_user_password +- [ ] admin_respond_to_auth_challenge +- [ ] admin_set_user_settings +- [ ] admin_update_device_status +- [ ] admin_update_user_attributes +- [ ] admin_user_global_sign_out +- [ ] change_password +- [ ] confirm_device +- [ ] confirm_forgot_password +- [ ] confirm_sign_up +- [ ] create_group +- [ ] create_identity_provider +- [ ] create_resource_server +- [ ] create_user_import_job +- [ ] create_user_pool +- [ ] create_user_pool_client +- [ ] create_user_pool_domain +- [ ] delete_group +- [ ] delete_identity_provider +- [ ] delete_resource_server +- [ ] delete_user +- [ ] delete_user_attributes +- [ ] delete_user_pool +- [ ] delete_user_pool_client +- [ ] delete_user_pool_domain +- [ ] describe_identity_provider +- [ ] describe_resource_server +- [ ] describe_user_import_job +- [ ] describe_user_pool +- [ ] describe_user_pool_client +- [ ] describe_user_pool_domain +- [ ] forget_device +- [ ] forgot_password +- [ ] get_csv_header +- [ ] get_device +- [ ] get_group +- [ ] get_identity_provider_by_identifier +- [ ] get_ui_customization +- [ ] get_user +- [ ] get_user_attribute_verification_code +- [ ] global_sign_out +- [ ] initiate_auth +- [ ] list_devices +- [ ] list_groups +- [ ] list_identity_providers +- [ ] list_resource_servers +- [ ] list_user_import_jobs +- [ ] list_user_pool_clients +- [ ] list_user_pools +- [ ] list_users +- [ ] list_users_in_group +- [ ] resend_confirmation_code +- [ ] respond_to_auth_challenge +- [ ] set_ui_customization +- [ ] set_user_settings +- [ ] sign_up +- [ ] start_user_import_job +- [ ] stop_user_import_job +- [ ] update_device_status +- [ ] update_group +- [ ] update_identity_provider +- [ ] update_resource_server +- [ ] update_user_attributes +- [ ] update_user_pool +- [ ] update_user_pool_client +- [ ] verify_user_attribute + +## cognito-sync - 0% implemented +- [ ] bulk_publish +- [ ] delete_dataset +- [ ] describe_dataset +- [ ] describe_identity_pool_usage +- [ ] describe_identity_usage +- [ ] get_bulk_publish_details +- [ ] get_cognito_events +- [ ] get_identity_pool_configuration +- [ ] list_datasets +- [ ] list_identity_pool_usage +- [ ] list_records +- [ ] register_device +- [ ] set_cognito_events +- [ ] set_identity_pool_configuration +- [ ] subscribe_to_dataset +- [ ] unsubscribe_from_dataset +- [ ] update_records + +## config - 0% implemented +- [ ] delete_config_rule +- [ ] delete_configuration_recorder +- [ ] delete_delivery_channel +- [ ] delete_evaluation_results +- [ ] deliver_config_snapshot +- [ ] describe_compliance_by_config_rule +- [ ] describe_compliance_by_resource +- [ ] describe_config_rule_evaluation_status +- [ ] describe_config_rules +- [ ] describe_configuration_recorder_status +- [ ] describe_configuration_recorders +- [ ] 
describe_delivery_channel_status +- [ ] describe_delivery_channels +- [ ] get_compliance_details_by_config_rule +- [ ] get_compliance_details_by_resource +- [ ] get_compliance_summary_by_config_rule +- [ ] get_compliance_summary_by_resource_type +- [ ] get_discovered_resource_counts +- [ ] get_resource_config_history +- [ ] list_discovered_resources +- [ ] put_config_rule +- [ ] put_configuration_recorder +- [ ] put_delivery_channel +- [ ] put_evaluations +- [ ] start_config_rules_evaluation +- [ ] start_configuration_recorder +- [ ] stop_configuration_recorder + +## cur - 0% implemented +- [ ] delete_report_definition +- [ ] describe_report_definitions +- [ ] put_report_definition + +## datapipeline - 42% implemented +- [X] activate_pipeline +- [ ] add_tags +- [X] create_pipeline +- [ ] deactivate_pipeline +- [X] delete_pipeline +- [X] describe_objects +- [X] describe_pipelines +- [ ] evaluate_expression +- [X] get_pipeline_definition +- [X] list_pipelines +- [ ] poll_for_task +- [X] put_pipeline_definition +- [ ] query_objects +- [ ] remove_tags +- [ ] report_task_progress +- [ ] report_task_runner_heartbeat +- [ ] set_status +- [ ] set_task_status +- [ ] validate_pipeline_definition + +## dax - 0% implemented +- [ ] create_cluster +- [ ] create_parameter_group +- [ ] create_subnet_group +- [ ] decrease_replication_factor +- [ ] delete_cluster +- [ ] delete_parameter_group +- [ ] delete_subnet_group +- [ ] describe_clusters +- [ ] describe_default_parameters +- [ ] describe_events +- [ ] describe_parameter_groups +- [ ] describe_parameters +- [ ] describe_subnet_groups +- [ ] increase_replication_factor +- [ ] list_tags +- [ ] reboot_node +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_cluster +- [ ] update_parameter_group +- [ ] update_subnet_group + +## devicefarm - 0% implemented +- [ ] create_device_pool +- [ ] create_network_profile +- [ ] create_project +- [ ] create_remote_access_session +- [ ] create_upload +- [ ] delete_device_pool +- [ ] delete_network_profile +- [ ] delete_project +- [ ] delete_remote_access_session +- [ ] delete_run +- [ ] delete_upload +- [ ] get_account_settings +- [ ] get_device +- [ ] get_device_pool +- [ ] get_device_pool_compatibility +- [ ] get_job +- [ ] get_network_profile +- [ ] get_offering_status +- [ ] get_project +- [ ] get_remote_access_session +- [ ] get_run +- [ ] get_suite +- [ ] get_test +- [ ] get_upload +- [ ] install_to_remote_access_session +- [ ] list_artifacts +- [ ] list_device_pools +- [ ] list_devices +- [ ] list_jobs +- [ ] list_network_profiles +- [ ] list_offering_promotions +- [ ] list_offering_transactions +- [ ] list_offerings +- [ ] list_projects +- [ ] list_remote_access_sessions +- [ ] list_runs +- [ ] list_samples +- [ ] list_suites +- [ ] list_tests +- [ ] list_unique_problems +- [ ] list_uploads +- [ ] purchase_offering +- [ ] renew_offering +- [ ] schedule_run +- [ ] stop_remote_access_session +- [ ] stop_run +- [ ] update_device_pool +- [ ] update_network_profile +- [ ] update_project + +## directconnect - 0% implemented +- [ ] allocate_connection_on_interconnect +- [ ] allocate_hosted_connection +- [ ] allocate_private_virtual_interface +- [ ] allocate_public_virtual_interface +- [ ] associate_connection_with_lag +- [ ] associate_hosted_connection +- [ ] associate_virtual_interface +- [ ] confirm_connection +- [ ] confirm_private_virtual_interface +- [ ] confirm_public_virtual_interface +- [ ] create_bgp_peer +- [ ] create_connection +- [ ] create_direct_connect_gateway +- [ ] 
create_direct_connect_gateway_association +- [ ] create_interconnect +- [ ] create_lag +- [ ] create_private_virtual_interface +- [ ] create_public_virtual_interface +- [ ] delete_bgp_peer +- [ ] delete_connection +- [ ] delete_direct_connect_gateway +- [ ] delete_direct_connect_gateway_association +- [ ] delete_interconnect +- [ ] delete_lag +- [ ] delete_virtual_interface +- [ ] describe_connection_loa +- [ ] describe_connections +- [ ] describe_connections_on_interconnect +- [ ] describe_direct_connect_gateway_associations +- [ ] describe_direct_connect_gateway_attachments +- [ ] describe_direct_connect_gateways +- [ ] describe_hosted_connections +- [ ] describe_interconnect_loa +- [ ] describe_interconnects +- [ ] describe_lags +- [ ] describe_loa +- [ ] describe_locations +- [ ] describe_tags +- [ ] describe_virtual_gateways +- [ ] describe_virtual_interfaces +- [ ] disassociate_connection_from_lag +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_lag + +## discovery - 0% implemented +- [ ] associate_configuration_items_to_application +- [ ] create_application +- [ ] create_tags +- [ ] delete_applications +- [ ] delete_tags +- [ ] describe_agents +- [ ] describe_configurations +- [ ] describe_export_configurations +- [ ] describe_export_tasks +- [ ] describe_tags +- [ ] disassociate_configuration_items_from_application +- [ ] export_configurations +- [ ] get_discovery_summary +- [ ] list_configurations +- [ ] list_server_neighbors +- [ ] start_data_collection_by_agent_ids +- [ ] start_export_task +- [ ] stop_data_collection_by_agent_ids +- [ ] update_application + +## dms - 0% implemented +- [ ] add_tags_to_resource +- [ ] create_endpoint +- [ ] create_event_subscription +- [ ] create_replication_instance +- [ ] create_replication_subnet_group +- [ ] create_replication_task +- [ ] delete_certificate +- [ ] delete_endpoint +- [ ] delete_event_subscription +- [ ] delete_replication_instance +- [ ] delete_replication_subnet_group +- [ ] delete_replication_task +- [ ] describe_account_attributes +- [ ] describe_certificates +- [ ] describe_connections +- [ ] describe_endpoint_types +- [ ] describe_endpoints +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_orderable_replication_instances +- [ ] describe_refresh_schemas_status +- [ ] describe_replication_instances +- [ ] describe_replication_subnet_groups +- [ ] describe_replication_tasks +- [ ] describe_schemas +- [ ] describe_table_statistics +- [ ] import_certificate +- [ ] list_tags_for_resource +- [ ] modify_endpoint +- [ ] modify_event_subscription +- [ ] modify_replication_instance +- [ ] modify_replication_subnet_group +- [ ] modify_replication_task +- [ ] refresh_schemas +- [ ] reload_tables +- [ ] remove_tags_from_resource +- [ ] start_replication_task +- [ ] stop_replication_task +- [ ] test_connection + +## ds - 0% implemented +- [ ] add_ip_routes +- [ ] add_tags_to_resource +- [ ] cancel_schema_extension +- [ ] connect_directory +- [ ] create_alias +- [ ] create_computer +- [ ] create_conditional_forwarder +- [ ] create_directory +- [ ] create_microsoft_ad +- [ ] create_snapshot +- [ ] create_trust +- [ ] delete_conditional_forwarder +- [ ] delete_directory +- [ ] delete_snapshot +- [ ] delete_trust +- [ ] deregister_event_topic +- [ ] describe_conditional_forwarders +- [ ] describe_directories +- [ ] describe_domain_controllers +- [ ] describe_event_topics +- [ ] describe_snapshots +- [ ] describe_trusts +- [ ] disable_radius +- [ ] disable_sso +- [ ] 
enable_radius +- [ ] enable_sso +- [ ] get_directory_limits +- [ ] get_snapshot_limits +- [ ] list_ip_routes +- [ ] list_schema_extensions +- [ ] list_tags_for_resource +- [ ] register_event_topic +- [ ] remove_ip_routes +- [ ] remove_tags_from_resource +- [ ] restore_from_snapshot +- [ ] start_schema_extension +- [ ] update_conditional_forwarder +- [ ] update_number_of_domain_controllers +- [ ] update_radius +- [ ] verify_trust + +## dynamodb - 36% implemented +- [ ] batch_get_item +- [ ] batch_write_item +- [X] create_table +- [X] delete_item +- [X] delete_table +- [ ] describe_limits +- [ ] describe_table +- [ ] describe_time_to_live +- [X] get_item +- [ ] list_tables +- [ ] list_tags_of_resource +- [X] put_item +- [X] query +- [X] scan +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_item +- [ ] update_table +- [ ] update_time_to_live + +## dynamodbstreams - 0% implemented +- [ ] describe_stream +- [ ] get_records +- [ ] get_shard_iterator +- [ ] list_streams + +## ec2 - 39% implemented +- [ ] accept_reserved_instances_exchange_quote +- [X] accept_vpc_peering_connection +- [X] allocate_address +- [ ] allocate_hosts +- [ ] assign_ipv6_addresses +- [ ] assign_private_ip_addresses +- [X] associate_address +- [X] associate_dhcp_options +- [ ] associate_iam_instance_profile +- [X] associate_route_table +- [ ] associate_subnet_cidr_block +- [ ] associate_vpc_cidr_block +- [ ] attach_classic_link_vpc +- [X] attach_internet_gateway +- [X] attach_network_interface +- [X] attach_volume +- [X] attach_vpn_gateway +- [X] authorize_security_group_egress +- [X] authorize_security_group_ingress +- [ ] bundle_instance +- [ ] cancel_bundle_task +- [ ] cancel_conversion_task +- [ ] cancel_export_task +- [ ] cancel_import_task +- [ ] cancel_reserved_instances_listing +- [X] cancel_spot_fleet_requests +- [X] cancel_spot_instance_requests +- [ ] confirm_product_instance +- [ ] copy_fpga_image +- [X] copy_image +- [ ] copy_snapshot +- [X] create_customer_gateway +- [ ] create_default_subnet +- [ ] create_default_vpc +- [X] create_dhcp_options +- [ ] create_egress_only_internet_gateway +- [ ] create_flow_logs +- [ ] create_fpga_image +- [X] create_image +- [ ] create_instance_export_task +- [X] create_internet_gateway +- [X] create_key_pair +- [X] create_nat_gateway +- [X] create_network_acl +- [X] create_network_acl_entry +- [X] create_network_interface +- [ ] create_network_interface_permission +- [ ] create_placement_group +- [ ] create_reserved_instances_listing +- [X] create_route +- [X] create_route_table +- [X] create_security_group +- [X] create_snapshot +- [ ] create_spot_datafeed_subscription +- [X] create_subnet +- [X] create_tags +- [X] create_volume +- [X] create_vpc +- [ ] create_vpc_endpoint +- [X] create_vpc_peering_connection +- [X] create_vpn_connection +- [ ] create_vpn_connection_route +- [X] create_vpn_gateway +- [X] delete_customer_gateway +- [ ] delete_dhcp_options +- [ ] delete_egress_only_internet_gateway +- [ ] delete_flow_logs +- [ ] delete_fpga_image +- [X] delete_internet_gateway +- [X] delete_key_pair +- [X] delete_nat_gateway +- [X] delete_network_acl +- [X] delete_network_acl_entry +- [X] delete_network_interface +- [ ] delete_network_interface_permission +- [ ] delete_placement_group +- [X] delete_route +- [X] delete_route_table +- [X] delete_security_group +- [X] delete_snapshot +- [ ] delete_spot_datafeed_subscription +- [X] delete_subnet +- [X] delete_tags +- [X] delete_volume +- [X] delete_vpc +- [ ] delete_vpc_endpoints +- [X] delete_vpc_peering_connection +- [X] 
delete_vpn_connection +- [ ] delete_vpn_connection_route +- [X] delete_vpn_gateway +- [X] deregister_image +- [ ] describe_account_attributes +- [X] describe_addresses +- [X] describe_availability_zones +- [ ] describe_bundle_tasks +- [ ] describe_classic_link_instances +- [ ] describe_conversion_tasks +- [ ] describe_customer_gateways +- [X] describe_dhcp_options +- [ ] describe_egress_only_internet_gateways +- [ ] describe_elastic_gpus +- [ ] describe_export_tasks +- [ ] describe_flow_logs +- [ ] describe_fpga_image_attribute +- [ ] describe_fpga_images +- [ ] describe_host_reservation_offerings +- [ ] describe_host_reservations +- [ ] describe_hosts +- [ ] describe_iam_instance_profile_associations +- [ ] describe_id_format +- [ ] describe_identity_id_format +- [ ] describe_image_attribute +- [X] describe_images +- [ ] describe_import_image_tasks +- [ ] describe_import_snapshot_tasks +- [X] describe_instance_attribute +- [ ] describe_instance_status +- [ ] describe_instances +- [X] describe_internet_gateways +- [X] describe_key_pairs +- [ ] describe_moving_addresses +- [ ] describe_nat_gateways +- [ ] describe_network_acls +- [ ] describe_network_interface_attribute +- [ ] describe_network_interface_permissions +- [X] describe_network_interfaces +- [ ] describe_placement_groups +- [ ] describe_prefix_lists +- [X] describe_regions +- [ ] describe_reserved_instances +- [ ] describe_reserved_instances_listings +- [ ] describe_reserved_instances_modifications +- [ ] describe_reserved_instances_offerings +- [ ] describe_route_tables +- [ ] describe_scheduled_instance_availability +- [ ] describe_scheduled_instances +- [ ] describe_security_group_references +- [X] describe_security_groups +- [ ] describe_snapshot_attribute +- [X] describe_snapshots +- [ ] describe_spot_datafeed_subscription +- [X] describe_spot_fleet_instances +- [ ] describe_spot_fleet_request_history +- [X] describe_spot_fleet_requests +- [X] describe_spot_instance_requests +- [ ] describe_spot_price_history +- [ ] describe_stale_security_groups +- [ ] describe_subnets +- [X] describe_tags +- [ ] describe_volume_attribute +- [ ] describe_volume_status +- [X] describe_volumes +- [ ] describe_volumes_modifications +- [X] describe_vpc_attribute +- [ ] describe_vpc_classic_link +- [ ] describe_vpc_classic_link_dns_support +- [ ] describe_vpc_endpoint_services +- [ ] describe_vpc_endpoints +- [ ] describe_vpc_peering_connections +- [ ] describe_vpcs +- [X] describe_vpn_connections +- [ ] describe_vpn_gateways +- [ ] detach_classic_link_vpc +- [X] detach_internet_gateway +- [X] detach_network_interface +- [X] detach_volume +- [X] detach_vpn_gateway +- [ ] disable_vgw_route_propagation +- [ ] disable_vpc_classic_link +- [ ] disable_vpc_classic_link_dns_support +- [X] disassociate_address +- [ ] disassociate_iam_instance_profile +- [X] disassociate_route_table +- [ ] disassociate_subnet_cidr_block +- [ ] disassociate_vpc_cidr_block +- [ ] enable_vgw_route_propagation +- [ ] enable_volume_io +- [ ] enable_vpc_classic_link +- [ ] enable_vpc_classic_link_dns_support +- [ ] get_console_output +- [ ] get_console_screenshot +- [ ] get_host_reservation_purchase_preview +- [ ] get_password_data +- [ ] get_reserved_instances_exchange_quote +- [ ] import_image +- [ ] import_instance +- [X] import_key_pair +- [ ] import_snapshot +- [ ] import_volume +- [ ] modify_fpga_image_attribute +- [ ] modify_hosts +- [ ] modify_id_format +- [ ] modify_identity_id_format +- [ ] modify_image_attribute +- [X] modify_instance_attribute +- [ ] 
modify_instance_placement +- [X] modify_network_interface_attribute +- [ ] modify_reserved_instances +- [ ] modify_snapshot_attribute +- [X] modify_spot_fleet_request +- [X] modify_subnet_attribute +- [ ] modify_volume +- [ ] modify_volume_attribute +- [X] modify_vpc_attribute +- [ ] modify_vpc_endpoint +- [ ] modify_vpc_peering_connection_options +- [ ] modify_vpc_tenancy +- [ ] monitor_instances +- [ ] move_address_to_vpc +- [ ] purchase_host_reservation +- [ ] purchase_reserved_instances_offering +- [ ] purchase_scheduled_instances +- [X] reboot_instances +- [ ] register_image +- [X] reject_vpc_peering_connection +- [X] release_address +- [ ] release_hosts +- [ ] replace_iam_instance_profile_association +- [X] replace_network_acl_association +- [X] replace_network_acl_entry +- [X] replace_route +- [X] replace_route_table_association +- [ ] report_instance_status +- [X] request_spot_fleet +- [X] request_spot_instances +- [ ] reset_fpga_image_attribute +- [ ] reset_image_attribute +- [ ] reset_instance_attribute +- [ ] reset_network_interface_attribute +- [ ] reset_snapshot_attribute +- [ ] restore_address_to_classic +- [X] revoke_security_group_egress +- [X] revoke_security_group_ingress +- [ ] run_instances +- [ ] run_scheduled_instances +- [X] start_instances +- [X] stop_instances +- [X] terminate_instances +- [ ] unassign_ipv6_addresses +- [ ] unassign_private_ip_addresses +- [ ] unmonitor_instances +- [ ] update_security_group_rule_descriptions_egress +- [ ] update_security_group_rule_descriptions_ingress + +## ecr - 27% implemented +- [ ] batch_check_layer_availability +- [ ] batch_delete_image +- [ ] batch_get_image +- [ ] complete_layer_upload +- [X] create_repository +- [ ] delete_lifecycle_policy +- [X] delete_repository +- [ ] delete_repository_policy +- [X] describe_images +- [X] describe_repositories +- [ ] get_authorization_token +- [ ] get_download_url_for_layer +- [ ] get_lifecycle_policy +- [ ] get_lifecycle_policy_preview +- [ ] get_repository_policy +- [ ] initiate_layer_upload +- [X] list_images +- [X] put_image +- [ ] put_lifecycle_policy +- [ ] set_repository_policy +- [ ] start_lifecycle_policy_preview +- [ ] upload_layer_part + +## ecs - 87% implemented +- [X] create_cluster +- [X] create_service +- [X] delete_attributes +- [X] delete_cluster +- [X] delete_service +- [X] deregister_container_instance +- [X] deregister_task_definition +- [X] describe_clusters +- [X] describe_container_instances +- [X] describe_services +- [X] describe_task_definition +- [X] describe_tasks +- [ ] discover_poll_endpoint +- [X] list_attributes +- [X] list_clusters +- [X] list_container_instances +- [X] list_services +- [X] list_task_definition_families +- [X] list_task_definitions +- [X] list_tasks +- [X] put_attributes +- [X] register_container_instance +- [X] register_task_definition +- [X] run_task +- [X] start_task +- [X] stop_task +- [ ] submit_container_state_change +- [ ] submit_task_state_change +- [ ] update_container_agent +- [X] update_container_instances_state +- [X] update_service + +## efs - 0% implemented +- [ ] create_file_system +- [ ] create_mount_target +- [ ] create_tags +- [ ] delete_file_system +- [ ] delete_mount_target +- [ ] delete_tags +- [ ] describe_file_systems +- [ ] describe_mount_target_security_groups +- [ ] describe_mount_targets +- [ ] describe_tags +- [ ] modify_mount_target_security_groups + +## elasticache - 0% implemented +- [ ] add_tags_to_resource +- [ ] authorize_cache_security_group_ingress +- [ ] copy_snapshot +- [ ] create_cache_cluster +- [ 
] create_cache_parameter_group +- [ ] create_cache_security_group +- [ ] create_cache_subnet_group +- [ ] create_replication_group +- [ ] create_snapshot +- [ ] delete_cache_cluster +- [ ] delete_cache_parameter_group +- [ ] delete_cache_security_group +- [ ] delete_cache_subnet_group +- [ ] delete_replication_group +- [ ] delete_snapshot +- [ ] describe_cache_clusters +- [ ] describe_cache_engine_versions +- [ ] describe_cache_parameter_groups +- [ ] describe_cache_parameters +- [ ] describe_cache_security_groups +- [ ] describe_cache_subnet_groups +- [ ] describe_engine_default_parameters +- [ ] describe_events +- [ ] describe_replication_groups +- [ ] describe_reserved_cache_nodes +- [ ] describe_reserved_cache_nodes_offerings +- [ ] describe_snapshots +- [ ] list_allowed_node_type_modifications +- [ ] list_tags_for_resource +- [ ] modify_cache_cluster +- [ ] modify_cache_parameter_group +- [ ] modify_cache_subnet_group +- [ ] modify_replication_group +- [ ] modify_replication_group_shard_configuration +- [ ] purchase_reserved_cache_nodes_offering +- [ ] reboot_cache_cluster +- [ ] remove_tags_from_resource +- [ ] reset_cache_parameter_group +- [ ] revoke_cache_security_group_ingress +- [ ] test_failover + +## elasticbeanstalk - 0% implemented +- [ ] abort_environment_update +- [ ] apply_environment_managed_action +- [ ] check_dns_availability +- [ ] compose_environments +- [ ] create_application +- [ ] create_application_version +- [ ] create_configuration_template +- [ ] create_environment +- [ ] create_platform_version +- [ ] create_storage_location +- [ ] delete_application +- [ ] delete_application_version +- [ ] delete_configuration_template +- [ ] delete_environment_configuration +- [ ] delete_platform_version +- [ ] describe_application_versions +- [ ] describe_applications +- [ ] describe_configuration_options +- [ ] describe_configuration_settings +- [ ] describe_environment_health +- [ ] describe_environment_managed_action_history +- [ ] describe_environment_managed_actions +- [ ] describe_environment_resources +- [ ] describe_environments +- [ ] describe_events +- [ ] describe_instances_health +- [ ] describe_platform_version +- [ ] list_available_solution_stacks +- [ ] list_platform_versions +- [ ] list_tags_for_resource +- [ ] rebuild_environment +- [ ] request_environment_info +- [ ] restart_app_server +- [ ] retrieve_environment_info +- [ ] swap_environment_cnames +- [ ] terminate_environment +- [ ] update_application +- [ ] update_application_resource_lifecycle +- [ ] update_application_version +- [ ] update_configuration_template +- [ ] update_environment +- [ ] update_tags_for_resource +- [ ] validate_configuration_settings + +## elastictranscoder - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] create_pipeline +- [ ] create_preset +- [ ] delete_pipeline +- [ ] delete_preset +- [ ] list_jobs_by_pipeline +- [ ] list_jobs_by_status +- [ ] list_pipelines +- [ ] list_presets +- [ ] read_job +- [ ] read_pipeline +- [ ] read_preset +- [ ] test_role +- [ ] update_pipeline +- [ ] update_pipeline_notifications +- [ ] update_pipeline_status + +## elb - 34% implemented +- [ ] add_tags +- [X] apply_security_groups_to_load_balancer +- [ ] attach_load_balancer_to_subnets +- [X] configure_health_check +- [X] create_app_cookie_stickiness_policy +- [X] create_lb_cookie_stickiness_policy +- [X] create_load_balancer +- [X] create_load_balancer_listeners +- [ ] create_load_balancer_policy +- [X] delete_load_balancer +- [X] delete_load_balancer_listeners +- [ ] 
delete_load_balancer_policy +- [ ] deregister_instances_from_load_balancer +- [ ] describe_account_limits +- [ ] describe_instance_health +- [ ] describe_load_balancer_attributes +- [ ] describe_load_balancer_policies +- [ ] describe_load_balancer_policy_types +- [X] describe_load_balancers +- [ ] describe_tags +- [ ] detach_load_balancer_from_subnets +- [ ] disable_availability_zones_for_load_balancer +- [ ] enable_availability_zones_for_load_balancer +- [ ] modify_load_balancer_attributes +- [ ] register_instances_with_load_balancer +- [ ] remove_tags +- [ ] set_load_balancer_listener_ssl_certificate +- [ ] set_load_balancer_policies_for_backend_server +- [X] set_load_balancer_policies_of_listener + +## elbv2 - 70% implemented +- [ ] add_listener_certificates +- [ ] add_tags +- [X] create_listener +- [X] create_load_balancer +- [X] create_rule +- [X] create_target_group +- [X] delete_listener +- [X] delete_load_balancer +- [X] delete_rule +- [X] delete_target_group +- [X] deregister_targets +- [ ] describe_account_limits +- [ ] describe_listener_certificates +- [X] describe_listeners +- [X] describe_load_balancer_attributes +- [X] describe_load_balancers +- [X] describe_rules +- [ ] describe_ssl_policies +- [ ] describe_tags +- [ ] describe_target_group_attributes +- [X] describe_target_groups +- [X] describe_target_health +- [X] modify_listener +- [X] modify_load_balancer_attributes +- [X] modify_rule +- [X] modify_target_group +- [ ] modify_target_group_attributes +- [X] register_targets +- [ ] remove_listener_certificates +- [ ] remove_tags +- [X] set_ip_address_type +- [X] set_rule_priorities +- [X] set_security_groups +- [X] set_subnets + +## emr - 55% implemented +- [ ] add_instance_fleet +- [X] add_instance_groups +- [X] add_job_flow_steps +- [X] add_tags +- [ ] cancel_steps +- [ ] create_security_configuration +- [ ] delete_security_configuration +- [ ] describe_cluster +- [X] describe_job_flows +- [ ] describe_security_configuration +- [X] describe_step +- [X] list_bootstrap_actions +- [X] list_clusters +- [ ] list_instance_fleets +- [X] list_instance_groups +- [ ] list_instances +- [ ] list_security_configurations +- [X] list_steps +- [ ] modify_instance_fleet +- [X] modify_instance_groups +- [ ] put_auto_scaling_policy +- [ ] remove_auto_scaling_policy +- [X] remove_tags +- [X] run_job_flow +- [X] set_termination_protection +- [X] set_visible_to_all_users +- [X] terminate_job_flows + +## es - 0% implemented +- [ ] add_tags +- [ ] create_elasticsearch_domain +- [ ] delete_elasticsearch_domain +- [ ] delete_elasticsearch_service_role +- [ ] describe_elasticsearch_domain +- [ ] describe_elasticsearch_domain_config +- [ ] describe_elasticsearch_domains +- [ ] describe_elasticsearch_instance_type_limits +- [ ] list_domain_names +- [ ] list_elasticsearch_instance_types +- [ ] list_elasticsearch_versions +- [ ] list_tags +- [ ] remove_tags +- [ ] update_elasticsearch_domain_config + +## events - 100% implemented +- [X] delete_rule +- [X] describe_event_bus +- [X] describe_rule +- [X] disable_rule +- [X] enable_rule +- [X] list_rule_names_by_target +- [X] list_rules +- [X] list_targets_by_rule +- [X] put_events +- [X] put_permission +- [X] put_rule +- [X] put_targets +- [X] remove_permission +- [X] remove_targets +- [X] test_event_pattern + +## firehose - 0% implemented +- [ ] create_delivery_stream +- [ ] delete_delivery_stream +- [ ] describe_delivery_stream +- [ ] get_kinesis_stream +- [ ] list_delivery_streams +- [ ] put_record +- [ ] put_record_batch +- [ ] update_destination + 
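For a service at 100% like events above, every listed call should work against the mock. A minimal sketch of what that means in practice, assuming moto exports a `mock_events` decorator following its usual per-service pattern (the decorator name is an assumption; the `put_rule` and `list_rules` calls are both marked [X] in the checklist):

```python
import boto3
from moto import mock_events  # assumed export; events is listed as 100% implemented


@mock_events
def test_put_and_list_rules():
    # Plain boto3 client; moto intercepts the requests in-process.
    client = boto3.client("events", region_name="us-east-1")

    # put_rule and list_rules are both [X] in the events section above.
    client.put_rule(Name="nightly", ScheduleExpression="rate(1 day)")

    rules = client.list_rules()["Rules"]
    assert [r["Name"] for r in rules] == ["nightly"]
```

The same pattern applies to any service section: the [X] entries are the boto3 client methods a test like this can rely on.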
+## gamelift - 0% implemented +- [ ] accept_match +- [ ] create_alias +- [ ] create_build +- [ ] create_fleet +- [ ] create_game_session +- [ ] create_game_session_queue +- [ ] create_matchmaking_configuration +- [ ] create_matchmaking_rule_set +- [ ] create_player_session +- [ ] create_player_sessions +- [ ] create_vpc_peering_authorization +- [ ] create_vpc_peering_connection +- [ ] delete_alias +- [ ] delete_build +- [ ] delete_fleet +- [ ] delete_game_session_queue +- [ ] delete_matchmaking_configuration +- [ ] delete_scaling_policy +- [ ] delete_vpc_peering_authorization +- [ ] delete_vpc_peering_connection +- [ ] describe_alias +- [ ] describe_build +- [ ] describe_ec2_instance_limits +- [ ] describe_fleet_attributes +- [ ] describe_fleet_capacity +- [ ] describe_fleet_events +- [ ] describe_fleet_port_settings +- [ ] describe_fleet_utilization +- [ ] describe_game_session_details +- [ ] describe_game_session_placement +- [ ] describe_game_session_queues +- [ ] describe_game_sessions +- [ ] describe_instances +- [ ] describe_matchmaking +- [ ] describe_matchmaking_configurations +- [ ] describe_matchmaking_rule_sets +- [ ] describe_player_sessions +- [ ] describe_runtime_configuration +- [ ] describe_scaling_policies +- [ ] describe_vpc_peering_authorizations +- [ ] describe_vpc_peering_connections +- [ ] get_game_session_log_url +- [ ] get_instance_access +- [ ] list_aliases +- [ ] list_builds +- [ ] list_fleets +- [ ] put_scaling_policy +- [ ] request_upload_credentials +- [ ] resolve_alias +- [ ] search_game_sessions +- [ ] start_game_session_placement +- [ ] start_matchmaking +- [ ] stop_game_session_placement +- [ ] stop_matchmaking +- [ ] update_alias +- [ ] update_build +- [ ] update_fleet_attributes +- [ ] update_fleet_capacity +- [ ] update_fleet_port_settings +- [ ] update_game_session +- [ ] update_game_session_queue +- [ ] update_matchmaking_configuration +- [ ] update_runtime_configuration +- [ ] validate_matchmaking_rule_set + +## glacier - 12% implemented +- [ ] abort_multipart_upload +- [ ] abort_vault_lock +- [ ] add_tags_to_vault +- [ ] complete_multipart_upload +- [ ] complete_vault_lock +- [X] create_vault +- [ ] delete_archive +- [X] delete_vault +- [ ] delete_vault_access_policy +- [ ] delete_vault_notifications +- [ ] describe_job +- [ ] describe_vault +- [ ] get_data_retrieval_policy +- [ ] get_job_output +- [ ] get_vault_access_policy +- [ ] get_vault_lock +- [ ] get_vault_notifications +- [X] initiate_job +- [ ] initiate_multipart_upload +- [ ] initiate_vault_lock +- [X] list_jobs +- [ ] list_multipart_uploads +- [ ] list_parts +- [ ] list_provisioned_capacity +- [ ] list_tags_for_vault +- [ ] list_vaults +- [ ] purchase_provisioned_capacity +- [ ] remove_tags_from_vault +- [ ] set_data_retrieval_policy +- [ ] set_vault_access_policy +- [ ] set_vault_notifications +- [ ] upload_archive +- [ ] upload_multipart_part + +## glue - 0% implemented +- [ ] batch_create_partition +- [ ] batch_delete_connection +- [ ] batch_delete_partition +- [ ] batch_delete_table +- [ ] batch_get_partition +- [ ] batch_stop_job_run +- [ ] create_classifier +- [ ] create_connection +- [ ] create_crawler +- [ ] create_database +- [ ] create_dev_endpoint +- [ ] create_job +- [ ] create_partition +- [ ] create_script +- [ ] create_table +- [ ] create_trigger +- [ ] create_user_defined_function +- [ ] delete_classifier +- [ ] delete_connection +- [ ] delete_crawler +- [ ] delete_database +- [ ] delete_dev_endpoint +- [ ] delete_job +- [ ] delete_partition +- [ ] delete_table +- [ ] 
delete_trigger +- [ ] delete_user_defined_function +- [ ] get_catalog_import_status +- [ ] get_classifier +- [ ] get_classifiers +- [ ] get_connection +- [ ] get_connections +- [ ] get_crawler +- [ ] get_crawler_metrics +- [ ] get_crawlers +- [ ] get_database +- [ ] get_databases +- [ ] get_dataflow_graph +- [ ] get_dev_endpoint +- [ ] get_dev_endpoints +- [ ] get_job +- [ ] get_job_run +- [ ] get_job_runs +- [ ] get_jobs +- [ ] get_mapping +- [ ] get_partition +- [ ] get_partitions +- [ ] get_plan +- [ ] get_table +- [ ] get_table_versions +- [ ] get_tables +- [ ] get_trigger +- [ ] get_triggers +- [ ] get_user_defined_function +- [ ] get_user_defined_functions +- [ ] import_catalog_to_glue +- [ ] reset_job_bookmark +- [ ] start_crawler +- [ ] start_crawler_schedule +- [ ] start_job_run +- [ ] start_trigger +- [ ] stop_crawler +- [ ] stop_crawler_schedule +- [ ] stop_trigger +- [ ] update_classifier +- [ ] update_connection +- [ ] update_crawler +- [ ] update_crawler_schedule +- [ ] update_database +- [ ] update_dev_endpoint +- [ ] update_job +- [ ] update_partition +- [ ] update_table +- [ ] update_trigger +- [ ] update_user_defined_function + +## greengrass - 0% implemented +- [ ] associate_role_to_group +- [ ] associate_service_role_to_account +- [ ] create_core_definition +- [ ] create_core_definition_version +- [ ] create_deployment +- [ ] create_device_definition +- [ ] create_device_definition_version +- [ ] create_function_definition +- [ ] create_function_definition_version +- [ ] create_group +- [ ] create_group_certificate_authority +- [ ] create_group_version +- [ ] create_logger_definition +- [ ] create_logger_definition_version +- [ ] create_subscription_definition +- [ ] create_subscription_definition_version +- [ ] delete_core_definition +- [ ] delete_device_definition +- [ ] delete_function_definition +- [ ] delete_group +- [ ] delete_logger_definition +- [ ] delete_subscription_definition +- [ ] disassociate_role_from_group +- [ ] disassociate_service_role_from_account +- [ ] get_associated_role +- [ ] get_connectivity_info +- [ ] get_core_definition +- [ ] get_core_definition_version +- [ ] get_deployment_status +- [ ] get_device_definition +- [ ] get_device_definition_version +- [ ] get_function_definition +- [ ] get_function_definition_version +- [ ] get_group +- [ ] get_group_certificate_authority +- [ ] get_group_certificate_configuration +- [ ] get_group_version +- [ ] get_logger_definition +- [ ] get_logger_definition_version +- [ ] get_service_role_for_account +- [ ] get_subscription_definition +- [ ] get_subscription_definition_version +- [ ] list_core_definition_versions +- [ ] list_core_definitions +- [ ] list_deployments +- [ ] list_device_definition_versions +- [ ] list_device_definitions +- [ ] list_function_definition_versions +- [ ] list_function_definitions +- [ ] list_group_certificate_authorities +- [ ] list_group_versions +- [ ] list_groups +- [ ] list_logger_definition_versions +- [ ] list_logger_definitions +- [ ] list_subscription_definition_versions +- [ ] list_subscription_definitions +- [ ] reset_deployments +- [ ] update_connectivity_info +- [ ] update_core_definition +- [ ] update_device_definition +- [ ] update_function_definition +- [ ] update_group +- [ ] update_group_certificate_configuration +- [ ] update_logger_definition +- [ ] update_subscription_definition + +## health - 0% implemented +- [ ] describe_affected_entities +- [ ] describe_entity_aggregates +- [ ] describe_event_aggregates +- [ ] describe_event_details +- [ ] 
describe_event_types +- [ ] describe_events + +## iam - 46% implemented +- [ ] add_client_id_to_open_id_connect_provider +- [X] add_role_to_instance_profile +- [X] add_user_to_group +- [X] attach_group_policy +- [X] attach_role_policy +- [X] attach_user_policy +- [ ] change_password +- [X] create_access_key +- [X] create_account_alias +- [X] create_group +- [X] create_instance_profile +- [X] create_login_profile +- [ ] create_open_id_connect_provider +- [X] create_policy +- [X] create_policy_version +- [X] create_role +- [ ] create_saml_provider +- [ ] create_service_linked_role +- [ ] create_service_specific_credential +- [X] create_user +- [ ] create_virtual_mfa_device +- [X] deactivate_mfa_device +- [X] delete_access_key +- [X] delete_account_alias +- [ ] delete_account_password_policy +- [ ] delete_group +- [ ] delete_group_policy +- [ ] delete_instance_profile +- [X] delete_login_profile +- [ ] delete_open_id_connect_provider +- [ ] delete_policy +- [X] delete_policy_version +- [X] delete_role +- [X] delete_role_policy +- [ ] delete_saml_provider +- [ ] delete_server_certificate +- [ ] delete_service_linked_role +- [ ] delete_service_specific_credential +- [ ] delete_signing_certificate +- [ ] delete_ssh_public_key +- [X] delete_user +- [X] delete_user_policy +- [ ] delete_virtual_mfa_device +- [X] detach_group_policy +- [X] detach_role_policy +- [X] detach_user_policy +- [X] enable_mfa_device +- [ ] generate_credential_report +- [ ] get_access_key_last_used +- [ ] get_account_authorization_details +- [ ] get_account_password_policy +- [ ] get_account_summary +- [ ] get_context_keys_for_custom_policy +- [ ] get_context_keys_for_principal_policy +- [X] get_credential_report +- [X] get_group +- [X] get_group_policy +- [X] get_instance_profile +- [X] get_login_profile +- [ ] get_open_id_connect_provider +- [X] get_policy +- [X] get_policy_version +- [X] get_role +- [X] get_role_policy +- [ ] get_saml_provider +- [X] get_server_certificate +- [ ] get_service_linked_role_deletion_status +- [ ] get_ssh_public_key +- [X] get_user +- [X] get_user_policy +- [ ] list_access_keys +- [X] list_account_aliases +- [X] list_attached_group_policies +- [X] list_attached_role_policies +- [X] list_attached_user_policies +- [ ] list_entities_for_policy +- [X] list_group_policies +- [X] list_groups +- [ ] list_groups_for_user +- [ ] list_instance_profiles +- [ ] list_instance_profiles_for_role +- [X] list_mfa_devices +- [ ] list_open_id_connect_providers +- [X] list_policies +- [X] list_policy_versions +- [X] list_role_policies +- [ ] list_roles +- [ ] list_saml_providers +- [ ] list_server_certificates +- [ ] list_service_specific_credentials +- [ ] list_signing_certificates +- [ ] list_ssh_public_keys +- [X] list_user_policies +- [X] list_users +- [ ] list_virtual_mfa_devices +- [X] put_group_policy +- [X] put_role_policy +- [X] put_user_policy +- [ ] remove_client_id_from_open_id_connect_provider +- [X] remove_role_from_instance_profile +- [X] remove_user_from_group +- [ ] reset_service_specific_credential +- [ ] resync_mfa_device +- [ ] set_default_policy_version +- [ ] simulate_custom_policy +- [ ] simulate_principal_policy +- [ ] update_access_key +- [ ] update_account_password_policy +- [ ] update_assume_role_policy +- [ ] update_group +- [X] update_login_profile +- [ ] update_open_id_connect_provider_thumbprint +- [ ] update_role_description +- [ ] update_saml_provider +- [ ] update_server_certificate +- [ ] update_service_specific_credential +- [ ] update_signing_certificate +- [ ] 
update_ssh_public_key +- [ ] update_user +- [ ] upload_server_certificate +- [ ] upload_signing_certificate +- [ ] upload_ssh_public_key + +## importexport - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] get_shipping_label +- [ ] get_status +- [ ] list_jobs +- [ ] update_job + +## inspector - 0% implemented +- [ ] add_attributes_to_findings +- [ ] create_assessment_target +- [ ] create_assessment_template +- [ ] create_resource_group +- [ ] delete_assessment_run +- [ ] delete_assessment_target +- [ ] delete_assessment_template +- [ ] describe_assessment_runs +- [ ] describe_assessment_targets +- [ ] describe_assessment_templates +- [ ] describe_cross_account_access_role +- [ ] describe_findings +- [ ] describe_resource_groups +- [ ] describe_rules_packages +- [ ] get_assessment_report +- [ ] get_telemetry_metadata +- [ ] list_assessment_run_agents +- [ ] list_assessment_runs +- [ ] list_assessment_targets +- [ ] list_assessment_templates +- [ ] list_event_subscriptions +- [ ] list_findings +- [ ] list_rules_packages +- [ ] list_tags_for_resource +- [ ] preview_agents +- [ ] register_cross_account_access_role +- [ ] remove_attributes_from_findings +- [ ] set_tags_for_resource +- [ ] start_assessment_run +- [ ] stop_assessment_run +- [ ] subscribe_to_event +- [ ] unsubscribe_from_event +- [ ] update_assessment_target + +## iot - 45% implemented +- [ ] accept_certificate_transfer +- [X] attach_principal_policy +- [X] attach_thing_principal +- [ ] cancel_certificate_transfer +- [ ] create_certificate_from_csr +- [X] create_keys_and_certificate +- [X] create_policy +- [ ] create_policy_version +- [X] create_thing +- [X] create_thing_type +- [ ] create_topic_rule +- [ ] delete_ca_certificate +- [X] delete_certificate +- [X] delete_policy +- [ ] delete_policy_version +- [ ] delete_registration_code +- [X] delete_thing +- [X] delete_thing_type +- [ ] delete_topic_rule +- [ ] deprecate_thing_type +- [ ] describe_ca_certificate +- [X] describe_certificate +- [ ] describe_endpoint +- [X] describe_thing +- [X] describe_thing_type +- [X] detach_principal_policy +- [X] detach_thing_principal +- [ ] disable_topic_rule +- [ ] enable_topic_rule +- [ ] get_logging_options +- [X] get_policy +- [ ] get_policy_version +- [ ] get_registration_code +- [ ] get_topic_rule +- [ ] list_ca_certificates +- [X] list_certificates +- [ ] list_certificates_by_ca +- [ ] list_outgoing_certificates +- [X] list_policies +- [X] list_policy_principals +- [ ] list_policy_versions +- [X] list_principal_policies +- [X] list_principal_things +- [X] list_thing_principals +- [X] list_thing_types +- [X] list_things +- [ ] list_topic_rules +- [ ] register_ca_certificate +- [ ] register_certificate +- [ ] reject_certificate_transfer +- [ ] replace_topic_rule +- [ ] set_default_policy_version +- [ ] set_logging_options +- [ ] transfer_certificate +- [ ] update_ca_certificate +- [X] update_certificate +- [X] update_thing + +## iot-data - 0% implemented +- [ ] delete_thing_shadow +- [ ] get_thing_shadow +- [ ] publish +- [ ] update_thing_shadow + +## kinesis - 61% implemented +- [X] add_tags_to_stream +- [X] create_stream +- [ ] decrease_stream_retention_period +- [X] delete_stream +- [ ] describe_limits +- [X] describe_stream +- [ ] disable_enhanced_monitoring +- [ ] enable_enhanced_monitoring +- [X] get_records +- [X] get_shard_iterator +- [ ] increase_stream_retention_period +- [X] list_streams +- [X] list_tags_for_stream +- [X] merge_shards +- [X] put_record +- [X] put_records +- [X] remove_tags_from_stream +- [X] 
split_shard +- [ ] start_stream_encryption +- [ ] stop_stream_encryption +- [ ] update_shard_count + +## kinesisanalytics - 0% implemented +- [ ] add_application_cloud_watch_logging_option +- [ ] add_application_input +- [ ] add_application_input_processing_configuration +- [ ] add_application_output +- [ ] add_application_reference_data_source +- [ ] create_application +- [ ] delete_application +- [ ] delete_application_cloud_watch_logging_option +- [ ] delete_application_input_processing_configuration +- [ ] delete_application_output +- [ ] delete_application_reference_data_source +- [ ] describe_application +- [ ] discover_input_schema +- [ ] list_applications +- [ ] start_application +- [ ] stop_application +- [ ] update_application + +## kms - 25% implemented +- [ ] cancel_key_deletion +- [ ] create_alias +- [ ] create_grant +- [X] create_key +- [ ] decrypt +- [X] delete_alias +- [ ] delete_imported_key_material +- [X] describe_key +- [ ] disable_key +- [X] disable_key_rotation +- [ ] enable_key +- [X] enable_key_rotation +- [ ] encrypt +- [ ] generate_data_key +- [ ] generate_data_key_without_plaintext +- [ ] generate_random +- [X] get_key_policy +- [X] get_key_rotation_status +- [ ] get_parameters_for_import +- [ ] import_key_material +- [ ] list_aliases +- [ ] list_grants +- [ ] list_key_policies +- [X] list_keys +- [ ] list_resource_tags +- [ ] list_retirable_grants +- [X] put_key_policy +- [ ] re_encrypt +- [ ] retire_grant +- [ ] revoke_grant +- [ ] schedule_key_deletion +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_alias +- [ ] update_key_description + +## lambda - 0% implemented +- [ ] add_permission +- [ ] create_alias +- [ ] create_event_source_mapping +- [ ] create_function +- [ ] delete_alias +- [ ] delete_event_source_mapping +- [ ] delete_function +- [ ] get_account_settings +- [ ] get_alias +- [ ] get_event_source_mapping +- [ ] get_function +- [ ] get_function_configuration +- [ ] get_policy +- [ ] invoke +- [ ] invoke_async +- [ ] list_aliases +- [ ] list_event_source_mappings +- [ ] list_functions +- [ ] list_tags +- [ ] list_versions_by_function +- [ ] publish_version +- [ ] remove_permission +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_alias +- [ ] update_event_source_mapping +- [ ] update_function_code +- [ ] update_function_configuration + +## lex-models - 0% implemented +- [ ] create_bot_version +- [ ] create_intent_version +- [ ] create_slot_type_version +- [ ] delete_bot +- [ ] delete_bot_alias +- [ ] delete_bot_channel_association +- [ ] delete_bot_version +- [ ] delete_intent +- [ ] delete_intent_version +- [ ] delete_slot_type +- [ ] delete_slot_type_version +- [ ] delete_utterances +- [ ] get_bot +- [ ] get_bot_alias +- [ ] get_bot_aliases +- [ ] get_bot_channel_association +- [ ] get_bot_channel_associations +- [ ] get_bot_versions +- [ ] get_bots +- [ ] get_builtin_intent +- [ ] get_builtin_intents +- [ ] get_builtin_slot_types +- [ ] get_export +- [ ] get_intent +- [ ] get_intent_versions +- [ ] get_intents +- [ ] get_slot_type +- [ ] get_slot_type_versions +- [ ] get_slot_types +- [ ] get_utterances_view +- [ ] put_bot +- [ ] put_bot_alias +- [ ] put_intent +- [ ] put_slot_type + +## lex-runtime - 0% implemented +- [ ] post_content +- [ ] post_text + +## lightsail - 0% implemented +- [ ] allocate_static_ip +- [ ] attach_static_ip +- [ ] close_instance_public_ports +- [ ] create_domain +- [ ] create_domain_entry +- [ ] create_instance_snapshot +- [ ] create_instances +- [ ] create_instances_from_snapshot +- [ ] create_key_pair +- [ 
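Kinesis sits at 61% above with the whole write/read path checked off, so a full round trip works end to end. A minimal sketch using only checked endpoints (stream name, partition key, and payload are illustrative):

```python
import boto3
from moto import mock_kinesis

@mock_kinesis
def test_stream_round_trip():
    kinesis = boto3.client('kinesis', region_name='us-east-1')
    kinesis.create_stream(StreamName='demo-stream', ShardCount=1)
    kinesis.put_record(StreamName='demo-stream', Data=b'hello', PartitionKey='pk-1')

    shard_id = kinesis.describe_stream(
        StreamName='demo-stream')['StreamDescription']['Shards'][0]['ShardId']
    iterator = kinesis.get_shard_iterator(
        StreamName='demo-stream', ShardId=shard_id,
        ShardIteratorType='TRIM_HORIZON')['ShardIterator']
    records = kinesis.get_records(ShardIterator=iterator)['Records']
    assert records[0]['Data'] == b'hello'
```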
] delete_domain +- [ ] delete_domain_entry +- [ ] delete_instance +- [ ] delete_instance_snapshot +- [ ] delete_key_pair +- [ ] detach_static_ip +- [ ] download_default_key_pair +- [ ] get_active_names +- [ ] get_blueprints +- [ ] get_bundles +- [ ] get_domain +- [ ] get_domains +- [ ] get_instance +- [ ] get_instance_access_details +- [ ] get_instance_metric_data +- [ ] get_instance_port_states +- [ ] get_instance_snapshot +- [ ] get_instance_snapshots +- [ ] get_instance_state +- [ ] get_instances +- [ ] get_key_pair +- [ ] get_key_pairs +- [ ] get_operation +- [ ] get_operations +- [ ] get_operations_for_resource +- [ ] get_regions +- [ ] get_static_ip +- [ ] get_static_ips +- [ ] import_key_pair +- [ ] is_vpc_peered +- [ ] open_instance_public_ports +- [ ] peer_vpc +- [ ] put_instance_public_ports +- [ ] reboot_instance +- [ ] release_static_ip +- [ ] start_instance +- [ ] stop_instance +- [ ] unpeer_vpc +- [ ] update_domain_entry + +## logs - 24% implemented +- [ ] associate_kms_key +- [ ] cancel_export_task +- [ ] create_export_task +- [X] create_log_group +- [X] create_log_stream +- [ ] delete_destination +- [X] delete_log_group +- [X] delete_log_stream +- [ ] delete_metric_filter +- [ ] delete_resource_policy +- [ ] delete_retention_policy +- [ ] delete_subscription_filter +- [ ] describe_destinations +- [ ] describe_export_tasks +- [ ] describe_log_groups +- [X] describe_log_streams +- [ ] describe_metric_filters +- [ ] describe_resource_policies +- [ ] describe_subscription_filters +- [ ] disassociate_kms_key +- [X] filter_log_events +- [X] get_log_events +- [ ] list_tags_log_group +- [ ] put_destination +- [ ] put_destination_policy +- [X] put_log_events +- [ ] put_metric_filter +- [ ] put_resource_policy +- [ ] put_retention_policy +- [ ] put_subscription_filter +- [ ] tag_log_group +- [ ] test_metric_filter +- [ ] untag_log_group + +## machinelearning - 0% implemented +- [ ] add_tags +- [ ] create_batch_prediction +- [ ] create_data_source_from_rds +- [ ] create_data_source_from_redshift +- [ ] create_data_source_from_s3 +- [ ] create_evaluation +- [ ] create_ml_model +- [ ] create_realtime_endpoint +- [ ] delete_batch_prediction +- [ ] delete_data_source +- [ ] delete_evaluation +- [ ] delete_ml_model +- [ ] delete_realtime_endpoint +- [ ] delete_tags +- [ ] describe_batch_predictions +- [ ] describe_data_sources +- [ ] describe_evaluations +- [ ] describe_ml_models +- [ ] describe_tags +- [ ] get_batch_prediction +- [ ] get_data_source +- [ ] get_evaluation +- [ ] get_ml_model +- [ ] predict +- [ ] update_batch_prediction +- [ ] update_data_source +- [ ] update_evaluation +- [ ] update_ml_model + +## marketplace-entitlement - 0% implemented +- [ ] get_entitlements + +## marketplacecommerceanalytics - 0% implemented +- [ ] generate_data_set +- [ ] start_support_data_export + +## meteringmarketplace - 0% implemented +- [ ] batch_meter_usage +- [ ] meter_usage +- [ ] resolve_customer + +## mgh - 0% implemented +- [ ] associate_created_artifact +- [ ] associate_discovered_resource +- [ ] create_progress_update_stream +- [ ] delete_progress_update_stream +- [ ] describe_application_state +- [ ] describe_migration_task +- [ ] disassociate_created_artifact +- [ ] disassociate_discovered_resource +- [ ] import_migration_task +- [ ] list_created_artifacts +- [ ] list_discovered_resources +- [ ] list_migration_tasks +- [ ] list_progress_update_streams +- [ ] notify_application_state +- [ ] notify_migration_task_state +- [ ] put_resource_attributes + +## mobile - 0% implemented +- [ ] 
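The logs section's 24% is concentrated in the group/stream/event path, which is enough for the common test pattern. A minimal sketch under that assumption (group and stream names are illustrative):

```python
import boto3
from moto import mock_logs

@mock_logs
def test_log_round_trip():
    logs = boto3.client('logs', region_name='us-east-1')
    logs.create_log_group(logGroupName='/demo/app')
    logs.create_log_stream(logGroupName='/demo/app', logStreamName='stream-1')
    # timestamp is epoch milliseconds; 0 is fine for a mocked backend
    logs.put_log_events(
        logGroupName='/demo/app',
        logStreamName='stream-1',
        logEvents=[{'timestamp': 0, 'message': 'hello'}],
    )
    events = logs.get_log_events(
        logGroupName='/demo/app', logStreamName='stream-1')['events']
    assert events[0]['message'] == 'hello'
```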
create_project +- [ ] delete_project +- [ ] describe_bundle +- [ ] describe_project +- [ ] export_bundle +- [ ] export_project +- [ ] list_bundles +- [ ] list_projects +- [ ] update_project + +## mturk - 0% implemented +- [ ] accept_qualification_request +- [ ] approve_assignment +- [ ] associate_qualification_with_worker +- [ ] create_additional_assignments_for_hit +- [ ] create_hit +- [ ] create_hit_type +- [ ] create_hit_with_hit_type +- [ ] create_qualification_type +- [ ] create_worker_block +- [ ] delete_hit +- [ ] delete_qualification_type +- [ ] delete_worker_block +- [ ] disassociate_qualification_from_worker +- [ ] get_account_balance +- [ ] get_assignment +- [ ] get_file_upload_url +- [ ] get_hit +- [ ] get_qualification_score +- [ ] get_qualification_type +- [ ] list_assignments_for_hit +- [ ] list_bonus_payments +- [ ] list_hits +- [ ] list_hits_for_qualification_type +- [ ] list_qualification_requests +- [ ] list_qualification_types +- [ ] list_review_policy_results_for_hit +- [ ] list_reviewable_hits +- [ ] list_worker_blocks +- [ ] list_workers_with_qualification_type +- [ ] notify_workers +- [ ] reject_assignment +- [ ] reject_qualification_request +- [ ] send_bonus +- [ ] send_test_event_notification +- [ ] update_expiration_for_hit +- [ ] update_hit_review_status +- [ ] update_hit_type_of_hit +- [ ] update_notification_settings +- [ ] update_qualification_type + +## opsworks - 9% implemented +- [ ] assign_instance +- [ ] assign_volume +- [ ] associate_elastic_ip +- [ ] attach_elastic_load_balancer +- [ ] clone_stack +- [ ] create_app +- [ ] create_deployment +- [X] create_instance +- [X] create_layer +- [X] create_stack +- [ ] create_user_profile +- [ ] delete_app +- [ ] delete_instance +- [ ] delete_layer +- [ ] delete_stack +- [ ] delete_user_profile +- [ ] deregister_ecs_cluster +- [ ] deregister_elastic_ip +- [ ] deregister_instance +- [ ] deregister_rds_db_instance +- [ ] deregister_volume +- [ ] describe_agent_versions +- [ ] describe_apps +- [ ] describe_commands +- [ ] describe_deployments +- [ ] describe_ecs_clusters +- [ ] describe_elastic_ips +- [ ] describe_elastic_load_balancers +- [X] describe_instances +- [X] describe_layers +- [ ] describe_load_based_auto_scaling +- [ ] describe_my_user_profile +- [ ] describe_permissions +- [ ] describe_raid_arrays +- [ ] describe_rds_db_instances +- [ ] describe_service_errors +- [ ] describe_stack_provisioning_parameters +- [ ] describe_stack_summary +- [X] describe_stacks +- [ ] describe_time_based_auto_scaling +- [ ] describe_user_profiles +- [ ] describe_volumes +- [ ] detach_elastic_load_balancer +- [ ] disassociate_elastic_ip +- [ ] get_hostname_suggestion +- [ ] grant_access +- [ ] list_tags +- [ ] reboot_instance +- [ ] register_ecs_cluster +- [ ] register_elastic_ip +- [ ] register_instance +- [ ] register_rds_db_instance +- [ ] register_volume +- [ ] set_load_based_auto_scaling +- [ ] set_permission +- [ ] set_time_based_auto_scaling +- [X] start_instance +- [ ] start_stack +- [ ] stop_instance +- [ ] stop_stack +- [ ] tag_resource +- [ ] unassign_instance +- [ ] unassign_volume +- [ ] untag_resource +- [ ] update_app +- [ ] update_elastic_ip +- [ ] update_instance +- [ ] update_layer +- [ ] update_my_user_profile +- [ ] update_rds_db_instance +- [ ] update_stack +- [ ] update_user_profile +- [ ] update_volume + +## opsworkscm - 0% implemented +- [ ] associate_node +- [ ] create_backup +- [ ] create_server +- [ ] delete_backup +- [ ] delete_server +- [ ] describe_account_attributes +- [ ] describe_backups +- [ 
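OpsWorks is only 9% implemented above, but the checked subset covers the basic stack lifecycle. A sketch under that assumption — the role and instance-profile ARNs are illustrative placeholders, passed through rather than validated against IAM:

```python
import boto3
from moto import mock_opsworks

@mock_opsworks
def test_stacks():
    opsworks = boto3.client('opsworks', region_name='us-east-1')
    stack_id = opsworks.create_stack(
        Name='demo-stack',
        Region='us-east-1',
        ServiceRoleArn='arn:aws:iam::123456789012:role/demo-service-role',
        DefaultInstanceProfileArn='arn:aws:iam::123456789012:instance-profile/demo',
    )['StackId']
    stacks = opsworks.describe_stacks()['Stacks']
    assert stacks[0]['StackId'] == stack_id
```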
] describe_events +- [ ] describe_node_association_status +- [ ] describe_servers +- [ ] disassociate_node +- [ ] restore_server +- [ ] start_maintenance +- [ ] update_server +- [ ] update_server_engine_attributes + +## organizations - 0% implemented +- [ ] accept_handshake +- [ ] attach_policy +- [ ] cancel_handshake +- [ ] create_account +- [ ] create_organization +- [ ] create_organizational_unit +- [ ] create_policy +- [ ] decline_handshake +- [ ] delete_organization +- [ ] delete_organizational_unit +- [ ] delete_policy +- [ ] describe_account +- [ ] describe_create_account_status +- [ ] describe_handshake +- [ ] describe_organization +- [ ] describe_organizational_unit +- [ ] describe_policy +- [ ] detach_policy +- [ ] disable_policy_type +- [ ] enable_all_features +- [ ] enable_policy_type +- [ ] invite_account_to_organization +- [ ] leave_organization +- [ ] list_accounts +- [ ] list_accounts_for_parent +- [ ] list_children +- [ ] list_create_account_status +- [ ] list_handshakes_for_account +- [ ] list_handshakes_for_organization +- [ ] list_organizational_units_for_parent +- [ ] list_parents +- [ ] list_policies +- [ ] list_policies_for_target +- [ ] list_roots +- [ ] list_targets_for_policy +- [ ] move_account +- [ ] remove_account_from_organization +- [ ] update_organizational_unit +- [ ] update_policy + +## pinpoint - 0% implemented +- [ ] create_app +- [ ] create_campaign +- [ ] create_import_job +- [ ] create_segment +- [ ] delete_adm_channel +- [ ] delete_apns_channel +- [ ] delete_apns_sandbox_channel +- [ ] delete_apns_voip_channel +- [ ] delete_apns_voip_sandbox_channel +- [ ] delete_app +- [ ] delete_baidu_channel +- [ ] delete_campaign +- [ ] delete_email_channel +- [ ] delete_event_stream +- [ ] delete_gcm_channel +- [ ] delete_segment +- [ ] delete_sms_channel +- [ ] get_adm_channel +- [ ] get_apns_channel +- [ ] get_apns_sandbox_channel +- [ ] get_apns_voip_channel +- [ ] get_apns_voip_sandbox_channel +- [ ] get_app +- [ ] get_application_settings +- [ ] get_apps +- [ ] get_baidu_channel +- [ ] get_campaign +- [ ] get_campaign_activities +- [ ] get_campaign_version +- [ ] get_campaign_versions +- [ ] get_campaigns +- [ ] get_email_channel +- [ ] get_endpoint +- [ ] get_event_stream +- [ ] get_gcm_channel +- [ ] get_import_job +- [ ] get_import_jobs +- [ ] get_segment +- [ ] get_segment_import_jobs +- [ ] get_segment_version +- [ ] get_segment_versions +- [ ] get_segments +- [ ] get_sms_channel +- [ ] put_event_stream +- [ ] send_messages +- [ ] send_users_messages +- [ ] update_adm_channel +- [ ] update_apns_channel +- [ ] update_apns_sandbox_channel +- [ ] update_apns_voip_channel +- [ ] update_apns_voip_sandbox_channel +- [ ] update_application_settings +- [ ] update_baidu_channel +- [ ] update_campaign +- [ ] update_email_channel +- [ ] update_endpoint +- [ ] update_endpoints_batch +- [ ] update_gcm_channel +- [ ] update_segment +- [ ] update_sms_channel + +## polly - 83% implemented +- [X] delete_lexicon +- [X] describe_voices +- [X] get_lexicon +- [X] list_lexicons +- [X] put_lexicon +- [ ] synthesize_speech + +## pricing - 0% implemented +- [ ] describe_services +- [ ] get_attribute_values +- [ ] get_products + +## rds - 0% implemented +- [ ] add_role_to_db_cluster +- [ ] add_source_identifier_to_subscription +- [ ] add_tags_to_resource +- [ ] apply_pending_maintenance_action +- [ ] authorize_db_security_group_ingress +- [ ] copy_db_cluster_parameter_group +- [ ] copy_db_cluster_snapshot +- [ ] copy_db_parameter_group +- [ ] copy_db_snapshot +- [ ] 
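Polly at 83% is nearly complete, missing only synthesize_speech. A quick smoke-test sketch, assuming moto exports a `mock_polly` decorator in the same style as the other services:

```python
import boto3
from moto import mock_polly

@mock_polly
def test_polly_basics():
    polly = boto3.client('polly', region_name='us-east-1')
    # voice metadata is served from static data; the lexicon store starts empty
    voices = polly.describe_voices()['Voices']
    assert isinstance(voices, list)
    assert polly.list_lexicons()['Lexicons'] == []
```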
copy_option_group +- [ ] create_db_cluster +- [ ] create_db_cluster_parameter_group +- [ ] create_db_cluster_snapshot +- [ ] create_db_instance +- [ ] create_db_instance_read_replica +- [ ] create_db_parameter_group +- [ ] create_db_security_group +- [ ] create_db_snapshot +- [ ] create_db_subnet_group +- [ ] create_event_subscription +- [ ] create_option_group +- [ ] delete_db_cluster +- [ ] delete_db_cluster_parameter_group +- [ ] delete_db_cluster_snapshot +- [ ] delete_db_instance +- [ ] delete_db_parameter_group +- [ ] delete_db_security_group +- [ ] delete_db_snapshot +- [ ] delete_db_subnet_group +- [ ] delete_event_subscription +- [ ] delete_option_group +- [ ] describe_account_attributes +- [ ] describe_certificates +- [ ] describe_db_cluster_parameter_groups +- [ ] describe_db_cluster_parameters +- [ ] describe_db_cluster_snapshot_attributes +- [ ] describe_db_cluster_snapshots +- [ ] describe_db_clusters +- [ ] describe_db_engine_versions +- [ ] describe_db_instances +- [ ] describe_db_log_files +- [ ] describe_db_parameter_groups +- [ ] describe_db_parameters +- [ ] describe_db_security_groups +- [ ] describe_db_snapshot_attributes +- [ ] describe_db_snapshots +- [ ] describe_db_subnet_groups +- [ ] describe_engine_default_cluster_parameters +- [ ] describe_engine_default_parameters +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_option_group_options +- [ ] describe_option_groups +- [ ] describe_orderable_db_instance_options +- [ ] describe_pending_maintenance_actions +- [ ] describe_reserved_db_instances +- [ ] describe_reserved_db_instances_offerings +- [ ] describe_source_regions +- [ ] describe_valid_db_instance_modifications +- [ ] download_db_log_file_portion +- [ ] failover_db_cluster +- [ ] list_tags_for_resource +- [ ] modify_db_cluster +- [ ] modify_db_cluster_parameter_group +- [ ] modify_db_cluster_snapshot_attribute +- [ ] modify_db_instance +- [ ] modify_db_parameter_group +- [ ] modify_db_snapshot +- [ ] modify_db_snapshot_attribute +- [ ] modify_db_subnet_group +- [ ] modify_event_subscription +- [ ] modify_option_group +- [ ] promote_read_replica +- [ ] promote_read_replica_db_cluster +- [ ] purchase_reserved_db_instances_offering +- [ ] reboot_db_instance +- [ ] remove_role_from_db_cluster +- [ ] remove_source_identifier_from_subscription +- [ ] remove_tags_from_resource +- [ ] reset_db_cluster_parameter_group +- [ ] reset_db_parameter_group +- [ ] restore_db_cluster_from_s3 +- [ ] restore_db_cluster_from_snapshot +- [ ] restore_db_cluster_to_point_in_time +- [ ] restore_db_instance_from_db_snapshot +- [ ] restore_db_instance_to_point_in_time +- [ ] revoke_db_security_group_ingress +- [ ] start_db_instance +- [ ] stop_db_instance + +## redshift - 31% implemented +- [ ] authorize_cluster_security_group_ingress +- [ ] authorize_snapshot_access +- [ ] copy_cluster_snapshot +- [X] create_cluster +- [X] create_cluster_parameter_group +- [X] create_cluster_security_group +- [X] create_cluster_snapshot +- [X] create_cluster_subnet_group +- [ ] create_event_subscription +- [ ] create_hsm_client_certificate +- [ ] create_hsm_configuration +- [ ] create_snapshot_copy_grant +- [X] create_tags +- [X] delete_cluster +- [X] delete_cluster_parameter_group +- [X] delete_cluster_security_group +- [X] delete_cluster_snapshot +- [X] delete_cluster_subnet_group +- [ ] delete_event_subscription +- [ ] delete_hsm_client_certificate +- [ ] delete_hsm_configuration +- [ ] delete_snapshot_copy_grant +- [X] delete_tags +- [X] 
describe_cluster_parameter_groups +- [ ] describe_cluster_parameters +- [X] describe_cluster_security_groups +- [X] describe_cluster_snapshots +- [X] describe_cluster_subnet_groups +- [ ] describe_cluster_versions +- [X] describe_clusters +- [ ] describe_default_cluster_parameters +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_hsm_client_certificates +- [ ] describe_hsm_configurations +- [ ] describe_logging_status +- [ ] describe_orderable_cluster_options +- [ ] describe_reserved_node_offerings +- [ ] describe_reserved_nodes +- [ ] describe_resize +- [ ] describe_snapshot_copy_grants +- [ ] describe_table_restore_status +- [X] describe_tags +- [ ] disable_logging +- [ ] disable_snapshot_copy +- [ ] enable_logging +- [ ] enable_snapshot_copy +- [ ] get_cluster_credentials +- [X] modify_cluster +- [ ] modify_cluster_iam_roles +- [ ] modify_cluster_parameter_group +- [ ] modify_cluster_subnet_group +- [ ] modify_event_subscription +- [ ] modify_snapshot_copy_retention_period +- [ ] purchase_reserved_node_offering +- [ ] reboot_cluster +- [ ] reset_cluster_parameter_group +- [X] restore_from_cluster_snapshot +- [ ] restore_table_from_cluster_snapshot +- [ ] revoke_cluster_security_group_ingress +- [ ] revoke_snapshot_access +- [ ] rotate_encryption_key + +## rekognition - 0% implemented +- [ ] compare_faces +- [ ] create_collection +- [ ] delete_collection +- [ ] delete_faces +- [ ] detect_faces +- [ ] detect_labels +- [ ] detect_moderation_labels +- [ ] get_celebrity_info +- [ ] index_faces +- [ ] list_collections +- [ ] list_faces +- [ ] recognize_celebrities +- [ ] search_faces +- [ ] search_faces_by_image + +## resourcegroupstaggingapi - 0% implemented +- [ ] get_resources +- [ ] get_tag_keys +- [ ] get_tag_values +- [ ] tag_resources +- [ ] untag_resources + +## route53 - 13% implemented +- [ ] associate_vpc_with_hosted_zone +- [ ] change_resource_record_sets +- [X] change_tags_for_resource +- [X] create_health_check +- [X] create_hosted_zone +- [ ] create_query_logging_config +- [ ] create_reusable_delegation_set +- [ ] create_traffic_policy +- [ ] create_traffic_policy_instance +- [ ] create_traffic_policy_version +- [ ] create_vpc_association_authorization +- [X] delete_health_check +- [X] delete_hosted_zone +- [ ] delete_query_logging_config +- [ ] delete_reusable_delegation_set +- [ ] delete_traffic_policy +- [ ] delete_traffic_policy_instance +- [ ] delete_vpc_association_authorization +- [ ] disassociate_vpc_from_hosted_zone +- [ ] get_change +- [ ] get_checker_ip_ranges +- [ ] get_geo_location +- [ ] get_health_check +- [ ] get_health_check_count +- [ ] get_health_check_last_failure_reason +- [ ] get_health_check_status +- [X] get_hosted_zone +- [ ] get_hosted_zone_count +- [ ] get_query_logging_config +- [ ] get_reusable_delegation_set +- [ ] get_traffic_policy +- [ ] get_traffic_policy_instance +- [ ] get_traffic_policy_instance_count +- [ ] list_geo_locations +- [ ] list_health_checks +- [ ] list_hosted_zones +- [ ] list_hosted_zones_by_name +- [ ] list_query_logging_configs +- [ ] list_resource_record_sets +- [ ] list_reusable_delegation_sets +- [X] list_tags_for_resource +- [ ] list_tags_for_resources +- [ ] list_traffic_policies +- [ ] list_traffic_policy_instances +- [ ] list_traffic_policy_instances_by_hosted_zone +- [ ] list_traffic_policy_instances_by_policy +- [ ] list_traffic_policy_versions +- [ ] list_vpc_association_authorizations +- [ ] test_dns_answer +- [ ] update_health_check +- [ ] 
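Redshift's 31% above includes the core cluster lifecycle (create/describe/modify/delete). A minimal sketch using checked endpoints only — identifier and credentials are illustrative:

```python
import boto3
from moto import mock_redshift

@mock_redshift
def test_cluster_lifecycle():
    redshift = boto3.client('redshift', region_name='us-east-1')
    redshift.create_cluster(
        ClusterIdentifier='demo-cluster',
        NodeType='dc1.large',
        MasterUsername='admin',
        MasterUserPassword='Password1',
    )
    clusters = redshift.describe_clusters()['Clusters']
    assert clusters[0]['ClusterIdentifier'] == 'demo-cluster'
```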
update_hosted_zone_comment +- [ ] update_traffic_policy_comment +- [ ] update_traffic_policy_instance + +## route53domains - 0% implemented +- [ ] check_domain_availability +- [ ] check_domain_transferability +- [ ] delete_tags_for_domain +- [ ] disable_domain_auto_renew +- [ ] disable_domain_transfer_lock +- [ ] enable_domain_auto_renew +- [ ] enable_domain_transfer_lock +- [ ] get_contact_reachability_status +- [ ] get_domain_detail +- [ ] get_domain_suggestions +- [ ] get_operation_detail +- [ ] list_domains +- [ ] list_operations +- [ ] list_tags_for_domain +- [ ] register_domain +- [ ] renew_domain +- [ ] resend_contact_reachability_email +- [ ] retrieve_domain_auth_code +- [ ] transfer_domain +- [ ] update_domain_contact +- [ ] update_domain_contact_privacy +- [ ] update_domain_nameservers +- [ ] update_tags_for_domain +- [ ] view_billing + +## s3 - 12% implemented +- [ ] abort_multipart_upload +- [ ] complete_multipart_upload +- [ ] copy_object +- [X] create_bucket +- [ ] create_multipart_upload +- [X] delete_bucket +- [ ] delete_bucket_analytics_configuration +- [X] delete_bucket_cors +- [ ] delete_bucket_encryption +- [ ] delete_bucket_inventory_configuration +- [ ] delete_bucket_lifecycle +- [ ] delete_bucket_metrics_configuration +- [X] delete_bucket_policy +- [ ] delete_bucket_replication +- [X] delete_bucket_tagging +- [ ] delete_bucket_website +- [ ] delete_object +- [ ] delete_object_tagging +- [ ] delete_objects +- [ ] get_bucket_accelerate_configuration +- [X] get_bucket_acl +- [ ] get_bucket_analytics_configuration +- [ ] get_bucket_cors +- [ ] get_bucket_encryption +- [ ] get_bucket_inventory_configuration +- [ ] get_bucket_lifecycle +- [ ] get_bucket_lifecycle_configuration +- [ ] get_bucket_location +- [ ] get_bucket_logging +- [ ] get_bucket_metrics_configuration +- [ ] get_bucket_notification +- [ ] get_bucket_notification_configuration +- [X] get_bucket_policy +- [ ] get_bucket_replication +- [ ] get_bucket_request_payment +- [ ] get_bucket_tagging +- [X] get_bucket_versioning +- [ ] get_bucket_website +- [ ] get_object +- [ ] get_object_acl +- [ ] get_object_tagging +- [ ] get_object_torrent +- [ ] head_bucket +- [ ] head_object +- [ ] list_bucket_analytics_configurations +- [ ] list_bucket_inventory_configurations +- [ ] list_bucket_metrics_configurations +- [ ] list_buckets +- [ ] list_multipart_uploads +- [ ] list_object_versions +- [ ] list_objects +- [ ] list_objects_v2 +- [ ] list_parts +- [ ] put_bucket_accelerate_configuration +- [ ] put_bucket_acl +- [ ] put_bucket_analytics_configuration +- [X] put_bucket_cors +- [ ] put_bucket_encryption +- [ ] put_bucket_inventory_configuration +- [ ] put_bucket_lifecycle +- [ ] put_bucket_lifecycle_configuration +- [ ] put_bucket_logging +- [ ] put_bucket_metrics_configuration +- [ ] put_bucket_notification +- [ ] put_bucket_notification_configuration +- [ ] put_bucket_policy +- [ ] put_bucket_replication +- [ ] put_bucket_request_payment +- [X] put_bucket_tagging +- [ ] put_bucket_versioning +- [ ] put_bucket_website +- [ ] put_object +- [ ] put_object_acl +- [ ] put_object_tagging +- [ ] restore_object +- [ ] upload_part +- [ ] upload_part_copy + +## sdb - 0% implemented +- [ ] batch_delete_attributes +- [ ] batch_put_attributes +- [ ] create_domain +- [ ] delete_attributes +- [ ] delete_domain +- [ ] domain_metadata +- [ ] get_attributes +- [ ] list_domains +- [ ] put_attributes +- [ ] select + +## servicecatalog - 0% implemented +- [ ] accept_portfolio_share +- [ ] associate_principal_with_portfolio +- [ ] 
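The 12% figure for S3 almost certainly understates moto's real S3 support: the coverage script appears to match botocore operation names against response method names, and moto's S3 handler dispatches REST calls generically, so object-level operations go uncounted. The bucket-level endpoints it does detect can still be exercised directly; bucket name and CORS rule below are illustrative:

```python
import boto3
from moto import mock_s3

@mock_s3
def test_bucket_cors():
    s3 = boto3.client('s3', region_name='us-east-1')
    s3.create_bucket(Bucket='demo-bucket')
    s3.put_bucket_cors(
        Bucket='demo-bucket',
        CORSConfiguration={'CORSRules': [
            {'AllowedMethods': ['GET'], 'AllowedOrigins': ['*']},
        ]},
    )
    s3.delete_bucket_cors(Bucket='demo-bucket')
    s3.delete_bucket(Bucket='demo-bucket')
```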
associate_product_with_portfolio +- [ ] associate_tag_option_with_resource +- [ ] copy_product +- [ ] create_constraint +- [ ] create_portfolio +- [ ] create_portfolio_share +- [ ] create_product +- [ ] create_provisioning_artifact +- [ ] create_tag_option +- [ ] delete_constraint +- [ ] delete_portfolio +- [ ] delete_portfolio_share +- [ ] delete_product +- [ ] delete_provisioning_artifact +- [ ] describe_constraint +- [ ] describe_copy_product_status +- [ ] describe_portfolio +- [ ] describe_product +- [ ] describe_product_as_admin +- [ ] describe_product_view +- [ ] describe_provisioned_product +- [ ] describe_provisioning_artifact +- [ ] describe_provisioning_parameters +- [ ] describe_record +- [ ] describe_tag_option +- [ ] disassociate_principal_from_portfolio +- [ ] disassociate_product_from_portfolio +- [ ] disassociate_tag_option_from_resource +- [ ] list_accepted_portfolio_shares +- [ ] list_constraints_for_portfolio +- [ ] list_launch_paths +- [ ] list_portfolio_access +- [ ] list_portfolios +- [ ] list_portfolios_for_product +- [ ] list_principals_for_portfolio +- [ ] list_provisioning_artifacts +- [ ] list_record_history +- [ ] list_resources_for_tag_option +- [ ] list_tag_options +- [ ] provision_product +- [ ] reject_portfolio_share +- [ ] scan_provisioned_products +- [ ] search_products +- [ ] search_products_as_admin +- [ ] terminate_provisioned_product +- [ ] update_constraint +- [ ] update_portfolio +- [ ] update_product +- [ ] update_provisioned_product +- [ ] update_provisioning_artifact +- [ ] update_tag_option + +## ses - 13% implemented +- [ ] clone_receipt_rule_set +- [ ] create_configuration_set +- [ ] create_configuration_set_event_destination +- [ ] create_configuration_set_tracking_options +- [ ] create_receipt_filter +- [ ] create_receipt_rule +- [ ] create_receipt_rule_set +- [ ] create_template +- [ ] delete_configuration_set +- [ ] delete_configuration_set_event_destination +- [ ] delete_configuration_set_tracking_options +- [X] delete_identity +- [ ] delete_identity_policy +- [ ] delete_receipt_filter +- [ ] delete_receipt_rule +- [ ] delete_receipt_rule_set +- [ ] delete_template +- [ ] delete_verified_email_address +- [ ] describe_active_receipt_rule_set +- [ ] describe_configuration_set +- [ ] describe_receipt_rule +- [ ] describe_receipt_rule_set +- [ ] get_identity_dkim_attributes +- [ ] get_identity_mail_from_domain_attributes +- [ ] get_identity_notification_attributes +- [ ] get_identity_policies +- [ ] get_identity_verification_attributes +- [X] get_send_quota +- [ ] get_send_statistics +- [ ] get_template +- [ ] list_configuration_sets +- [X] list_identities +- [ ] list_identity_policies +- [ ] list_receipt_filters +- [ ] list_receipt_rule_sets +- [ ] list_templates +- [X] list_verified_email_addresses +- [ ] put_identity_policy +- [ ] reorder_receipt_rule_set +- [ ] send_bounce +- [ ] send_bulk_templated_email +- [X] send_email +- [X] send_raw_email +- [ ] send_templated_email +- [ ] set_active_receipt_rule_set +- [ ] set_identity_dkim_enabled +- [ ] set_identity_feedback_forwarding_enabled +- [ ] set_identity_headers_in_notifications_enabled +- [ ] set_identity_mail_from_domain +- [ ] set_identity_notification_topic +- [ ] set_receipt_rule_position +- [ ] test_render_template +- [ ] update_configuration_set_event_destination +- [ ] update_configuration_set_tracking_options +- [ ] update_receipt_rule +- [ ] update_template +- [ ] verify_domain_dkim +- [ ] verify_domain_identity +- [X] verify_email_address +- [X] verify_email_identity + +## 
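The checked SES endpoints above support the classic verify-then-send flow: moto rejects send_email when the Source identity has not been verified, which is exactly what tests usually want to exercise. A minimal sketch (addresses are illustrative):

```python
import boto3
from moto import mock_ses

@mock_ses
def test_verify_then_send():
    ses = boto3.client('ses', region_name='us-east-1')
    # send_email from an unverified Source raises MessageRejected
    ses.verify_email_identity(EmailAddress='sender@example.com')
    ses.send_email(
        Source='sender@example.com',
        Destination={'ToAddresses': ['recipient@example.com']},
        Message={
            'Subject': {'Data': 'hello'},
            'Body': {'Text': {'Data': 'hello from moto'}},
        },
    )
```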
shield - 0% implemented +- [ ] create_protection +- [ ] create_subscription +- [ ] delete_protection +- [ ] delete_subscription +- [ ] describe_attack +- [ ] describe_protection +- [ ] describe_subscription +- [ ] list_attacks +- [ ] list_protections + +## sms - 0% implemented +- [ ] create_replication_job +- [ ] delete_replication_job +- [ ] delete_server_catalog +- [ ] disassociate_connector +- [ ] get_connectors +- [ ] get_replication_jobs +- [ ] get_replication_runs +- [ ] get_servers +- [ ] import_server_catalog +- [ ] start_on_demand_replication_run +- [ ] update_replication_job + +## snowball - 0% implemented +- [ ] cancel_cluster +- [ ] cancel_job +- [ ] create_address +- [ ] create_cluster +- [ ] create_job +- [ ] describe_address +- [ ] describe_addresses +- [ ] describe_cluster +- [ ] describe_job +- [ ] get_job_manifest +- [ ] get_job_unlock_code +- [ ] get_snowball_usage +- [ ] list_cluster_jobs +- [ ] list_clusters +- [ ] list_jobs +- [ ] update_cluster +- [ ] update_job + +## sns - 53% implemented +- [ ] add_permission +- [ ] check_if_phone_number_is_opted_out +- [ ] confirm_subscription +- [X] create_platform_application +- [X] create_platform_endpoint +- [X] create_topic +- [X] delete_endpoint +- [X] delete_platform_application +- [X] delete_topic +- [ ] get_endpoint_attributes +- [ ] get_platform_application_attributes +- [ ] get_sms_attributes +- [X] get_subscription_attributes +- [ ] get_topic_attributes +- [X] list_endpoints_by_platform_application +- [ ] list_phone_numbers_opted_out +- [X] list_platform_applications +- [X] list_subscriptions +- [ ] list_subscriptions_by_topic +- [X] list_topics +- [ ] opt_in_phone_number +- [X] publish +- [ ] remove_permission +- [X] set_endpoint_attributes +- [ ] set_platform_application_attributes +- [ ] set_sms_attributes +- [X] set_subscription_attributes +- [ ] set_topic_attributes +- [X] subscribe +- [X] unsubscribe + +## sqs - 60% implemented +- [X] add_permission +- [X] change_message_visibility +- [ ] change_message_visibility_batch +- [X] create_queue +- [X] delete_message +- [ ] delete_message_batch +- [X] delete_queue +- [ ] get_queue_attributes +- [ ] get_queue_url +- [X] list_dead_letter_source_queues +- [ ] list_queue_tags +- [X] list_queues +- [X] purge_queue +- [ ] receive_message +- [X] remove_permission +- [X] send_message +- [ ] send_message_batch +- [ ] set_queue_attributes +- [X] tag_queue +- [X] untag_queue + +## ssm - 9% implemented +- [X] add_tags_to_resource +- [ ] cancel_command +- [ ] create_activation +- [ ] create_association +- [ ] create_association_batch +- [ ] create_document +- [ ] create_maintenance_window +- [ ] create_patch_baseline +- [ ] create_resource_data_sync +- [ ] delete_activation +- [ ] delete_association +- [ ] delete_document +- [ ] delete_maintenance_window +- [X] delete_parameter +- [X] delete_parameters +- [ ] delete_patch_baseline +- [ ] delete_resource_data_sync +- [ ] deregister_managed_instance +- [ ] deregister_patch_baseline_for_patch_group +- [ ] deregister_target_from_maintenance_window +- [ ] deregister_task_from_maintenance_window +- [ ] describe_activations +- [ ] describe_association +- [ ] describe_automation_executions +- [ ] describe_available_patches +- [ ] describe_document +- [ ] describe_document_permission +- [ ] describe_effective_instance_associations +- [ ] describe_effective_patches_for_patch_baseline +- [ ] describe_instance_associations_status +- [ ] describe_instance_information +- [ ] describe_instance_patch_states +- [ ] 
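SNS (53% above) has the core topic publish surface checked off. A minimal sketch with an illustrative topic name:

```python
import boto3
from moto import mock_sns

@mock_sns
def test_topic_publish():
    sns = boto3.client('sns', region_name='us-east-1')
    topic_arn = sns.create_topic(Name='demo-topic')['TopicArn']
    assert any(t['TopicArn'] == topic_arn for t in sns.list_topics()['Topics'])
    # publish succeeds even with no subscriptions and returns a MessageId
    sns.publish(TopicArn=topic_arn, Message='hello')
```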
describe_instance_patch_states_for_patch_group +- [ ] describe_instance_patches +- [ ] describe_maintenance_window_execution_task_invocations +- [ ] describe_maintenance_window_execution_tasks +- [ ] describe_maintenance_window_executions +- [ ] describe_maintenance_window_targets +- [ ] describe_maintenance_window_tasks +- [ ] describe_maintenance_windows +- [ ] describe_parameters +- [ ] describe_patch_baselines +- [ ] describe_patch_group_state +- [ ] describe_patch_groups +- [ ] get_automation_execution +- [ ] get_command_invocation +- [ ] get_default_patch_baseline +- [ ] get_deployable_patch_snapshot_for_instance +- [ ] get_document +- [ ] get_inventory +- [ ] get_inventory_schema +- [ ] get_maintenance_window +- [ ] get_maintenance_window_execution +- [ ] get_maintenance_window_execution_task +- [ ] get_maintenance_window_execution_task_invocation +- [ ] get_maintenance_window_task +- [X] get_parameter +- [ ] get_parameter_history +- [X] get_parameters +- [X] get_parameters_by_path +- [ ] get_patch_baseline +- [ ] get_patch_baseline_for_patch_group +- [ ] list_association_versions +- [ ] list_associations +- [ ] list_command_invocations +- [ ] list_commands +- [ ] list_compliance_items +- [ ] list_compliance_summaries +- [ ] list_document_versions +- [ ] list_documents +- [ ] list_inventory_entries +- [ ] list_resource_compliance_summaries +- [ ] list_resource_data_sync +- [X] list_tags_for_resource +- [ ] modify_document_permission +- [ ] put_compliance_items +- [ ] put_inventory +- [X] put_parameter +- [ ] register_default_patch_baseline +- [ ] register_patch_baseline_for_patch_group +- [ ] register_target_with_maintenance_window +- [ ] register_task_with_maintenance_window +- [X] remove_tags_from_resource +- [ ] send_automation_signal +- [ ] send_command +- [ ] start_automation_execution +- [ ] stop_automation_execution +- [ ] update_association +- [ ] update_association_status +- [ ] update_document +- [ ] update_document_default_version +- [ ] update_maintenance_window +- [ ] update_maintenance_window_target +- [ ] update_maintenance_window_task +- [ ] update_managed_instance_role +- [ ] update_patch_baseline + +## stepfunctions - 0% implemented +- [ ] create_activity +- [ ] create_state_machine +- [ ] delete_activity +- [ ] delete_state_machine +- [ ] describe_activity +- [ ] describe_execution +- [ ] describe_state_machine +- [ ] get_activity_task +- [ ] get_execution_history +- [ ] list_activities +- [ ] list_executions +- [ ] list_state_machines +- [ ] send_task_failure +- [ ] send_task_heartbeat +- [ ] send_task_success +- [ ] start_execution +- [ ] stop_execution + +## storagegateway - 0% implemented +- [ ] activate_gateway +- [ ] add_cache +- [ ] add_tags_to_resource +- [ ] add_upload_buffer +- [ ] add_working_storage +- [ ] cancel_archival +- [ ] cancel_retrieval +- [ ] create_cached_iscsi_volume +- [ ] create_nfs_file_share +- [ ] create_snapshot +- [ ] create_snapshot_from_volume_recovery_point +- [ ] create_stored_iscsi_volume +- [ ] create_tape_with_barcode +- [ ] create_tapes +- [ ] delete_bandwidth_rate_limit +- [ ] delete_chap_credentials +- [ ] delete_file_share +- [ ] delete_gateway +- [ ] delete_snapshot_schedule +- [ ] delete_tape +- [ ] delete_tape_archive +- [ ] delete_volume +- [ ] describe_bandwidth_rate_limit +- [ ] describe_cache +- [ ] describe_cached_iscsi_volumes +- [ ] describe_chap_credentials +- [ ] describe_gateway_information +- [ ] describe_maintenance_start_time +- [ ] describe_nfs_file_shares +- [ ] describe_snapshot_schedule +- [ ] 
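SSM's 9% above is concentrated in the parameter-store endpoints, which round-trip cleanly. A minimal sketch (parameter name and value are illustrative):

```python
import boto3
from moto import mock_ssm

@mock_ssm
def test_parameter_round_trip():
    ssm = boto3.client('ssm', region_name='us-east-1')
    ssm.put_parameter(Name='/demo/config', Value='hello', Type='String')
    param = ssm.get_parameter(Name='/demo/config')['Parameter']
    assert param['Value'] == 'hello'
```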
describe_stored_iscsi_volumes +- [ ] describe_tape_archives +- [ ] describe_tape_recovery_points +- [ ] describe_tapes +- [ ] describe_upload_buffer +- [ ] describe_vtl_devices +- [ ] describe_working_storage +- [ ] disable_gateway +- [ ] list_file_shares +- [ ] list_gateways +- [ ] list_local_disks +- [ ] list_tags_for_resource +- [ ] list_tapes +- [ ] list_volume_initiators +- [ ] list_volume_recovery_points +- [ ] list_volumes +- [ ] refresh_cache +- [ ] remove_tags_from_resource +- [ ] reset_cache +- [ ] retrieve_tape_archive +- [ ] retrieve_tape_recovery_point +- [ ] set_local_console_password +- [ ] shutdown_gateway +- [ ] start_gateway +- [ ] update_bandwidth_rate_limit +- [ ] update_chap_credentials +- [ ] update_gateway_information +- [ ] update_gateway_software_now +- [ ] update_maintenance_start_time +- [ ] update_nfs_file_share +- [ ] update_snapshot_schedule +- [ ] update_vtl_device_type + +## sts - 42% implemented +- [X] assume_role +- [ ] assume_role_with_saml +- [ ] assume_role_with_web_identity +- [ ] decode_authorization_message +- [ ] get_caller_identity +- [X] get_federation_token +- [X] get_session_token + +## support - 0% implemented +- [ ] add_attachments_to_set +- [ ] add_communication_to_case +- [ ] create_case +- [ ] describe_attachment +- [ ] describe_cases +- [ ] describe_communications +- [ ] describe_services +- [ ] describe_severity_levels +- [ ] describe_trusted_advisor_check_refresh_statuses +- [ ] describe_trusted_advisor_check_result +- [ ] describe_trusted_advisor_check_summaries +- [ ] describe_trusted_advisor_checks +- [ ] refresh_trusted_advisor_check +- [ ] resolve_case + +## swf - 54% implemented +- [ ] count_closed_workflow_executions +- [ ] count_open_workflow_executions +- [X] count_pending_activity_tasks +- [X] count_pending_decision_tasks +- [ ] deprecate_activity_type +- [X] deprecate_domain +- [ ] deprecate_workflow_type +- [ ] describe_activity_type +- [X] describe_domain +- [X] describe_workflow_execution +- [ ] describe_workflow_type +- [ ] get_workflow_execution_history +- [ ] list_activity_types +- [X] list_closed_workflow_executions +- [X] list_domains +- [X] list_open_workflow_executions +- [ ] list_workflow_types +- [X] poll_for_activity_task +- [X] poll_for_decision_task +- [X] record_activity_task_heartbeat +- [ ] register_activity_type +- [X] register_domain +- [ ] register_workflow_type +- [ ] request_cancel_workflow_execution +- [ ] respond_activity_task_canceled +- [X] respond_activity_task_completed +- [X] respond_activity_task_failed +- [X] respond_decision_task_completed +- [ ] signal_workflow_execution +- [X] start_workflow_execution +- [X] terminate_workflow_execution + +## waf - 0% implemented +- [ ] create_byte_match_set +- [ ] create_geo_match_set +- [ ] create_ip_set +- [ ] create_rate_based_rule +- [ ] create_regex_match_set +- [ ] create_regex_pattern_set +- [ ] create_rule +- [ ] create_size_constraint_set +- [ ] create_sql_injection_match_set +- [ ] create_web_acl +- [ ] create_xss_match_set +- [ ] delete_byte_match_set +- [ ] delete_geo_match_set +- [ ] delete_ip_set +- [ ] delete_rate_based_rule +- [ ] delete_regex_match_set +- [ ] delete_regex_pattern_set +- [ ] delete_rule +- [ ] delete_size_constraint_set +- [ ] delete_sql_injection_match_set +- [ ] delete_web_acl +- [ ] delete_xss_match_set +- [ ] get_byte_match_set +- [ ] get_change_token +- [ ] get_change_token_status +- [ ] get_geo_match_set +- [ ] get_ip_set +- [ ] get_rate_based_rule +- [ ] get_rate_based_rule_managed_keys +- [ ] get_regex_match_set +- [ 
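STS (42% above) covers the token-vending calls. A sketch — the role ARN is an illustrative placeholder, since the mocked backend hands out credentials without validating the role against IAM:

```python
import boto3
from moto import mock_sts

@mock_sts
def test_assume_role():
    sts = boto3.client('sts', region_name='us-east-1')
    creds = sts.assume_role(
        RoleArn='arn:aws:iam::123456789012:role/demo-role',
        RoleSessionName='demo-session',
    )['Credentials']
    assert 'SessionToken' in creds
```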
] get_regex_pattern_set +- [ ] get_rule +- [ ] get_sampled_requests +- [ ] get_size_constraint_set +- [ ] get_sql_injection_match_set +- [ ] get_web_acl +- [ ] get_xss_match_set +- [ ] list_byte_match_sets +- [ ] list_geo_match_sets +- [ ] list_ip_sets +- [ ] list_rate_based_rules +- [ ] list_regex_match_sets +- [ ] list_regex_pattern_sets +- [ ] list_rules +- [ ] list_size_constraint_sets +- [ ] list_sql_injection_match_sets +- [ ] list_web_acls +- [ ] list_xss_match_sets +- [ ] update_byte_match_set +- [ ] update_geo_match_set +- [ ] update_ip_set +- [ ] update_rate_based_rule +- [ ] update_regex_match_set +- [ ] update_regex_pattern_set +- [ ] update_rule +- [ ] update_size_constraint_set +- [ ] update_sql_injection_match_set +- [ ] update_web_acl +- [ ] update_xss_match_set + +## waf-regional - 0% implemented +- [ ] associate_web_acl +- [ ] create_byte_match_set +- [ ] create_geo_match_set +- [ ] create_ip_set +- [ ] create_rate_based_rule +- [ ] create_regex_match_set +- [ ] create_regex_pattern_set +- [ ] create_rule +- [ ] create_size_constraint_set +- [ ] create_sql_injection_match_set +- [ ] create_web_acl +- [ ] create_xss_match_set +- [ ] delete_byte_match_set +- [ ] delete_geo_match_set +- [ ] delete_ip_set +- [ ] delete_rate_based_rule +- [ ] delete_regex_match_set +- [ ] delete_regex_pattern_set +- [ ] delete_rule +- [ ] delete_size_constraint_set +- [ ] delete_sql_injection_match_set +- [ ] delete_web_acl +- [ ] delete_xss_match_set +- [ ] disassociate_web_acl +- [ ] get_byte_match_set +- [ ] get_change_token +- [ ] get_change_token_status +- [ ] get_geo_match_set +- [ ] get_ip_set +- [ ] get_rate_based_rule +- [ ] get_rate_based_rule_managed_keys +- [ ] get_regex_match_set +- [ ] get_regex_pattern_set +- [ ] get_rule +- [ ] get_sampled_requests +- [ ] get_size_constraint_set +- [ ] get_sql_injection_match_set +- [ ] get_web_acl +- [ ] get_web_acl_for_resource +- [ ] get_xss_match_set +- [ ] list_byte_match_sets +- [ ] list_geo_match_sets +- [ ] list_ip_sets +- [ ] list_rate_based_rules +- [ ] list_regex_match_sets +- [ ] list_regex_pattern_sets +- [ ] list_resources_for_web_acl +- [ ] list_rules +- [ ] list_size_constraint_sets +- [ ] list_sql_injection_match_sets +- [ ] list_web_acls +- [ ] list_xss_match_sets +- [ ] update_byte_match_set +- [ ] update_geo_match_set +- [ ] update_ip_set +- [ ] update_rate_based_rule +- [ ] update_regex_match_set +- [ ] update_regex_pattern_set +- [ ] update_rule +- [ ] update_size_constraint_set +- [ ] update_sql_injection_match_set +- [ ] update_web_acl +- [ ] update_xss_match_set + +## workdocs - 0% implemented +- [ ] abort_document_version_upload +- [ ] activate_user +- [ ] add_resource_permissions +- [ ] create_comment +- [ ] create_custom_metadata +- [ ] create_folder +- [ ] create_labels +- [ ] create_notification_subscription +- [ ] create_user +- [ ] deactivate_user +- [ ] delete_comment +- [ ] delete_custom_metadata +- [ ] delete_document +- [ ] delete_folder +- [ ] delete_folder_contents +- [ ] delete_labels +- [ ] delete_notification_subscription +- [ ] delete_user +- [ ] describe_activities +- [ ] describe_comments +- [ ] describe_document_versions +- [ ] describe_folder_contents +- [ ] describe_notification_subscriptions +- [ ] describe_resource_permissions +- [ ] describe_root_folders +- [ ] describe_users +- [ ] get_current_user +- [ ] get_document +- [ ] get_document_path +- [ ] get_document_version +- [ ] get_folder +- [ ] get_folder_path +- [ ] initiate_document_version_upload +- [ ] remove_all_resource_permissions +- [ ] 
remove_resource_permission +- [ ] update_document +- [ ] update_document_version +- [ ] update_folder +- [ ] update_user + +## workspaces - 0% implemented +- [ ] create_tags +- [ ] create_workspaces +- [ ] delete_tags +- [ ] describe_tags +- [ ] describe_workspace_bundles +- [ ] describe_workspace_directories +- [ ] describe_workspaces +- [ ] describe_workspaces_connection_status +- [ ] modify_workspace_properties +- [ ] reboot_workspaces +- [ ] rebuild_workspaces +- [ ] start_workspaces +- [ ] stop_workspaces +- [ ] terminate_workspaces + +## xray - 0% implemented +- [ ] batch_get_traces +- [ ] get_service_graph +- [ ] get_trace_graph +- [ ] get_trace_summaries +- [ ] put_telemetry_records +- [ ] put_trace_segments diff --git a/MANIFEST.in b/MANIFEST.in index cd1f1e886..43e8120e4 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,5 +1,6 @@ include README.md LICENSE AUTHORS.md include requirements.txt requirements-dev.txt tox.ini include moto/ec2/resources/instance_types.json +include moto/ec2/resources/amis.json recursive-include moto/templates * recursive-include tests * diff --git a/Makefile b/Makefile index a963c8293..99b7f2620 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,13 @@ SHELL := /bin/bash +ifeq ($(TEST_SERVER_MODE), true) + # exclude test_iot and test_iotdata for now + # because authentication of iot is very complicated + TEST_EXCLUDE := --exclude='test_iot.*' +else + TEST_EXCLUDE := +endif + init: @python setup.py develop @pip install -r requirements.txt @@ -10,8 +18,7 @@ lint: test: lint rm -f .coverage rm -rf cover - @nosetests -sv --with-coverage --cover-html ./tests/ - + @nosetests -sv --with-coverage --cover-html ./tests/ $(TEST_EXCLUDE) test_server: @TEST_SERVER_MODE=true nosetests -sv --with-coverage --cover-html ./tests/ @@ -29,7 +36,14 @@ tag_github_release: git tag `python setup.py --version` git push origin `python setup.py --version` -publish: upload_pypi_artifact push_dockerhub_image tag_github_release +publish: implementation_coverage \ + upload_pypi_artifact \ + tag_github_release \ + push_dockerhub_image + +implementation_coverage: + ./scripts/implementation_coverage.py > IMPLEMENTATION_COVERAGE.md + git commit IMPLEMENTATION_COVERAGE.md -m "Updating implementation coverage" scaffold: @pip install -r requirements-dev.txt > /dev/null diff --git a/README.md b/README.md index 7ced7b895..59dc67432 100644 --- a/README.md +++ b/README.md @@ -68,10 +68,12 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L |------------------------------------------------------------------------------| | Cloudwatch | @mock_cloudwatch | basic endpoints done | |------------------------------------------------------------------------------| +| CloudwatchEvents | @mock_events | all endpoints done | +|------------------------------------------------------------------------------| | Data Pipeline | @mock_datapipeline| basic endpoints done | |------------------------------------------------------------------------------| | DynamoDB | @mock_dynamodb | core endpoints done | -| DynamoDB2 | @mock_dynamodb2 | core endpoints + partial indexes | +| DynamoDB2 | @mock_dynamodb2 | all endpoints + partial indexes | |------------------------------------------------------------------------------| | EC2 | @mock_ec2 | core endpoints done | | - AMI | | core endpoints done | @@ -86,7 +88,7 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. 
L |------------------------------------------------------------------------------| | ELB | @mock_elb | core endpoints done | |------------------------------------------------------------------------------| -| ELBv2 | @mock_elbv2 | core endpoints done | +| ELBv2 | @mock_elbv2 | all endpoints done | |------------------------------------------------------------------------------| | EMR | @mock_emr | core endpoints done | |------------------------------------------------------------------------------| @@ -94,6 +96,9 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L |------------------------------------------------------------------------------| | IAM | @mock_iam | core endpoints done | |------------------------------------------------------------------------------| +| IoT | @mock_iot | core endpoints done | +| | @mock_iotdata | core endpoints done | +|------------------------------------------------------------------------------| | Lambda | @mock_lambda | basic endpoints done, requires | | | | docker | |------------------------------------------------------------------------------| @@ -115,7 +120,7 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L |------------------------------------------------------------------------------| | S3 | @mock_s3 | core endpoints done | |------------------------------------------------------------------------------| -| SES | @mock_ses | core endpoints done | +| SES | @mock_ses | all endpoints done | |------------------------------------------------------------------------------| | SNS | @mock_sns | all endpoints done | |------------------------------------------------------------------------------| @@ -127,7 +132,7 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. 
L |------------------------------------------------------------------------------| | SWF | @mock_swf | basic endpoints done | |------------------------------------------------------------------------------| -| X-Ray | @mock_xray | core endpoints done | +| X-Ray | @mock_xray | all endpoints done | |------------------------------------------------------------------------------| ``` @@ -297,6 +302,7 @@ boto3.resource( ## Install + ```console $ pip install moto ``` diff --git a/moto/__init__.py b/moto/__init__.py index 0c0358324..8a4b30979 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -41,6 +41,8 @@ from .swf import mock_swf, mock_swf_deprecated # flake8: noqa from .xray import mock_xray, mock_xray_client, XRaySegment # flake8: noqa from .logs import mock_logs, mock_logs_deprecated # flake8: noqa from .batch import mock_batch # flake8: noqa +from .iot import mock_iot # flake8: noqa +from .iotdata import mock_iotdata # flake8: noqa try: diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 90a14473a..ab99e4119 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -185,7 +185,7 @@ class FakeAutoScalingGroup(BaseModel): target_group_arns = properties.get("TargetGroupARNs", []) backend = autoscaling_backends[region_name] - group = backend.create_autoscaling_group( + group = backend.create_auto_scaling_group( name=resource_name, availability_zones=properties.get("AvailabilityZones", []), desired_capacity=properties.get("DesiredCapacity"), @@ -215,13 +215,13 @@ class FakeAutoScalingGroup(BaseModel): def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): backend = autoscaling_backends[region_name] try: - backend.delete_autoscaling_group(resource_name) + backend.delete_auto_scaling_group(resource_name) except KeyError: pass def delete(self, region_name): backend = autoscaling_backends[region_name] - backend.delete_autoscaling_group(self.name) + backend.delete_auto_scaling_group(self.name) @property def physical_resource_id(self): @@ -358,7 +358,7 @@ class AutoScalingBackend(BaseBackend): def delete_launch_configuration(self, launch_configuration_name): self.launch_configurations.pop(launch_configuration_name, None) - def create_autoscaling_group(self, name, availability_zones, + def create_auto_scaling_group(self, name, availability_zones, desired_capacity, max_size, min_size, launch_config_name, vpc_zone_identifier, default_cooldown, health_check_period, @@ -402,7 +402,7 @@ class AutoScalingBackend(BaseBackend): self.update_attached_target_groups(group.name) return group - def update_autoscaling_group(self, name, availability_zones, + def update_auto_scaling_group(self, name, availability_zones, desired_capacity, max_size, min_size, launch_config_name, vpc_zone_identifier, default_cooldown, health_check_period, @@ -415,18 +415,18 @@ class AutoScalingBackend(BaseBackend): placement_group, termination_policies) return group - def describe_autoscaling_groups(self, names): + def describe_auto_scaling_groups(self, names): groups = self.autoscaling_groups.values() if names: return [group for group in groups if group.name in names] else: return list(groups) - def delete_autoscaling_group(self, group_name): + def delete_auto_scaling_group(self, group_name): self.set_desired_capacity(group_name, 0) self.autoscaling_groups.pop(group_name, None) - def describe_autoscaling_instances(self): + def describe_auto_scaling_instances(self): instance_states = [] for group in self.autoscaling_groups.values(): 
instance_states.extend(group.instance_states) diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index aea04a124..d3f9ca483 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -67,7 +67,7 @@ class AutoScalingResponse(BaseResponse): return template.render() def create_auto_scaling_group(self): - self.autoscaling_backend.create_autoscaling_group( + self.autoscaling_backend.create_auto_scaling_group( name=self._get_param('AutoScalingGroupName'), availability_zones=self._get_multi_param( 'AvailabilityZones.member'), @@ -160,7 +160,7 @@ class AutoScalingResponse(BaseResponse): def describe_auto_scaling_groups(self): names = self._get_multi_param("AutoScalingGroupNames.member") token = self._get_param("NextToken") - all_groups = self.autoscaling_backend.describe_autoscaling_groups(names) + all_groups = self.autoscaling_backend.describe_auto_scaling_groups(names) all_names = [group.name for group in all_groups] if token: start = all_names.index(token) + 1 @@ -177,7 +177,7 @@ class AutoScalingResponse(BaseResponse): return template.render(groups=groups, next_token=next_token) def update_auto_scaling_group(self): - self.autoscaling_backend.update_autoscaling_group( + self.autoscaling_backend.update_auto_scaling_group( name=self._get_param('AutoScalingGroupName'), availability_zones=self._get_multi_param( 'AvailabilityZones.member'), @@ -198,7 +198,7 @@ class AutoScalingResponse(BaseResponse): def delete_auto_scaling_group(self): group_name = self._get_param('AutoScalingGroupName') - self.autoscaling_backend.delete_autoscaling_group(group_name) + self.autoscaling_backend.delete_auto_scaling_group(group_name) template = self.response_template(DELETE_AUTOSCALING_GROUP_TEMPLATE) return template.render() @@ -218,7 +218,7 @@ class AutoScalingResponse(BaseResponse): return template.render() def describe_auto_scaling_instances(self): - instance_states = self.autoscaling_backend.describe_autoscaling_instances() + instance_states = self.autoscaling_backend.describe_auto_scaling_instances() template = self.response_template( DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE) return template.render(instance_states=instance_states) @@ -314,7 +314,7 @@ DESCRIBE_LAUNCH_CONFIGURATIONS_TEMPLATE = """{{ launch_configuration.instance_type }} arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration: - 9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/my-test-lc + 9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/{{ launch_configuration.name }} {% if launch_configuration.block_device_mappings %} {% for mount_point, mapping in launch_configuration.block_device_mappings.items() %} @@ -504,7 +504,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """{{ group.health_check_period }} {{ group.default_cooldown }} arn:aws:autoscaling:us-east-1:803981987763:autoScalingGroup:ca861182-c8f9-4ca7-b1eb-cd35505f5ebb - :autoScalingGroupName/my-test-asg-lbs + :autoScalingGroupName/{{ group.name }} {% if group.termination_policies %} {% for policy in group.termination_policies %} diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 935abbcd6..6306acd5c 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -298,7 +298,12 @@ class LambdaFunction(BaseModel): volumes=["{}:/var/task".format(data_vol.name)], environment=env_vars, detach=True, **run_kwargs) finally: if container: - exit_code = container.wait() + try: + exit_code = container.wait(timeout=300) + except requests.exceptions.ReadTimeout: + exit_code = -1 + container.stop() + 
container.kill() output = container.logs(stdout=False, stderr=True) output += container.logs(stdout=True, stderr=False) container.remove() diff --git a/moto/backends.py b/moto/backends.py index d1ce0730e..771cd4018 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -35,8 +35,11 @@ from moto.sqs import sqs_backends from moto.ssm import ssm_backends from moto.sts import sts_backends from moto.xray import xray_backends +from moto.iot import iot_backends +from moto.iotdata import iotdata_backends from moto.batch import batch_backends + BACKENDS = { 'acm': acm_backends, 'apigateway': apigateway_backends, @@ -74,7 +77,9 @@ BACKENDS = { 'sts': sts_backends, 'route53': route53_backends, 'lambda': lambda_backends, - 'xray': xray_backends + 'xray': xray_backends, + 'iot': iot_backends, + 'iot-data': iotdata_backends, } diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 05a408be1..1c13c5058 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -15,6 +15,7 @@ from moto.dynamodb import models as dynamodb_models from moto.ec2 import models as ec2_models from moto.ecs import models as ecs_models from moto.elb import models as elb_models +from moto.elbv2 import models as elbv2_models from moto.iam import models as iam_models from moto.kinesis import models as kinesis_models from moto.kms import models as kms_models @@ -61,6 +62,9 @@ MODEL_MAP = { "AWS::ECS::TaskDefinition": ecs_models.TaskDefinition, "AWS::ECS::Service": ecs_models.Service, "AWS::ElasticLoadBalancing::LoadBalancer": elb_models.FakeLoadBalancer, + "AWS::ElasticLoadBalancingV2::LoadBalancer": elbv2_models.FakeLoadBalancer, + "AWS::ElasticLoadBalancingV2::TargetGroup": elbv2_models.FakeTargetGroup, + "AWS::ElasticLoadBalancingV2::Listener": elbv2_models.FakeListener, "AWS::DataPipeline::Pipeline": datapipeline_models.Pipeline, "AWS::IAM::InstanceProfile": iam_models.InstanceProfile, "AWS::IAM::Role": iam_models.Role, @@ -326,7 +330,7 @@ def parse_output(output_logical_id, output_json, resources_map): output_json = clean_json(output_json, resources_map) output = Output() output.key = output_logical_id - output.value = output_json['Value'] + output.value = clean_json(output_json['Value'], resources_map) output.description = output_json.get('Description') return output diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 423cf92c1..a5b251b89 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -19,10 +19,19 @@ class CloudFormationResponse(BaseResponse): template_url_parts = urlparse(template_url) if "localhost" in template_url: bucket_name, key_name = template_url_parts.path.lstrip( - "/").split("/") + "/").split("/", 1) else: - bucket_name = template_url_parts.netloc.split(".")[0] - key_name = template_url_parts.path.lstrip("/") + if template_url_parts.netloc.endswith('amazonaws.com') \ + and template_url_parts.netloc.startswith('s3'): + # Handle when S3 url uses amazon url with bucket in path + # Also handles getting region as technically s3 is region'd + + # region = template_url.netloc.split('.')[1] + bucket_name, key_name = template_url_parts.path.lstrip( + "/").split("/", 1) + else: + bucket_name = template_url_parts.netloc.split(".")[0] + key_name = template_url_parts.path.lstrip("/") key = s3_backend.get_key(bucket_name, key_name) return key.value.decode("utf-8") @@ -227,13 +236,13 @@ CREATE_STACK_RESPONSE_TEMPLATE = """ """ -UPDATE_STACK_RESPONSE_TEMPLATE = """ +UPDATE_STACK_RESPONSE_TEMPLATE = """ {{ 
-UPDATE_STACK_RESPONSE_TEMPLATE = """ +UPDATE_STACK_RESPONSE_TEMPLATE = """ {{ stack.stack_id }} - b9b5b068-3a41-11e5-94eb-example - + b9b4b068-3a41-11e5-94eb-example + """ @@ -399,16 +408,6 @@ GET_TEMPLATE_RESPONSE_TEMPLATE = """ """ -UPDATE_STACK_RESPONSE_TEMPLATE = """ - - {{ stack.stack_id }} - - - b9b4b068-3a41-11e5-94eb-example - - -""" - DELETE_STACK_RESPONSE_TEMPLATE = """ 5ccc7dcd-744c-11e5-be70-example @@ -416,6 +415,7 @@ DELETE_STACK_RESPONSE_TEMPLATE = """ """ + LIST_EXPORTS_RESPONSE = """ diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index ac328def2..f9d571a23 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -1,4 +1,7 @@ +import json + from moto.core import BaseBackend, BaseModel +from moto.core.exceptions import RESTError import boto.ec2.cloudwatch import datetime @@ -35,9 +38,26 @@ class FakeAlarm(BaseModel): self.ok_actions = ok_actions self.insufficient_data_actions = insufficient_data_actions self.unit = unit - self.state_updated_timestamp = datetime.datetime.utcnow() self.configuration_updated_timestamp = datetime.datetime.utcnow() + self.history = [] + + self.state_reason = '' + self.state_reason_data = '{}' + self.state = 'OK' + self.state_updated_timestamp = datetime.datetime.utcnow() + + def update_state(self, reason, reason_data, state_value): + # The history type decides what the rest of the items are; it can be one of ConfigurationUpdate | StateUpdate | Action + self.history.append( + ('StateUpdate', self.state_reason, self.state_reason_data, self.state, self.state_updated_timestamp) + ) + + self.state_reason = reason + self.state_reason_data = reason_data + self.state = state_value + self.state_updated_timestamp = datetime.datetime.utcnow() + class MetricDatum(BaseModel): @@ -122,10 +142,8 @@ class CloudWatchBackend(BaseBackend): if alarm.name in alarm_names ] - def get_alarms_by_state_value(self, state): - raise NotImplementedError( - "DescribeAlarm by state is not implemented in moto."
- ) + def get_alarms_by_state_value(self, target_state): + return filter(lambda alarm: alarm.state == target_state, self.alarms.values()) def delete_alarms(self, alarm_names): for alarm_name in alarm_names: @@ -164,6 +182,21 @@ class CloudWatchBackend(BaseBackend): def get_dashboard(self, dashboard): return self.dashboards.get(dashboard) + def set_alarm_state(self, alarm_name, reason, reason_data, state_value): + try: + if reason_data is not None: + json.loads(reason_data) + except ValueError: + raise RESTError('InvalidFormat', 'StateReasonData is invalid JSON') + + if alarm_name not in self.alarms: + raise RESTError('ResourceNotFound', 'Alarm {0} not found'.format(alarm_name), status=404) + + if state_value not in ('OK', 'ALARM', 'INSUFFICIENT_DATA'): + raise RESTError('InvalidParameterValue', 'StateValue is not one of OK | ALARM | INSUFFICIENT_DATA') + + self.alarms[alarm_name].update_state(reason, reason_data, state_value) + class LogGroup(BaseModel): diff --git a/moto/cloudwatch/responses.py b/moto/cloudwatch/responses.py index cd7ce123e..7a5fa5ebd 100644 --- a/moto/cloudwatch/responses.py +++ b/moto/cloudwatch/responses.py @@ -1,4 +1,5 @@ import json +from moto.core.utils import amzn_request_id from moto.core.responses import BaseResponse from .models import cloudwatch_backends @@ -13,6 +14,7 @@ class CloudWatchResponse(BaseResponse): template = self.response_template(ERROR_RESPONSE_TEMPLATE) return template.render(code=code, message=message), dict(status=status) + @amzn_request_id def put_metric_alarm(self): name = self._get_param('AlarmName') namespace = self._get_param('Namespace') @@ -40,6 +42,7 @@ class CloudWatchResponse(BaseResponse): template = self.response_template(PUT_METRIC_ALARM_TEMPLATE) return template.render(alarm=alarm) + @amzn_request_id def describe_alarms(self): action_prefix = self._get_param('ActionPrefix') alarm_name_prefix = self._get_param('AlarmNamePrefix') @@ -62,12 +65,14 @@ class CloudWatchResponse(BaseResponse): template = self.response_template(DESCRIBE_ALARMS_TEMPLATE) return template.render(alarms=alarms) + @amzn_request_id def delete_alarms(self): alarm_names = self._get_multi_param('AlarmNames.member') self.cloudwatch_backend.delete_alarms(alarm_names) template = self.response_template(DELETE_METRIC_ALARMS_TEMPLATE) return template.render() + @amzn_request_id def put_metric_data(self): namespace = self._get_param('Namespace') metric_data = [] @@ -99,11 +104,13 @@ class CloudWatchResponse(BaseResponse): template = self.response_template(PUT_METRIC_DATA_TEMPLATE) return template.render() + @amzn_request_id def list_metrics(self): metrics = self.cloudwatch_backend.get_all_metrics() template = self.response_template(LIST_METRICS_TEMPLATE) return template.render(metrics=metrics) + @amzn_request_id def delete_dashboards(self): dashboards = self._get_multi_param('DashboardNames.member') if dashboards is None: @@ -116,18 +123,23 @@ class CloudWatchResponse(BaseResponse): template = self.response_template(DELETE_DASHBOARD_TEMPLATE) return template.render() + @amzn_request_id def describe_alarm_history(self): raise NotImplementedError() + @amzn_request_id def describe_alarms_for_metric(self): raise NotImplementedError() + @amzn_request_id def disable_alarm_actions(self): raise NotImplementedError() + @amzn_request_id def enable_alarm_actions(self): raise NotImplementedError() + @amzn_request_id def get_dashboard(self): dashboard_name = self._get_param('DashboardName') @@ -138,9 +150,11 @@ class CloudWatchResponse(BaseResponse): template = 
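With `set_alarm_state` and `get_alarms_by_state_value` wired into the backend, alarm state can now be driven and queried through the regular client API. A minimal sketch (the alarm name and metric are illustrative; per the model above, new alarms start in `OK`):

```python
import boto3
from moto import mock_cloudwatch

@mock_cloudwatch
def test_alarm_state_roundtrip():
    client = boto3.client('cloudwatch', region_name='us-east-1')
    client.put_metric_alarm(
        AlarmName='cpu-high', Namespace='AWS/EC2', MetricName='CPUUtilization',
        Statistic='Average', Period=60, EvaluationPeriods=1,
        Threshold=90.0, ComparisonOperator='GreaterThanThreshold',
    )
    # StateReasonData must be valid JSON or the backend raises InvalidFormat.
    client.set_alarm_state(
        AlarmName='cpu-high', StateValue='ALARM',
        StateReason='manually flipped', StateReasonData='{}',
    )
    alarms = client.describe_alarms(StateValue='ALARM')['MetricAlarms']
    assert [a['AlarmName'] for a in alarms] == ['cpu-high']
```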
self.response_template(GET_DASHBOARD_TEMPLATE) return template.render(dashboard=dashboard) + @amzn_request_id def get_metric_statistics(self): raise NotImplementedError() + @amzn_request_id def list_dashboards(self): prefix = self._get_param('DashboardNamePrefix', '') @@ -149,6 +163,7 @@ class CloudWatchResponse(BaseResponse): template = self.response_template(LIST_DASHBOARD_RESPONSE) return template.render(dashboards=dashboards) + @amzn_request_id def put_dashboard(self): name = self._get_param('DashboardName') body = self._get_param('DashboardBody') @@ -163,14 +178,23 @@ class CloudWatchResponse(BaseResponse): template = self.response_template(PUT_DASHBOARD_RESPONSE) return template.render() + @amzn_request_id def set_alarm_state(self): - raise NotImplementedError() + alarm_name = self._get_param('AlarmName') + reason = self._get_param('StateReason') + reason_data = self._get_param('StateReasonData') + state_value = self._get_param('StateValue') + + self.cloudwatch_backend.set_alarm_state(alarm_name, reason, reason_data, state_value) + + template = self.response_template(SET_ALARM_STATE_TEMPLATE) + return template.render() PUT_METRIC_ALARM_TEMPLATE = """ - 2690d7eb-ed86-11dd-9877-6fad448a8419 + {{ request_id }} """ @@ -229,7 +253,7 @@ DESCRIBE_ALARMS_TEMPLATE = """ - 2690d7eb-ed86-11dd-9877-6fad448a8419 + {{ request_id }} """ @@ -237,7 +261,7 @@ DELETE_METRIC_ALARMS_TEMPLATE = """ - 2690d7eb-ed86-11dd-9877-6fad448a8419 + {{ request_id }} """ @@ -271,7 +295,7 @@ PUT_DASHBOARD_RESPONSE = """ - 68d1dc8c-9faa-11e7-a694-df2715690df2 + {{ request_id }} """ @@ -307,16 +331,22 @@ GET_DASHBOARD_TEMPLATE = """ + + {{ request_id }} + +""" + ERROR_RESPONSE_TEMPLATE = """ Sender {{ code }} {{ message }} - 5e45fd1e-9fa3-11e7-b720-89e8821d38c4 + {{ request_id }} """ diff --git a/moto/core/exceptions.py b/moto/core/exceptions.py index 5474707d6..40202f7bd 100644 --- a/moto/core/exceptions.py +++ b/moto/core/exceptions.py @@ -34,6 +34,8 @@ ERROR_JSON_RESPONSE = u"""{ class RESTError(HTTPException): + code = 400 + templates = { 'single_error': SINGLE_ERROR_RESPONSE, 'error': ERROR_RESPONSE, @@ -54,7 +56,6 @@ class DryRunClientError(RESTError): class JsonRESTError(RESTError): - def __init__(self, error_type, message, template='error_json', **kwargs): super(JsonRESTError, self).__init__( error_type, message, template, **kwargs) diff --git a/moto/core/models.py b/moto/core/models.py index 6e93f911a..c6fb72ffa 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from __future__ import unicode_literals from __future__ import absolute_import @@ -176,16 +177,49 @@ class ServerModeMockAWS(BaseMockAWS): if 'endpoint_url' not in kwargs: kwargs['endpoint_url'] = "http://localhost:5000" return real_boto3_resource(*args, **kwargs) + + def fake_httplib_send_output(self, message_body=None, *args, **kwargs): + def _convert_to_bytes(mixed_buffer): + bytes_buffer = [] + for chunk in mixed_buffer: + if isinstance(chunk, six.text_type): + bytes_buffer.append(chunk.encode('utf-8')) + else: + bytes_buffer.append(chunk) + msg = b"\r\n".join(bytes_buffer) + return msg + + self._buffer.extend((b"", b"")) + msg = _convert_to_bytes(self._buffer) + del self._buffer[:] + if isinstance(message_body, bytes): + msg += message_body + message_body = None + self.send(msg) + # if self._expect_header_set: + # read, write, exc = select.select([self.sock], [], [self.sock], 1) + # if read: + # self._handle_expect_response(message_body) + # return + if message_body is not None: + 
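Every CloudWatch handler is now wrapped in `@amzn_request_id`, and the hard-coded request ids in the response templates give way to a `{{ request_id }}` placeholder. Roughly, the decorator behaves like this simplified sketch (an illustration only, not moto's exact implementation; the real one lives in moto.core.utils):

```python
import re
import uuid
from functools import wraps

def amzn_request_id_sketch(f):
    # Sketch: assumes the (status, headers, body) tuple form of a moto
    # response method, and stamps a fresh id into header and body.
    @wraps(f)
    def wrapper(*args, **kwargs):
        status, headers, body = f(*args, **kwargs)
        request_id = str(uuid.uuid4())
        headers['x-amzn-requestid'] = request_id
        # Rewrite the id inside the rendered XML body.
        body = re.sub(r'(?<=<RequestId>)[^<]*(?=</RequestId>)', request_id, body)
        return status, headers, body
    return wrapper
```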
self.send(message_body) + self._client_patcher = mock.patch('boto3.client', fake_boto3_client) - self._resource_patcher = mock.patch( - 'boto3.resource', fake_boto3_resource) + self._resource_patcher = mock.patch('boto3.resource', fake_boto3_resource) + if six.PY2: + self._httplib_patcher = mock.patch('httplib.HTTPConnection._send_output', fake_httplib_send_output) + self._client_patcher.start() self._resource_patcher.start() + if six.PY2: + self._httplib_patcher.start() def disable_patching(self): if self._client_patcher: self._client_patcher.stop() self._resource_patcher.stop() + if six.PY2: + self._httplib_patcher.stop() class Model(type): diff --git a/moto/core/responses.py b/moto/core/responses.py index 572a45229..be0a4ef45 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -17,6 +17,8 @@ from six.moves.urllib.parse import parse_qs, urlparse import xmltodict from pkg_resources import resource_filename from werkzeug.exceptions import HTTPException + +import boto3 from moto.compat import OrderedDict from moto.core.utils import camelcase_to_underscores, method_names_from_class @@ -103,7 +105,8 @@ class _TemplateEnvironmentMixin(object): class BaseResponse(_TemplateEnvironmentMixin): default_region = 'us-east-1' - region_regex = r'\.(.+?)\.amazonaws\.com' + # to extract region, use [^.] + region_regex = r'\.([^.]+?)\.amazonaws\.com' aws_service_spec = None @classmethod @@ -151,12 +154,12 @@ class BaseResponse(_TemplateEnvironmentMixin): querystring.update(headers) querystring = _decode_dict(querystring) - self.uri = full_url self.path = urlparse(full_url).path self.querystring = querystring self.method = request.method self.region = self.get_region_from_url(request, full_url) + self.uri_match = None self.headers = request.headers if 'host' not in self.headers: @@ -178,6 +181,58 @@ class BaseResponse(_TemplateEnvironmentMixin): self.setup_class(request, full_url, headers) return self.call_action() + def uri_to_regexp(self, uri): + """converts uri w/ placeholder to regexp + '/cars/{carName}/drivers/{DriverName}' + -> '^/cars/.*/drivers/[^/]*$' + + '/cars/{carName}/drivers/{DriverName}/drive' + -> '^/cars/.*/drivers/.*/drive$' + + """ + def _convert(elem, is_last): + if not re.match('^{.*}$', elem): + return elem + name = elem.replace('{', '').replace('}', '') + if is_last: + return '(?P<%s>[^/]*)' % name + return '(?P<%s>.*)' % name + + elems = uri.split('/') + num_elems = len(elems) + regexp = '^{}$'.format('/'.join([_convert(elem, (i == num_elems - 1)) for i, elem in enumerate(elems)])) + return regexp + + def _get_action_from_method_and_request_uri(self, method, request_uri): + """basically used for `rest-json` APIs + You can refer to example from link below + https://github.com/boto/botocore/blob/develop/botocore/data/iot/2015-05-28/service-2.json + """ + + # service response class should have 'SERVICE_NAME' class member, + # if you want to get action from method and url + if not hasattr(self, 'SERVICE_NAME'): + return None + service = self.SERVICE_NAME + conn = boto3.client(service, region_name=self.region) + + # make cache if it does not exist yet + if not hasattr(self, 'method_urls'): + self.method_urls = defaultdict(lambda: defaultdict(str)) + op_names = conn._service_model.operation_names + for op_name in op_names: + op_model = conn._service_model.operation_model(op_name) + _method = op_model.http['method'] + uri_regexp = self.uri_to_regexp(op_model.http['requestUri']) + self.method_urls[_method][uri_regexp] = op_model.name + regexp_and_names = 
self.method_urls[method] + for regexp, name in regexp_and_names.items(): + match = re.match(regexp, request_uri) + self.uri_match = match + if match: + return name + return None + def _get_action(self): action = self.querystring.get('Action', [""])[0] if not action: # Some services use a header for the action @@ -186,7 +241,9 @@ class BaseResponse(_TemplateEnvironmentMixin): 'x-amz-target') or self.headers.get('X-Amz-Target') if match: action = match.split(".")[-1] - + # get action from method and uri + if not action: + return self._get_action_from_method_and_request_uri(self.method, self.path) return action def call_action(self): @@ -221,6 +278,22 @@ class BaseResponse(_TemplateEnvironmentMixin): val = self.querystring.get(param_name) if val is not None: return val[0] + + # try to get json body parameter + if self.body is not None: + try: + return json.loads(self.body)[param_name] + except ValueError: + pass + except KeyError: + pass + # try to get path parameter + if self.uri_match: + try: + return self.uri_match.group(param_name) + except IndexError: + # do nothing if param is not found + pass return if_none def _get_int_param(self, param_name, if_none=None): diff --git a/moto/core/utils.py b/moto/core/utils.py index 2ea4dc4a8..43f05672e 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -272,9 +272,6 @@ def amzn_request_id(f): else: status, new_headers, body = response headers.update(new_headers) - # Cast status to string - if "status" in headers: - headers['status'] = str(headers['status']) request_id = gen_amzn_requestid_long(headers) diff --git a/moto/dynamodb2/__init__.py b/moto/dynamodb2/__init__.py index ad3f042d2..a56a83b35 100644 --- a/moto/dynamodb2/__init__.py +++ b/moto/dynamodb2/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals -from .models import dynamodb_backend2 +from .models import dynamodb_backends as dynamodb_backends2 +from ..core.models import base_decorator, deprecated_base_decorator -dynamodb_backends2 = {"global": dynamodb_backend2} -mock_dynamodb2 = dynamodb_backend2.decorator -mock_dynamodb2_deprecated = dynamodb_backend2.deprecated_decorator +dynamodb_backend2 = dynamodb_backends2['us-east-1'] +mock_dynamodb2 = base_decorator(dynamodb_backends2) +mock_dynamodb2_deprecated = deprecated_base_decorator(dynamodb_backends2) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index bec72d327..a4d8feb3c 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -1,13 +1,16 @@ from __future__ import unicode_literals from collections import defaultdict +import copy import datetime import decimal import json import re +import boto3 from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.core.utils import unix_time +from moto.core.exceptions import JsonRESTError from .comparisons import get_comparison_func, get_filter_expression, Op @@ -271,6 +274,10 @@ class Table(BaseModel): self.items = defaultdict(dict) self.table_arn = self._generate_arn(table_name) self.tags = [] + self.ttl = { + 'TimeToLiveStatus': 'DISABLED' # One of 'ENABLING'|'DISABLING'|'ENABLED'|'DISABLED', + # 'AttributeName': 'string' # Can contain this + } def _generate_arn(self, name): return 'arn:aws:dynamodb:us-east-1:123456789011:table/' + name @@ -413,7 +420,7 @@ class Table(BaseModel): def query(self, hash_key, range_comparison, range_objs, limit, exclusive_start_key, scan_index_forward, projection_expression, - index_name=None, **filter_kwargs): + index_name=None, filter_expression=None, **filter_kwargs): 
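`uri_to_regexp` above turns a botocore `requestUri` pattern into an anchored regexp with one named group per placeholder; every placeholder matches greedily except the last, which is confined to a single path segment. Together with the `_get_param` fallback to JSON-body and path parameters, this is what lets `rest-json` services like IoT route requests to handlers. For example (route and values hypothetical):

```python
import re

# uri_to_regexp('/cars/{carName}/drivers/{DriverName}') produces:
regexp = '^/cars/(?P<carName>.*)/drivers/(?P<DriverName>[^/]*)$'

match = re.match(regexp, '/cars/vw/drivers/alice')
assert match.group('carName') == 'vw'        # exposed via _get_param('carName')
assert match.group('DriverName') == 'alice'  # last placeholder: one segment only
```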
results = [] if index_name: all_indexes = (self.global_indexes or []) + (self.indexes or []) @@ -486,7 +493,8 @@ class Table(BaseModel): if projection_expression: expressions = [x.strip() for x in projection_expression.split(',')] - for result in possible_results: + results = copy.deepcopy(results) + for result in results: for attr in list(result.attrs): if attr not in expressions: result.attrs.pop(attr) @@ -496,6 +504,9 @@ class Table(BaseModel): scanned_count = len(list(self.all_items())) + if filter_expression is not None: + results = [item for item in results if filter_expression.expr(item)] + results, last_evaluated_key = self._trim_results(results, limit, exclusive_start_key) return results, scanned_count, last_evaluated_key @@ -577,9 +588,16 @@ class Table(BaseModel): class DynamoDBBackend(BaseBackend): - def __init__(self): + def __init__(self, region_name=None): + self.region_name = region_name self.tables = OrderedDict() + def reset(self): + region_name = self.region_name + + self.__dict__ = {} + self.__init__(region_name) + def create_table(self, name, **params): if name in self.tables: return None @@ -595,6 +613,11 @@ class DynamoDBBackend(BaseBackend): if self.tables[table].table_arn == table_arn: self.tables[table].tags.extend(tags) + def untag_resource(self, table_arn, tag_keys): + for table in self.tables: + if self.tables[table].table_arn == table_arn: + self.tables[table].tags = [tag for tag in self.tables[table].tags if tag['Key'] not in tag_keys] + def list_tags_of_resource(self, table_arn): required_table = None for table in self.tables: @@ -689,7 +712,9 @@ class DynamoDBBackend(BaseBackend): return table.get_item(hash_key, range_key) def query(self, table_name, hash_key_dict, range_comparison, range_value_dicts, - limit, exclusive_start_key, scan_index_forward, projection_expression, index_name=None, **filter_kwargs): + limit, exclusive_start_key, scan_index_forward, projection_expression, index_name=None, + expr_names=None, expr_values=None, filter_expression=None, + **filter_kwargs): table = self.tables.get(table_name) if not table: return None, None @@ -698,8 +723,13 @@ class DynamoDBBackend(BaseBackend): range_values = [DynamoType(range_value) for range_value in range_value_dicts] + if filter_expression is not None: + filter_expression = get_filter_expression(filter_expression, expr_names, expr_values) + else: + filter_expression = Op(None, None) # Will always eval to true + return table.query(hash_key, range_comparison, range_values, limit, - exclusive_start_key, scan_index_forward, projection_expression, index_name, **filter_kwargs) + exclusive_start_key, scan_index_forward, projection_expression, index_name, filter_expression, **filter_kwargs) def scan(self, table_name, filters, limit, exclusive_start_key, filter_expression, expr_names, expr_values): table = self.tables.get(table_name) @@ -796,5 +826,28 @@ class DynamoDBBackend(BaseBackend): hash_key, range_key = self.get_keys_value(table, keys) return table.delete_item(hash_key, range_key) + def update_ttl(self, table_name, ttl_spec): + table = self.tables.get(table_name) + if table is None: + raise JsonRESTError('ResourceNotFound', 'Table not found') -dynamodb_backend2 = DynamoDBBackend() + if 'Enabled' not in ttl_spec or 'AttributeName' not in ttl_spec: + raise JsonRESTError('InvalidParameterValue', + 'TimeToLiveSpecification does not contain Enabled and AttributeName') + + if ttl_spec['Enabled']: + table.ttl['TimeToLiveStatus'] = 'ENABLED' + else: + table.ttl['TimeToLiveStatus'] = 'DISABLED' + 
table.ttl['AttributeName'] = ttl_spec['AttributeName'] + + def describe_ttl(self, table_name): + table = self.tables.get(table_name) + if table is None: + raise JsonRESTError('ResourceNotFound', 'Table not found') + + return table.ttl + + +available_regions = boto3.session.Session().get_available_regions("dynamodb") +dynamodb_backends = {region: DynamoDBBackend(region_name=region) for region in available_regions} diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 218cfc21d..952d33efa 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -5,7 +5,7 @@ import re from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores, amzn_request_id -from .models import dynamodb_backend2, dynamo_json_dump +from .models import dynamodb_backends, dynamo_json_dump class DynamoHandler(BaseResponse): @@ -24,6 +24,14 @@ class DynamoHandler(BaseResponse): def error(self, type_, message, status=400): return status, self.response_headers, dynamo_json_dump({'__type': type_, 'message': message}) + @property + def dynamodb_backend(self): + """ + :return: DynamoDB2 Backend + :rtype: moto.dynamodb2.models.DynamoDBBackend + """ + return dynamodb_backends[self.region] + @amzn_request_id def call_action(self): self.body = json.loads(self.body or '{}') @@ -46,10 +54,10 @@ class DynamoHandler(BaseResponse): limit = body.get('Limit', 100) if body.get("ExclusiveStartTableName"): last = body.get("ExclusiveStartTableName") - start = list(dynamodb_backend2.tables.keys()).index(last) + 1 + start = list(self.dynamodb_backend.tables.keys()).index(last) + 1 else: start = 0 - all_tables = list(dynamodb_backend2.tables.keys()) + all_tables = list(self.dynamodb_backend.tables.keys()) if limit: tables = all_tables[start:start + limit] else: @@ -74,12 +82,12 @@ class DynamoHandler(BaseResponse): global_indexes = body.get("GlobalSecondaryIndexes", []) local_secondary_indexes = body.get("LocalSecondaryIndexes", []) - table = dynamodb_backend2.create_table(table_name, - schema=key_schema, - throughput=throughput, - attr=attr, - global_indexes=global_indexes, - indexes=local_secondary_indexes) + table = self.dynamodb_backend.create_table(table_name, + schema=key_schema, + throughput=throughput, + attr=attr, + global_indexes=global_indexes, + indexes=local_secondary_indexes) if table is not None: return dynamo_json_dump(table.describe()) else: @@ -88,7 +96,7 @@ class DynamoHandler(BaseResponse): def delete_table(self): name = self.body['TableName'] - table = dynamodb_backend2.delete_table(name) + table = self.dynamodb_backend.delete_table(name) if table is not None: return dynamo_json_dump(table.describe()) else: @@ -96,15 +104,21 @@ class DynamoHandler(BaseResponse): return self.error(er, 'Requested resource not found') def tag_resource(self): - tags = self.body['Tags'] table_arn = self.body['ResourceArn'] - dynamodb_backend2.tag_resource(table_arn, tags) - return json.dumps({}) + tags = self.body['Tags'] + self.dynamodb_backend.tag_resource(table_arn, tags) + return '' + + def untag_resource(self): + table_arn = self.body['ResourceArn'] + tags = self.body['TagKeys'] + self.dynamodb_backend.untag_resource(table_arn, tags) + return '' def list_tags_of_resource(self): try: table_arn = self.body['ResourceArn'] - all_tags = dynamodb_backend2.list_tags_of_resource(table_arn) + all_tags = self.dynamodb_backend.list_tags_of_resource(table_arn) all_tag_keys = [tag['Key'] for tag in all_tags] marker = self.body.get('NextToken') if marker: @@ -127,17 +141,17 @@ 
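DynamoDB backends are now keyed by region, and the `update_ttl`/`describe_ttl` methods above back the UpdateTimeToLive and DescribeTimeToLive actions (handlers added further down in responses.py). A minimal round trip, with the table and attribute names chosen purely for illustration:

```python
import boto3
from moto import mock_dynamodb2

@mock_dynamodb2
def test_ttl_roundtrip():
    client = boto3.client('dynamodb', region_name='us-west-2')
    client.create_table(
        TableName='sessions',
        KeySchema=[{'AttributeName': 'id', 'KeyType': 'HASH'}],
        AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
        ProvisionedThroughput={'ReadCapacityUnits': 1, 'WriteCapacityUnits': 1},
    )
    client.update_time_to_live(
        TableName='sessions',
        TimeToLiveSpecification={'Enabled': True, 'AttributeName': 'expires_at'},
    )
    desc = client.describe_time_to_live(TableName='sessions')
    assert desc['TimeToLiveDescription']['TimeToLiveStatus'] == 'ENABLED'
```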
class DynamoHandler(BaseResponse): def update_table(self): name = self.body['TableName'] if 'GlobalSecondaryIndexUpdates' in self.body: - table = dynamodb_backend2.update_table_global_indexes( + table = self.dynamodb_backend.update_table_global_indexes( name, self.body['GlobalSecondaryIndexUpdates']) if 'ProvisionedThroughput' in self.body: throughput = self.body["ProvisionedThroughput"] - table = dynamodb_backend2.update_table_throughput(name, throughput) + table = self.dynamodb_backend.update_table_throughput(name, throughput) return dynamo_json_dump(table.describe()) def describe_table(self): name = self.body['TableName'] try: - table = dynamodb_backend2.tables[name] + table = self.dynamodb_backend.tables[name] except KeyError: er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' return self.error(er, 'Requested resource not found') @@ -188,8 +202,7 @@ class DynamoHandler(BaseResponse): expected[not_exists_m.group(1)] = {'Exists': False} try: - result = dynamodb_backend2.put_item( - name, item, expected, overwrite) + result = self.dynamodb_backend.put_item(name, item, expected, overwrite) except ValueError: er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException' return self.error(er, 'A condition specified in the operation could not be evaluated.') @@ -214,10 +227,10 @@ class DynamoHandler(BaseResponse): request = list(table_request.values())[0] if request_type == 'PutRequest': item = request['Item'] - dynamodb_backend2.put_item(table_name, item) + self.dynamodb_backend.put_item(table_name, item) elif request_type == 'DeleteRequest': keys = request['Key'] - item = dynamodb_backend2.delete_item(table_name, keys) + item = self.dynamodb_backend.delete_item(table_name, keys) response = { "ConsumedCapacity": [ @@ -237,7 +250,7 @@ class DynamoHandler(BaseResponse): name = self.body['TableName'] key = self.body['Key'] try: - item = dynamodb_backend2.get_item(name, key) + item = self.dynamodb_backend.get_item(name, key) except ValueError: er = 'com.amazon.coral.validate#ValidationException' return self.error(er, 'Validation Exception') @@ -268,7 +281,7 @@ class DynamoHandler(BaseResponse): attributes_to_get = table_request.get('AttributesToGet') results["Responses"][table_name] = [] for key in keys: - item = dynamodb_backend2.get_item(table_name, key) + item = self.dynamodb_backend.get_item(table_name, key) if item: item_describe = item.describe_attrs(attributes_to_get) results["Responses"][table_name].append( @@ -285,7 +298,9 @@ class DynamoHandler(BaseResponse): # {u'KeyConditionExpression': u'#n0 = :v0', u'ExpressionAttributeValues': {u':v0': {u'S': u'johndoe'}}, u'ExpressionAttributeNames': {u'#n0': u'username'}} key_condition_expression = self.body.get('KeyConditionExpression') projection_expression = self.body.get('ProjectionExpression') - expression_attribute_names = self.body.get('ExpressionAttributeNames') + expression_attribute_names = self.body.get('ExpressionAttributeNames', {}) + filter_expression = self.body.get('FilterExpression') + expression_attribute_values = self.body.get('ExpressionAttributeValues', {}) if projection_expression and expression_attribute_names: expressions = [x.strip() for x in projection_expression.split(',')] @@ -294,10 +309,11 @@ class DynamoHandler(BaseResponse): projection_expression = projection_expression.replace(expression, expression_attribute_names[expression]) filter_kwargs = {} - if key_condition_expression: - value_alias_map = self.body['ExpressionAttributeValues'] - table = dynamodb_backend2.get_table(name) + if 
key_condition_expression: + value_alias_map = self.body.get('ExpressionAttributeValues', {}) + + table = self.dynamodb_backend.get_table(name) # If table does not exist if table is None: @@ -320,7 +336,7 @@ class DynamoHandler(BaseResponse): index = table.schema reverse_attribute_lookup = dict((v, k) for k, v in - six.iteritems(self.body['ExpressionAttributeNames'])) + six.iteritems(self.body.get('ExpressionAttributeNames', {}))) if " AND " in key_condition_expression: expressions = key_condition_expression.split(" AND ", 1) @@ -359,13 +375,14 @@ class DynamoHandler(BaseResponse): range_values = [] hash_key_value_alias = hash_key_expression.split("=")[1].strip() - hash_key = value_alias_map[hash_key_value_alias] + # Temporary fix until we get proper KeyConditionExpression function + hash_key = value_alias_map.get(hash_key_value_alias, {'S': hash_key_value_alias}) else: # 'KeyConditions': {u'forum_name': {u'ComparisonOperator': u'EQ', u'AttributeValueList': [{u'S': u'the-key'}]}} key_conditions = self.body.get('KeyConditions') query_filters = self.body.get("QueryFilter") if key_conditions: - hash_key_name, range_key_name = dynamodb_backend2.get_table_keys_name( + hash_key_name, range_key_name = self.dynamodb_backend.get_table_keys_name( name, key_conditions.keys()) for key, value in key_conditions.items(): if key not in (hash_key_name, range_key_name): @@ -398,9 +415,12 @@ class DynamoHandler(BaseResponse): exclusive_start_key = self.body.get('ExclusiveStartKey') limit = self.body.get("Limit") scan_index_forward = self.body.get("ScanIndexForward") - items, scanned_count, last_evaluated_key = dynamodb_backend2.query( + items, scanned_count, last_evaluated_key = self.dynamodb_backend.query( name, hash_key, range_comparison, range_values, limit, - exclusive_start_key, scan_index_forward, projection_expression, index_name=index_name, **filter_kwargs) + exclusive_start_key, scan_index_forward, projection_expression, index_name=index_name, + expr_names=expression_attribute_names, expr_values=expression_attribute_values, + filter_expression=filter_expression, **filter_kwargs + ) if items is None: er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' return self.error(er, 'Requested resource not found') @@ -442,12 +462,12 @@ class DynamoHandler(BaseResponse): limit = self.body.get("Limit") try: - items, scanned_count, last_evaluated_key = dynamodb_backend2.scan(name, filters, - limit, - exclusive_start_key, - filter_expression, - expression_attribute_names, - expression_attribute_values) + items, scanned_count, last_evaluated_key = self.dynamodb_backend.scan(name, filters, + limit, + exclusive_start_key, + filter_expression, + expression_attribute_names, + expression_attribute_values) except ValueError as err: er = 'com.amazonaws.dynamodb.v20111205#ValidationError' return self.error(er, 'Bad Filter Expression: {0}'.format(err)) @@ -478,12 +498,12 @@ class DynamoHandler(BaseResponse): name = self.body['TableName'] keys = self.body['Key'] return_values = self.body.get('ReturnValues', '') - table = dynamodb_backend2.get_table(name) + table = self.dynamodb_backend.get_table(name) if not table: er = 'com.amazonaws.dynamodb.v20120810#ConditionalCheckFailedException' return self.error(er, 'A condition specified in the operation could not be evaluated.') - item = dynamodb_backend2.delete_item(name, keys) + item = self.dynamodb_backend.delete_item(name, keys) if item and return_values == 'ALL_OLD': item_dict = item.to_json() else: @@ -500,7 +520,7 @@ class DynamoHandler(BaseResponse): 
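Query now honours a FilterExpression, parsed with the same `get_filter_expression` machinery Scan already used and applied after key matching but before the Limit/ExclusiveStartKey trimming. Illustrative usage against a hypothetical `users` table with a `username` hash key (`client` as in the previous sketch):

```python
resp = client.query(
    TableName='users',
    KeyConditionExpression='username = :name',
    FilterExpression='age > :min_age',
    ExpressionAttributeValues={
        ':name': {'S': 'johndoe'},
        ':min_age': {'N': '21'},
    },
)
# Items are johndoe's records with age > 21; ScannedCount is still
# computed before the filter expression is applied.
items = resp['Items']
```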
'ExpressionAttributeNames', {}) expression_attribute_values = self.body.get( 'ExpressionAttributeValues', {}) - existing_item = dynamodb_backend2.get_item(name, key) + existing_item = self.dynamodb_backend.get_item(name, key) if 'Expected' in self.body: expected = self.body['Expected'] @@ -536,9 +556,10 @@ class DynamoHandler(BaseResponse): '\s*([=\+-])\s*', '\\1', update_expression) try: - item = dynamodb_backend2.update_item( - name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values, - expected) + item = self.dynamodb_backend.update_item( + name, key, update_expression, attribute_updates, expression_attribute_names, + expression_attribute_values, expected + ) except ValueError: er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException' return self.error(er, 'A condition specified in the operation could not be evaluated.') @@ -555,3 +576,26 @@ class DynamoHandler(BaseResponse): item_dict['Attributes'] = {} return dynamo_json_dump(item_dict) + + def describe_limits(self): + return json.dumps({ + 'AccountMaxReadCapacityUnits': 20000, + 'TableMaxWriteCapacityUnits': 10000, + 'AccountMaxWriteCapacityUnits': 20000, + 'TableMaxReadCapacityUnits': 10000 + }) + + def update_time_to_live(self): + name = self.body['TableName'] + ttl_spec = self.body['TimeToLiveSpecification'] + + self.dynamodb_backend.update_ttl(name, ttl_spec) + + return json.dumps({'TimeToLiveSpecification': ttl_spec}) + + def describe_time_to_live(self): + name = self.body['TableName'] + + ttl_spec = self.dynamodb_backend.describe_ttl(name) + + return json.dumps({'TimeToLiveDescription': ttl_spec}) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index b9cbe0407..1f376c96a 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -2,10 +2,12 @@ from __future__ import unicode_literals import copy import itertools +import ipaddress import json -import os import re import six +import warnings +from pkg_resources import resource_filename import boto.ec2 @@ -44,7 +46,6 @@ from .exceptions import ( InvalidRouteTableIdError, InvalidRouteError, InvalidInstanceIdError, - MalformedAMIIdError, InvalidAMIIdError, InvalidAMIAttributeItemValueError, InvalidSnapshotIdError, @@ -113,8 +114,12 @@ from .utils import ( tag_filter_matches, ) -RESOURCES_DIR = os.path.join(os.path.dirname(__file__), 'resources') -INSTANCE_TYPES = json.load(open(os.path.join(RESOURCES_DIR, 'instance_types.json'), 'r')) +INSTANCE_TYPES = json.load( + open(resource_filename(__name__, 'resources/instance_types.json'), 'r') +) +AMIS = json.load( + open(resource_filename(__name__, 'resources/amis.json'), 'r') +) def utc_date_and_time(): @@ -384,6 +389,11 @@ class Instance(TaggedEC2Resource, BotoInstance): amis = self.ec2_backend.describe_images(filters={'image-id': image_id}) ami = amis[0] if amis else None + if ami is None: + warnings.warn('Could not find AMI with image-id:{0}, ' + 'in the near future this will ' + 'cause an error'.format(image_id), + PendingDeprecationWarning) self.platform = ami.platform if ami else None self.virtualization_type = ami.virtualization_type if ami else 'paravirtual' @@ -403,6 +413,10 @@ class Instance(TaggedEC2Resource, BotoInstance): subnet = ec2_backend.get_subnet(self.subnet_id) self.vpc_id = subnet.vpc_id self._placement.zone = subnet.availability_zone + + if associate_public_ip is None: + # Mapping public ip hasn't been explicitly enabled or disabled + associate_public_ip = subnet.map_public_ip_on_launch == 'true' elif placement: self._placement.zone = placement else:
@@ -410,10 +424,22 @@ class Instance(TaggedEC2Resource, BotoInstance): self.block_device_mapping = BlockDeviceMapping() - self.prep_nics(kwargs.get("nics", {}), - subnet_id=self.subnet_id, - private_ip=kwargs.get("private_ip"), - associate_public_ip=associate_public_ip) + self._private_ips = set() + self.prep_nics( + kwargs.get("nics", {}), + private_ip=kwargs.get("private_ip"), + associate_public_ip=associate_public_ip + ) + + def __del__(self): + try: + subnet = self.ec2_backend.get_subnet(self.subnet_id) + for ip in self._private_ips: + subnet.del_subnet_ip(ip) + except Exception: + # It's not "super" critical we clean this up, as reset will do this + # worst case we'll get IP address exhaustion... rarely + pass def setup_defaults(self): # Default have an instance with root volume should you not wish to @@ -548,14 +574,23 @@ class Instance(TaggedEC2Resource, BotoInstance): else: return self.security_groups - def prep_nics(self, nic_spec, subnet_id=None, private_ip=None, associate_public_ip=None): + def prep_nics(self, nic_spec, private_ip=None, associate_public_ip=None): self.nics = {} - if not private_ip: + if self.subnet_id: + subnet = self.ec2_backend.get_subnet(self.subnet_id) + if not private_ip: + private_ip = subnet.get_available_subnet_ip(instance=self) + else: + subnet.request_ip(private_ip, instance=self) + + self._private_ips.add(private_ip) + elif private_ip is None: + # Preserve old behaviour if in EC2-Classic mode + private_ip = random_private_ip() + # Primary NIC defaults - primary_nic = {'SubnetId': subnet_id, + primary_nic = {'SubnetId': self.subnet_id, 'PrivateIpAddress': private_ip, 'AssociatePublicIpAddress': associate_public_ip} primary_nic = dict((k, v) for k, v in primary_nic.items() if v) @@ -766,14 +801,12 @@ class InstanceBackend(object): associated with the given instance_ids.
""" reservations = [] - for reservation in self.all_reservations(make_copy=True): + for reservation in self.all_reservations(): reservation_instance_ids = [ instance.id for instance in reservation.instances] matching_reservation = any( instance_id in reservation_instance_ids for instance_id in instance_ids) if matching_reservation: - # We need to make a copy of the reservation because we have to modify the - # instances to limit to those requested reservation.instances = [ instance for instance in reservation.instances if instance.id in instance_ids] reservations.append(reservation) @@ -787,15 +820,8 @@ class InstanceBackend(object): reservations = filter_reservations(reservations, filters) return reservations - def all_reservations(self, make_copy=False, filters=None): - if make_copy: - # Return copies so that other functions can modify them with changing - # the originals - reservations = [copy.deepcopy(reservation) - for reservation in self.reservations.values()] - else: - reservations = [ - reservation for reservation in self.reservations.values()] + def all_reservations(self, filters=None): + reservations = [copy.copy(reservation) for reservation in self.reservations.values()] if filters is not None: reservations = filter_reservations(reservations, filters) return reservations @@ -985,17 +1011,31 @@ class TagBackend(object): class Ami(TaggedEC2Resource): def __init__(self, ec2_backend, ami_id, instance=None, source_ami=None, - name=None, description=None): + name=None, description=None, owner_id=None, + + public=False, virtualization_type=None, architecture=None, + state='available', creation_date=None, platform=None, + image_type='machine', image_location=None, hypervisor=None, + root_device_type=None, root_device_name=None, sriov='simple', + region_name='us-east-1a' + ): self.ec2_backend = ec2_backend self.id = ami_id - self.state = "available" + self.state = state self.name = name + self.image_type = image_type + self.image_location = image_location + self.owner_id = owner_id self.description = description - self.virtualization_type = None - self.architecture = None + self.virtualization_type = virtualization_type + self.architecture = architecture self.kernel_id = None - self.platform = None - self.creation_date = utc_date_and_time() + self.platform = platform + self.hypervisor = hypervisor + self.root_device_name = root_device_name + self.root_device_type = root_device_type + self.sriov = sriov + self.creation_date = utc_date_and_time() if creation_date is None else creation_date if instance: self.instance = instance @@ -1023,8 +1063,11 @@ class Ami(TaggedEC2Resource): self.launch_permission_groups = set() self.launch_permission_users = set() + if public: + self.launch_permission_groups.add('all') + # AWS auto-creates these, we should reflect the same. 
- volume = self.ec2_backend.create_volume(15, "us-east-1a") + volume = self.ec2_backend.create_volume(15, region_name) self.ebs_snapshot = self.ec2_backend.create_snapshot( volume.id, "Auto-created snapshot for AMI %s" % self.id) @@ -1051,6 +1094,8 @@ class Ami(TaggedEC2Resource): return self.state elif filter_name == 'name': return self.name + elif filter_name == 'owner-id': + return self.owner_id else: return super(Ami, self).get_filter_value( filter_name, 'DescribeImages') @@ -1059,14 +1104,22 @@ class AmiBackend(object): def __init__(self): self.amis = {} + + self._load_amis() + super(AmiBackend, self).__init__() - def create_image(self, instance_id, name=None, description=None): + def _load_amis(self): + for ami in AMIS: + ami_id = ami['ami_id'] + self.amis[ami_id] = Ami(self, **ami) + + def create_image(self, instance_id, name=None, description=None, owner_id=None): # TODO: check that instance exists and pull info from it. ami_id = random_ami_id() instance = self.get_instance(instance_id) ami = Ami(self, ami_id, instance=instance, source_ami=None, - name=name, description=description) + name=name, description=description, owner_id=owner_id) self.amis[ami_id] = ami return ami @@ -1079,30 +1132,29 @@ class AmiBackend(object): self.amis[ami_id] = ami return ami - def describe_images(self, ami_ids=(), filters=None, exec_users=None): - images = [] + def describe_images(self, ami_ids=(), filters=None, exec_users=None, owners=None): + images = self.amis.values() + + # Limit images by launch permissions if exec_users: - for ami_id in self.amis: - found = False + tmp_images = [] + for ami in images: for user_id in exec_users: - if user_id in self.amis[ami_id].launch_permission_users: - found = True - if found: - images.append(self.amis[ami_id]) - if images == []: - return images + if user_id in ami.launch_permission_users: + tmp_images.append(ami) + images = tmp_images + + # Limit by owner ids + if owners: + images = [ami for ami in images if ami.owner_id in owners] + + if ami_ids: + images = [ami for ami in images if ami.id in ami_ids] + + # Generic filters if filters: - images = images or self.amis.values() return generic_filter(filters, images) - else: - for ami_id in ami_ids: - if ami_id in self.amis: - images.append(self.amis[ami_id]) - elif not ami_id.startswith("ami-"): - raise MalformedAMIIdError(ami_id) - else: - raise InvalidAMIIdError(ami_id) - return images or self.amis.values() + return images def deregister_image(self, ami_id): if ami_id in self.amis: @@ -2127,10 +2179,17 @@ class Subnet(TaggedEC2Resource): self.id = subnet_id self.vpc_id = vpc_id self.cidr_block = cidr_block + self.cidr = ipaddress.ip_network(six.text_type(self.cidr_block)) self._availability_zone = availability_zone self.default_for_az = default_for_az self.map_public_ip_on_launch = map_public_ip_on_launch + # Assign IPs lazily as we go (a /8 has 16,777,214 usable IPs) + self._subnet_ip_generator = self.cidr.hosts() + self.reserved_ips = [six.next(self._subnet_ip_generator) for _ in range(0, 3)] # Reserved by AWS + self._unused_ips = set() # if an instance is destroyed, hold its IP here for reuse + self._subnet_ips = {} # maps IP -> instance + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] @@ -2197,6 +2256,46 @@ class Subnet(TaggedEC2Resource): '"Fn::GetAtt" : [ "{0}" , "AvailabilityZone" ]"') raise UnformattedGetAttTemplateException()
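`AmiBackend` now pre-loads the canned images from `resources/amis.json`, and `describe_images` narrows by launch permissions, owner ids, image ids and generic filters in turn, rather than the old either/or logic. A fresh mock therefore answers owner queries out of the box (the owner id below is taken from the canned data):

```python
import boto3
from moto import mock_ec2

@mock_ec2
def test_canned_amis():
    client = boto3.client('ec2', region_name='us-east-1')

    # With no arguments, every canned image comes back.
    assert client.describe_images()['Images']

    # Owners=... and the new 'owner-id' filter both narrow by owner.
    amazon = client.describe_images(Owners=['801119661308'])['Images']
    assert all(img['OwnerId'] == '801119661308' for img in amazon)
```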
+ def get_available_subnet_ip(self, instance): + try: + new_ip = self._unused_ips.pop() + except KeyError: + new_ip = six.next(self._subnet_ip_generator) + + # Skip any IPs that have been manually specified + while str(new_ip) in self._subnet_ips: + new_ip = six.next(self._subnet_ip_generator) + + if new_ip == self.cidr.broadcast_address: + raise StopIteration() # The broadcast address can't be used, obviously + # TODO: StopIteration will be raised if no IPs are available; not sure how AWS handles this. + + new_ip = str(new_ip) + self._subnet_ips[new_ip] = instance + + return new_ip + + def request_ip(self, ip, instance): + if ipaddress.ip_address(ip) not in self.cidr: + raise Exception('IP does not fall in the subnet CIDR of {0}'.format(self.cidr)) + + if ip in self._subnet_ips: + raise Exception('IP already in use') + try: + self._unused_ips.remove(ip) + except KeyError: + pass + + self._subnet_ips[ip] = instance + return ip + + def del_subnet_ip(self, ip): + try: + del self._subnet_ips[ip] + self._unused_ips.add(ip) + except KeyError: + pass # Unknown IP + class SubnetBackend(object): def __init__(self): @@ -3619,8 +3718,8 @@ class NatGatewayBackend(object): return self.nat_gateways.pop(nat_gateway_id) -class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend, - RegionsAndZonesBackend, SecurityGroupBackend, EBSBackend, +class EC2Backend(BaseBackend, InstanceBackend, TagBackend, EBSBackend, + RegionsAndZonesBackend, SecurityGroupBackend, AmiBackend, VPCBackend, SubnetBackend, SubnetRouteTableAssociationBackend, NetworkInterfaceBackend, VPNConnectionBackend, VPCPeeringConnectionBackend, diff --git a/moto/ec2/resources/amis.json b/moto/ec2/resources/amis.json new file mode 100644 index 000000000..5cc3122f3 --- /dev/null +++ b/moto/ec2/resources/amis.json @@ -0,0 +1,546 @@ +[ + { + "ami_id": "ami-03cf127a", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 Nano Locale English AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Nano-Base-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-12c6146b", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2008 R2 SP1 Datacenter 64-bit Locale English Base AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2008-R2_SP1-English-64Bit-Base-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-1812c061", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 Locale English with SQL Standard 2016 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Full-SQL_2016_SP1_Standard-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-1e749f67", + "state": "available", + "public": true, + "owner_id": "099720109477", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Canonical, Ubuntu, 14.04 LTS, amd64 trusty image build 
on 2017-07-27", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": "ubuntu/images/hvm-ssd/ubuntu-trusty-14.04-amd64-server-20170727", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-1ecc1e67", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2012-R2_RTM-English-64Bit-Base-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-1f12c066", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 Locale English with SQL Express 2016 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Full-SQL_2016_SP1_Express-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-24f3215d", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Web 2014 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2014_SP2_Web-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-35e92e4c", + "state": "available", + "public": true, + "owner_id": "013907871322", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "SUSE Linux Enterprise Server 12 SP3 (HVM, 64-bit, SSD-Backed)", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": "suse-sles-12-sp3-v20170907-hvm-ssd-x86_64", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-3bf32142", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Express 2016 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2016_SP1_Express-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-3df32144", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Enterprise 2016 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2016_SP1_Enterprise-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-56ec3e2f", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 
Locale English with SQL Express 2017 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Full-SQL_2017_Express-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-61db0918", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2003 R2 SP2 Datacenter 64-bit Locale English Base AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2003-R2_SP2-English-64Bit-Base-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-6ef02217", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Web 2016 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2016_SP1_Web-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-760aaa0f", + "state": "available", + "public": true, + "owner_id": "137112412989", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/xvda", + "description": "Amazon Linux AMI 2017.09.1.20171103 x86_64 HVM GP2", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": "amzn-ami-hvm-2017.09.1.20171103-x86_64-gp2", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-77ed3f0e", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 Full Locale English with SQL Enterprise 2016 SP1 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Full-SQL_2016_SP1_Enterprise-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-785db401", + "state": "available", + "public": true, + "owner_id": "099720109477", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Canonical, Ubuntu, 16.04 LTS, amd64 xenial image build on 2017-07-21", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20170721", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-8104a4f8", + "state": "available", + "public": true, + "owner_id": "137112412989", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Amazon Linux AMI 2017.09.1.20171103 x86_64 PV EBS", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": "amzn-ami-pv-2017.09.1.20171103-x86_64-ebs", + "virtualization_type": "paravirtual", + "hypervisor": "xen" + }, + { + "ami_id": "ami-84ee3cfd", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 Locale English with SQL Web 2017 AMI provided by Amazon", + 
"image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Full-SQL_2017_Web-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-86ee3cff", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 Locale English with SQL Standard 2017 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Full-SQL_2017_Standard-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-999844e0", + "state": "available", + "public": true, + "owner_id": "898082745236", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/xvda", + "description": "Deep Learning on Amazon Linux with MXNet, Tensorflow, Caffe, Theano, Torch, CNTK and Keras", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": "Deep Learning AMI Amazon Linux - 3.3_Oct2017", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-9b32e8e2", + "state": "available", + "public": true, + "owner_id": "898082745236", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "CUDA9 Classic Ubuntu DLAMI 1508914531", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": "Ubuntu CUDA9 DLAMI with MXNet/TF/Caffe2", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-a9cc1ed0", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Standard 2014 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2014_SP2_Standard-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-afee3cd6", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 Locale English with SQL Web 2016 SP1 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Full-SQL_2016_SP1_Web-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-b7e93bce", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 with Desktop Experience Locale English AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Full-Base-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-bb9a6bc2", + "state": "available", + "public": true, + "owner_id": "309956199498", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Provided by Red Hat, Inc.", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": 
"RHEL-7.4_HVM_GA-20170808-x86_64-2-Hourly2-GP2", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-bceb39c5", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 with Containers Locale English AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Full-Containers-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-c2ff2dbb", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2012 RTM 64-bit Locale English Base AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2012-RTM-English-64Bit-Base-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-c6f321bf", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Express 2014 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2014_SP2_Express-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-d1cb19a8", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2008 SP2 Datacenter 64-bit Locale English Base AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2008-SP2-English-64Bit-Base-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-dca37ea5", + "state": "available", + "public": true, + "owner_id": "898082745236", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Deep Learning on Ubuntu Linux with MXNet, Tensorflow, Caffe, Theano, Torch, CNTK and Keras", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": "Deep Learning AMI Ubuntu Linux - 2.4_Oct2017", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-f0e83a89", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 Locale English with SQL Enterprise 2017 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Full-SQL_2017_Enterprise-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-f4cf1d8d", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Standard 2016 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": 
"x86_64", + "name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2016_SP1_Standard-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-f8e54081", + "state": "available", + "public": true, + "owner_id": "898082745236", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/xvda", + "description": "CUDA9 Classic Amazon Linux DLAMI 1508914924", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": "CUDA9ClassicAmazonLinuxDLAMIwithMXNetTensorflowandCaffe2 ", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-fa7cdd89", + "state": "available", + "public": true, + "owner_id": "013907871322", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda", + "description": "SUSE Linux Enterprise Server 11 Service Pack 4 ((PV, 64-bit, SSD-Backed)", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": "suse-sles-11-sp4-v20151207-pv-ssd-x86_64", + "virtualization_type": "paravirtual", + "hypervisor": "xen" + } +] \ No newline at end of file diff --git a/moto/ec2/responses/amis.py b/moto/ec2/responses/amis.py index c92471093..19e6d31a1 100755 --- a/moto/ec2/responses/amis.py +++ b/moto/ec2/responses/amis.py @@ -36,9 +36,10 @@ class AmisResponse(BaseResponse): def describe_images(self): ami_ids = self._get_multi_param('ImageId') filters = filters_from_querystring(self.querystring) + owners = self._get_multi_param('Owner') exec_users = self._get_multi_param('ExecutableBy') images = self.ec2_backend.describe_images( - ami_ids=ami_ids, filters=filters, exec_users=exec_users) + ami_ids=ami_ids, filters=filters, exec_users=exec_users, owners=owners) template = self.response_template(DESCRIBE_IMAGES_RESPONSE) return template.render(images=images) @@ -92,12 +93,12 @@ DESCRIBE_IMAGES_RESPONSE = """= 10 and key not in self.tags: @@ -186,6 +278,27 @@ class FakeLoadBalancer(BaseModel): ''' Not exposed as part of the ELB API - used for CloudFormation. 
''' elbv2_backends[region].delete_load_balancer(self.arn) + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + + elbv2_backend = elbv2_backends[region_name] + + name = properties.get('Name', resource_name) + security_groups = properties.get("SecurityGroups") + subnet_ids = properties.get('Subnets') + scheme = properties.get('Scheme', 'internet-facing') + + load_balancer = elbv2_backend.create_load_balancer(name, security_groups, subnet_ids, scheme=scheme) + return load_balancer + + def get_cfn_attribute(self, attribute_name): + attributes = { + 'DNSName': self.dns_name, + 'LoadBalancerName': self.name, + } + return attributes[attribute_name] + class ELBv2Backend(BaseBackend): @@ -194,6 +307,26 @@ class ELBv2Backend(BaseBackend): self.target_groups = OrderedDict() self.load_balancers = OrderedDict() + @property + def ec2_backend(self): + """ + EC2 backend + + :return: EC2 Backend + :rtype: moto.ec2.models.EC2Backend + """ + return ec2_backends[self.region_name] + + @property + def acm_backend(self): + """ + ACM backend + + :return: ACM Backend + :rtype: moto.acm.models.AWSCertificateManagerBackend + """ + return acm_backends[self.region_name] + def reset(self): region_name = self.region_name self.__dict__ = {} @@ -201,18 +334,17 @@ class ELBv2Backend(BaseBackend): def create_load_balancer(self, name, security_groups, subnet_ids, scheme='internet-facing'): vpc_id = None - ec2_backend = ec2_backends[self.region_name] subnets = [] if not subnet_ids: raise SubnetNotFoundError() for subnet_id in subnet_ids: - subnet = ec2_backend.get_subnet(subnet_id) + subnet = self.ec2_backend.get_subnet(subnet_id) if subnet is None: raise SubnetNotFoundError() subnets.append(subnet) vpc_id = subnets[0].vpc_id - arn = "arn:aws:elasticloadbalancing:%s:1:loadbalancer/%s/50dc6c495c0c9188" % (self.region_name, name) + arn = make_arn_for_load_balancer(account_id=1, name=name, region_name=self.region_name) dns_name = "%s-1.%s.elb.amazonaws.com" % (name, self.region_name) if arn in self.load_balancers: @@ -279,7 +411,7 @@ class ELBv2Backend(BaseBackend): def create_target_group(self, name, **kwargs): if len(name) > 32: raise InvalidTargetGroupNameError( "Target group name '%s' cannot be longer than '32' characters" % name ) if not re.match('^[a-zA-Z0-9\-]+$', name): raise InvalidTargetGroupNameError( @@ -300,7 +432,30 @@ class ELBv2Backend(BaseBackend): if target_group.name == name: raise DuplicateTargetGroupName() - arn = "arn:aws:elasticloadbalancing:%s:1:targetgroup/%s/50dc6c495c0c9188" % (self.region_name, name) + valid_protocols = ['HTTPS', 'HTTP', 'TCP'] + if kwargs['healthcheck_protocol'] not in valid_protocols: + raise InvalidConditionValueError( + "Value {} at 'healthCheckProtocol' failed to satisfy constraint: " + "Member must satisfy enum value set: {}".format(kwargs['healthcheck_protocol'], valid_protocols)) + if kwargs['protocol'] not in valid_protocols: + raise InvalidConditionValueError( + "Value {} at 'protocol' failed to satisfy constraint: " + "Member must satisfy enum value set: {}".format(kwargs['protocol'], valid_protocols)) + + if FakeTargetGroup.HTTP_CODE_REGEX.match(kwargs['matcher']['HttpCode']) is None: + raise RESTError('InvalidParameterValue', 'HttpCode must be like 200 | 200-399 | 200,201 ...') +
+ arn = make_arn_for_target_group(account_id=1, name=name, region_name=self.region_name) target_group = FakeTargetGroup(name, arn, **kwargs) self.target_groups[target_group.arn] = target_group return target_group @@ -547,6 +702,166 @@ class ELBv2Backend(BaseBackend): modified_rules.append(given_rule) return modified_rules + def set_ip_address_type(self, arn, ip_type): + if ip_type not in ('internal', 'dualstack'): + raise RESTError('InvalidParameterValue', 'IpAddressType must be either internal | dualstack') + + balancer = self.load_balancers.get(arn) + if balancer is None: + raise LoadBalancerNotFoundError() + + if ip_type == 'dualstack' and balancer.scheme == 'internal': + raise RESTError('InvalidConfigurationRequest', 'Internal load balancers cannot be dualstack') + + balancer.stack = ip_type + + def set_security_groups(self, arn, sec_groups): + balancer = self.load_balancers.get(arn) + if balancer is None: + raise LoadBalancerNotFoundError() + + # Check all security groups exist + for sec_group_id in sec_groups: + if self.ec2_backend.get_security_group_from_id(sec_group_id) is None: + raise RESTError('InvalidSecurityGroup', 'Security group {0} does not exist'.format(sec_group_id)) + + balancer.security_groups = sec_groups + + def set_subnets(self, arn, subnets): + balancer = self.load_balancers.get(arn) + if balancer is None: + raise LoadBalancerNotFoundError() + + subnet_objects = [] + sub_zone_list = {} + for subnet in subnets: + try: + subnet = self.ec2_backend.get_subnet(subnet) + + if subnet.availability_zone in sub_zone_list: + raise RESTError('InvalidConfigurationRequest', 'More than 1 subnet cannot be specified for 1 availability zone') + + sub_zone_list[subnet.availability_zone] = subnet.id + subnet_objects.append(subnet) + except Exception: + raise SubnetNotFoundError() + + if len(sub_zone_list) < 2: + raise RESTError('InvalidConfigurationRequest', 'More than 1 availability zone must be specified') + + balancer.subnets = subnet_objects + + return sub_zone_list.items() + + def modify_load_balancer_attributes(self, arn, attrs): + balancer = self.load_balancers.get(arn) + if balancer is None: + raise LoadBalancerNotFoundError() + + for key in attrs: + if key not in FakeLoadBalancer.VALID_ATTRS: + raise RESTError('InvalidConfigurationRequest', 'Key {0} not valid'.format(key)) + + balancer.attrs.update(attrs) + return balancer.attrs + + def describe_load_balancer_attributes(self, arn): + balancer = self.load_balancers.get(arn) + if balancer is None: + raise LoadBalancerNotFoundError() + + return balancer.attrs + + def modify_target_group(self, arn, health_check_proto=None, health_check_port=None, health_check_path=None, health_check_interval=None, + health_check_timeout=None, healthy_threshold_count=None, unhealthy_threshold_count=None, http_codes=None): + target_group = self.target_groups.get(arn) + if target_group is None: + raise TargetGroupNotFoundError() + + if http_codes is not None and FakeTargetGroup.HTTP_CODE_REGEX.match(http_codes) is None: + raise RESTError('InvalidParameterValue', 'HttpCode must be like 200 | 200-399 | 200,201 ...') +
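The validations added to `create_target_group` above (the protocol enums plus the `HttpCode` matcher regex) can be exercised with a short sketch; the VPC CIDR and target group names are illustrative only, and `HealthCheckProtocol` is passed explicitly so the backend's enum check sees a known value:

import boto3
from botocore.exceptions import ClientError
from moto import mock_ec2, mock_elbv2


@mock_ec2
@mock_elbv2
def test_target_group_matcher_validation():
    ec2 = boto3.resource('ec2', region_name='us-east-1')
    vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24')
    elbv2 = boto3.client('elbv2', region_name='us-east-1')

    # A well-formed matcher passes the HTTP_CODE_REGEX check
    elbv2.create_target_group(Name='web', Protocol='HTTP', Port=80, VpcId=vpc.id,
                              HealthCheckProtocol='HTTP', Matcher={'HttpCode': '200,201'})

    # A malformed matcher is rejected with InvalidParameterValue
    try:
        elbv2.create_target_group(Name='web2', Protocol='HTTP', Port=80, VpcId=vpc.id,
                                  HealthCheckProtocol='HTTP', Matcher={'HttpCode': 'abc'})
        raise AssertionError('expected InvalidParameterValue')
    except ClientError as err:
        assert err.response['Error']['Code'] == 'InvalidParameterValue'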
+ if http_codes is not None: + target_group.matcher['HttpCode'] = http_codes + if health_check_interval is not None: + target_group.healthcheck_interval_seconds = health_check_interval + if health_check_path is not None: + target_group.healthcheck_path = health_check_path + if health_check_port is not None: + target_group.healthcheck_port = health_check_port + if health_check_proto is not None: + target_group.healthcheck_protocol = health_check_proto + if health_check_timeout is not None: + target_group.healthcheck_timeout_seconds = health_check_timeout + if healthy_threshold_count is not None: + target_group.healthy_threshold_count = healthy_threshold_count + if unhealthy_threshold_count is not None: + target_group.unhealthy_threshold_count = unhealthy_threshold_count + + return target_group + + def modify_listener(self, arn, port=None, protocol=None, ssl_policy=None, certificates=None, default_actions=None): + for load_balancer in self.load_balancers.values(): + if arn in load_balancer.listeners: + break + else: + raise ListenerNotFoundError() + + listener = load_balancer.listeners[arn] + + if port is not None: + for listener_arn, current_listener in load_balancer.listeners.items(): + if listener_arn == arn: + continue + if current_listener.port == port: + raise DuplicateListenerError() + + listener.port = port + + if protocol is not None: + if protocol not in ('HTTP', 'HTTPS', 'TCP'): + raise RESTError('UnsupportedProtocol', 'Protocol {0} is not supported'.format(protocol)) + + # HTTPS checks + if protocol == 'HTTPS': + # HTTPS + + # Might already be HTTPS so may not provide certs + if certificates is None and listener.protocol != 'HTTPS': + raise RESTError('InvalidConfigurationRequest', 'Certificates must be provided for HTTPS') + + # Check certificates exist + if certificates is not None: + default_cert = None + all_certs = set() # for SNI + for cert in certificates: + if cert['is_default'] == 'true': + default_cert = cert['certificate_arn'] + try: + self.acm_backend.get_certificate(cert['certificate_arn']) + except Exception: + raise RESTError('CertificateNotFound', 'Certificate {0} not found'.format(cert['certificate_arn'])) + + all_certs.add(cert['certificate_arn']) + + if default_cert is None: + raise RESTError('InvalidConfigurationRequest', 'No default certificate') + + listener.certificate = default_cert + listener.certificates = list(all_certs) + + listener.protocol = protocol + + if ssl_policy is not None: + # It's already validated in responses.py + listener.ssl_policy = ssl_policy + + if default_actions is not None: + # Is currently not validated + listener.default_actions = default_actions + + return listener + def _any_listener_using(self, target_group_arn): for load_balancer in self.load_balancers.values(): for listener in load_balancer.listeners.values(): diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index 3e8535187..aa855b430 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -1,4 +1,6 @@ from __future__ import unicode_literals +from moto.core.exceptions import RESTError +from moto.core.utils import amzn_request_id from moto.core.responses import BaseResponse from .models import elbv2_backends from .exceptions import DuplicateTagKeysError @@ -6,12 +8,131 @@ from .exceptions import LoadBalancerNotFoundError from .exceptions import TargetGroupNotFoundError -class ELBV2Response(BaseResponse): +SSL_POLICIES = [ + { + 'name': 'ELBSecurityPolicy-2016-08', + 'ssl_protocols': ['TLSv1', 'TLSv1.1', 'TLSv1.2'], + 'ciphers': [ + {'name':
'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1}, + {'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2}, + {'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3}, + {'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4}, + {'name': 'ECDHE-ECDSA-AES128-SHA', 'priority': 5}, + {'name': 'ECDHE-RSA-AES128-SHA', 'priority': 6}, + {'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 7}, + {'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 8}, + {'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 9}, + {'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 10}, + {'name': 'ECDHE-RSA-AES256-SHA', 'priority': 11}, + {'name': 'ECDHE-ECDSA-AES256-SHA', 'priority': 12}, + {'name': 'AES128-GCM-SHA256', 'priority': 13}, + {'name': 'AES128-SHA256', 'priority': 14}, + {'name': 'AES128-SHA', 'priority': 15}, + {'name': 'AES256-GCM-SHA384', 'priority': 16}, + {'name': 'AES256-SHA256', 'priority': 17}, + {'name': 'AES256-SHA', 'priority': 18} + ], + }, + { + 'name': 'ELBSecurityPolicy-TLS-1-2-2017-01', + 'ssl_protocols': ['TLSv1.2'], + 'ciphers': [ + {'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1}, + {'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2}, + {'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3}, + {'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4}, + {'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 5}, + {'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 6}, + {'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 7}, + {'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 8}, + {'name': 'AES128-GCM-SHA256', 'priority': 9}, + {'name': 'AES128-SHA256', 'priority': 10}, + {'name': 'AES256-GCM-SHA384', 'priority': 11}, + {'name': 'AES256-SHA256', 'priority': 12} + ] + }, + { + 'name': 'ELBSecurityPolicy-TLS-1-1-2017-01', + 'ssl_protocols': ['TLSv1.1', 'TLSv1.2'], + 'ciphers': [ + {'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1}, + {'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2}, + {'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3}, + {'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4}, + {'name': 'ECDHE-ECDSA-AES128-SHA', 'priority': 5}, + {'name': 'ECDHE-RSA-AES128-SHA', 'priority': 6}, + {'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 7}, + {'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 8}, + {'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 9}, + {'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 10}, + {'name': 'ECDHE-RSA-AES256-SHA', 'priority': 11}, + {'name': 'ECDHE-ECDSA-AES256-SHA', 'priority': 12}, + {'name': 'AES128-GCM-SHA256', 'priority': 13}, + {'name': 'AES128-SHA256', 'priority': 14}, + {'name': 'AES128-SHA', 'priority': 15}, + {'name': 'AES256-GCM-SHA384', 'priority': 16}, + {'name': 'AES256-SHA256', 'priority': 17}, + {'name': 'AES256-SHA', 'priority': 18} + ] + }, + { + 'name': 'ELBSecurityPolicy-2015-05', + 'ssl_protocols': ['TLSv1', 'TLSv1.1', 'TLSv1.2'], + 'ciphers': [ + {'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1}, + {'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2}, + {'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3}, + {'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4}, + {'name': 'ECDHE-ECDSA-AES128-SHA', 'priority': 5}, + {'name': 'ECDHE-RSA-AES128-SHA', 'priority': 6}, + {'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 7}, + {'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 8}, + {'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 9}, + {'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 10}, + {'name': 'ECDHE-RSA-AES256-SHA', 'priority': 11}, + {'name': 'ECDHE-ECDSA-AES256-SHA', 'priority': 12}, + {'name': 'AES128-GCM-SHA256', 
'priority': 13}, + {'name': 'AES128-SHA256', 'priority': 14}, + {'name': 'AES128-SHA', 'priority': 15}, + {'name': 'AES256-GCM-SHA384', 'priority': 16}, + {'name': 'AES256-SHA256', 'priority': 17}, + {'name': 'AES256-SHA', 'priority': 18} + ] + }, + { + 'name': 'ELBSecurityPolicy-TLS-1-0-2015-04', + 'ssl_protocols': ['TLSv1', 'TLSv1.1', 'TLSv1.2'], + 'ciphers': [ + {'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1}, + {'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2}, + {'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3}, + {'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4}, + {'name': 'ECDHE-ECDSA-AES128-SHA', 'priority': 5}, + {'name': 'ECDHE-RSA-AES128-SHA', 'priority': 6}, + {'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 7}, + {'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 8}, + {'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 9}, + {'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 10}, + {'name': 'ECDHE-RSA-AES256-SHA', 'priority': 11}, + {'name': 'ECDHE-ECDSA-AES256-SHA', 'priority': 12}, + {'name': 'AES128-GCM-SHA256', 'priority': 13}, + {'name': 'AES128-SHA256', 'priority': 14}, + {'name': 'AES128-SHA', 'priority': 15}, + {'name': 'AES256-GCM-SHA384', 'priority': 16}, + {'name': 'AES256-SHA256', 'priority': 17}, + {'name': 'AES256-SHA', 'priority': 18}, + {'name': 'DES-CBC3-SHA', 'priority': 19} + ] + } +] + +class ELBV2Response(BaseResponse): @property def elbv2_backend(self): return elbv2_backends[self.region] + @amzn_request_id def create_load_balancer(self): load_balancer_name = self._get_param('Name') subnet_ids = self._get_multi_param("Subnets.member") @@ -28,6 +149,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(CREATE_LOAD_BALANCER_TEMPLATE) return template.render(load_balancer=load_balancer) + @amzn_request_id def create_rule(self): lister_arn = self._get_param('ListenerArn') _conditions = self._get_list_prefix('Conditions.member') @@ -52,6 +174,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(CREATE_RULE_TEMPLATE) return template.render(rules=rules) + @amzn_request_id def create_target_group(self): name = self._get_param('Name') vpc_id = self._get_param('VpcId') @@ -64,6 +187,7 @@ class ELBV2Response(BaseResponse): healthcheck_timeout_seconds = self._get_param('HealthCheckTimeoutSeconds', '5') healthy_threshold_count = self._get_param('HealthyThresholdCount', '5') unhealthy_threshold_count = self._get_param('UnhealthyThresholdCount', '2') + http_codes = self._get_param('Matcher.HttpCode', '200') target_group = self.elbv2_backend.create_target_group( name, @@ -77,11 +201,13 @@ class ELBV2Response(BaseResponse): healthcheck_timeout_seconds=healthcheck_timeout_seconds, healthy_threshold_count=healthy_threshold_count, unhealthy_threshold_count=unhealthy_threshold_count, + matcher={'HttpCode': http_codes} ) template = self.response_template(CREATE_TARGET_GROUP_TEMPLATE) return template.render(target_group=target_group) + @amzn_request_id def create_listener(self): load_balancer_arn = self._get_param('LoadBalancerArn') protocol = self._get_param('Protocol') @@ -105,6 +231,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(CREATE_LISTENER_TEMPLATE) return template.render(listener=listener) + @amzn_request_id def describe_load_balancers(self): arns = self._get_multi_param("LoadBalancerArns.member") names = self._get_multi_param("Names.member") @@ -124,6 +251,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(DESCRIBE_LOAD_BALANCERS_TEMPLATE) return 
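These canned policies mirror the documented ELB security policies, and the `describe_ssl_policies` handler added further down serves them. A sketch of how they surface through boto3 (policy name and cipher count taken from the list above):

import boto3
from moto import mock_elbv2


@mock_elbv2
def test_describe_ssl_policies():
    client = boto3.client('elbv2', region_name='us-east-1')
    resp = client.describe_ssl_policies(Names=['ELBSecurityPolicy-TLS-1-2-2017-01'])
    policy = resp['SslPolicies'][0]
    # The TLS-1.2-only policy carries exactly the twelve ciphers listed above
    assert policy['SslProtocols'] == ['TLSv1.2']
    assert len(policy['Ciphers']) == 12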
template.render(load_balancers=load_balancers_resp, marker=next_marker) + @amzn_request_id def describe_rules(self): listener_arn = self._get_param('ListenerArn') rule_arns = self._get_multi_param('RuleArns.member') if any(k for k in list(self.querystring.keys()) if k.startswith('RuleArns.member')) else None @@ -144,6 +272,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(DESCRIBE_RULES_TEMPLATE) return template.render(rules=rules_resp, marker=next_marker) + @amzn_request_id def describe_target_groups(self): load_balancer_arn = self._get_param('LoadBalancerArn') target_group_arns = self._get_multi_param('TargetGroupArns.member') @@ -153,6 +282,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(DESCRIBE_TARGET_GROUPS_TEMPLATE) return template.render(target_groups=target_groups) + @amzn_request_id def describe_target_group_attributes(self): target_group_arn = self._get_param('TargetGroupArn') target_group = self.elbv2_backend.target_groups.get(target_group_arn) @@ -161,6 +291,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(DESCRIBE_TARGET_GROUP_ATTRIBUTES_TEMPLATE) return template.render(attributes=target_group.attributes) + @amzn_request_id def describe_listeners(self): load_balancer_arn = self._get_param('LoadBalancerArn') listener_arns = self._get_multi_param('ListenerArns.member') @@ -171,30 +302,35 @@ class ELBV2Response(BaseResponse): template = self.response_template(DESCRIBE_LISTENERS_TEMPLATE) return template.render(listeners=listeners) + @amzn_request_id def delete_load_balancer(self): arn = self._get_param('LoadBalancerArn') self.elbv2_backend.delete_load_balancer(arn) template = self.response_template(DELETE_LOAD_BALANCER_TEMPLATE) return template.render() + @amzn_request_id def delete_rule(self): arn = self._get_param('RuleArn') self.elbv2_backend.delete_rule(arn) template = self.response_template(DELETE_RULE_TEMPLATE) return template.render() + @amzn_request_id def delete_target_group(self): arn = self._get_param('TargetGroupArn') self.elbv2_backend.delete_target_group(arn) template = self.response_template(DELETE_TARGET_GROUP_TEMPLATE) return template.render() + @amzn_request_id def delete_listener(self): arn = self._get_param('ListenerArn') self.elbv2_backend.delete_listener(arn) template = self.response_template(DELETE_LISTENER_TEMPLATE) return template.render() + @amzn_request_id def modify_rule(self): rule_arn = self._get_param('RuleArn') _conditions = self._get_list_prefix('Conditions.member') @@ -217,6 +353,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(MODIFY_RULE_TEMPLATE) return template.render(rules=rules) + @amzn_request_id def modify_target_group_attributes(self): target_group_arn = self._get_param('TargetGroupArn') target_group = self.elbv2_backend.target_groups.get(target_group_arn) @@ -230,6 +367,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(MODIFY_TARGET_GROUP_ATTRIBUTES_TEMPLATE) return template.render(attributes=attributes) + @amzn_request_id def register_targets(self): target_group_arn = self._get_param('TargetGroupArn') targets = self._get_list_prefix('Targets.member') @@ -238,6 +376,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(REGISTER_TARGETS_TEMPLATE) return template.render() + @amzn_request_id def deregister_targets(self): target_group_arn = self._get_param('TargetGroupArn') targets = self._get_list_prefix('Targets.member') @@ -246,6 +385,7 @@ class ELBV2Response(BaseResponse): template = 
self.response_template(DEREGISTER_TARGETS_TEMPLATE) return template.render() + @amzn_request_id def describe_target_health(self): target_group_arn = self._get_param('TargetGroupArn') targets = self._get_list_prefix('Targets.member') @@ -254,6 +394,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(DESCRIBE_TARGET_HEALTH_TEMPLATE) return template.render(target_health_descriptions=target_health_descriptions) + @amzn_request_id def set_rule_priorities(self): rule_priorities = self._get_list_prefix('RulePriorities.member') for rule_priority in rule_priorities: @@ -262,6 +403,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(SET_RULE_PRIORITIES_TEMPLATE) return template.render(rules=rules) + @amzn_request_id def add_tags(self): resource_arns = self._get_multi_param('ResourceArns.member') @@ -281,6 +423,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(ADD_TAGS_TEMPLATE) return template.render() + @amzn_request_id def remove_tags(self): resource_arns = self._get_multi_param('ResourceArns.member') tag_keys = self._get_multi_param('TagKeys.member') @@ -301,6 +444,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(REMOVE_TAGS_TEMPLATE) return template.render() + @amzn_request_id def describe_tags(self): resource_arns = self._get_multi_param('ResourceArns.member') resources = [] @@ -320,6 +464,125 @@ class ELBV2Response(BaseResponse): template = self.response_template(DESCRIBE_TAGS_TEMPLATE) return template.render(resources=resources) + @amzn_request_id + def describe_account_limits(self): + # Supports paging but not worth implementing yet + # marker = self._get_param('Marker') + # page_size = self._get_param('PageSize') + + limits = { + 'application-load-balancers': 20, + 'target-groups': 3000, + 'targets-per-application-load-balancer': 30, + 'listeners-per-application-load-balancer': 50, + 'rules-per-application-load-balancer': 100, + 'network-load-balancers': 20, + 'targets-per-network-load-balancer': 200, + 'listeners-per-network-load-balancer': 50 + } + + template = self.response_template(DESCRIBE_LIMITS_TEMPLATE) + return template.render(limits=limits) + + @amzn_request_id + def describe_ssl_policies(self): + names = self._get_multi_param('Names.member.') + # Supports paging but not worth implementing yet + # marker = self._get_param('Marker') + # page_size = self._get_param('PageSize') + + policies = SSL_POLICIES + if names: + policies = filter(lambda policy: policy['name'] in names, policies) + + template = self.response_template(DESCRIBE_SSL_POLICIES_TEMPLATE) + return template.render(policies=policies) + + @amzn_request_id + def set_ip_address_type(self): + arn = self._get_param('LoadBalancerArn') + ip_type = self._get_param('IpAddressType') + + self.elbv2_backend.set_ip_address_type(arn, ip_type) + + template = self.response_template(SET_IP_ADDRESS_TYPE_TEMPLATE) + return template.render(ip_type=ip_type) + + @amzn_request_id + def set_security_groups(self): + arn = self._get_param('LoadBalancerArn') + sec_groups = self._get_multi_param('SecurityGroups.member.') + + self.elbv2_backend.set_security_groups(arn, sec_groups) + + template = self.response_template(SET_SECURITY_GROUPS_TEMPLATE) + return template.render(sec_groups=sec_groups) + + @amzn_request_id + def set_subnets(self): + arn = self._get_param('LoadBalancerArn') + subnets = self._get_multi_param('Subnets.member.') + + subnet_zone_list = self.elbv2_backend.set_subnets(arn, subnets) + + template = 
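`describe_account_limits` above returns a fixed quota table; since the Query protocol renders every value as text, `Max` comes back as a string. A minimal sketch:

import boto3
from moto import mock_elbv2


@mock_elbv2
def test_describe_account_limits():
    client = boto3.client('elbv2', region_name='us-east-1')
    limits = {limit['Name']: limit['Max']
              for limit in client.describe_account_limits()['Limits']}
    # Values are strings, matching the AWS wire format
    assert limits['target-groups'] == '3000'
    assert limits['application-load-balancers'] == '20'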
self.response_template(SET_SUBNETS_TEMPLATE) + return template.render(subnets=subnet_zone_list) + + @amzn_request_id + def modify_load_balancer_attributes(self): + arn = self._get_param('LoadBalancerArn') + attrs = self._get_map_prefix('Attributes.member', key_end='Key', value_end='Value') + + all_attrs = self.elbv2_backend.modify_load_balancer_attributes(arn, attrs) + + template = self.response_template(MODIFY_LOADBALANCER_ATTRS_TEMPLATE) + return template.render(attrs=all_attrs) + + @amzn_request_id + def describe_load_balancer_attributes(self): + arn = self._get_param('LoadBalancerArn') + attrs = self.elbv2_backend.describe_load_balancer_attributes(arn) + + template = self.response_template(DESCRIBE_LOADBALANCER_ATTRS_TEMPLATE) + return template.render(attrs=attrs) + + @amzn_request_id + def modify_target_group(self): + arn = self._get_param('TargetGroupArn') + + health_check_proto = self._get_param('HealthCheckProtocol') # 'HTTP' | 'HTTPS' | 'TCP', + health_check_port = self._get_param('HealthCheckPort') + health_check_path = self._get_param('HealthCheckPath') + health_check_interval = self._get_param('HealthCheckIntervalSeconds') + health_check_timeout = self._get_param('HealthCheckTimeoutSeconds') + healthy_threshold_count = self._get_param('HealthyThresholdCount') + unhealthy_threshold_count = self._get_param('UnhealthyThresholdCount') + http_codes = self._get_param('Matcher.HttpCode') + + target_group = self.elbv2_backend.modify_target_group(arn, health_check_proto, health_check_port, health_check_path, health_check_interval, + health_check_timeout, healthy_threshold_count, unhealthy_threshold_count, http_codes) + + template = self.response_template(MODIFY_TARGET_GROUP_TEMPLATE) + return template.render(target_group=target_group) + + @amzn_request_id + def modify_listener(self): + arn = self._get_param('ListenerArn') + port = self._get_param('Port') + protocol = self._get_param('Protocol') + ssl_policy = self._get_param('SslPolicy') + certificates = self._get_list_prefix('Certificates.member') + default_actions = self._get_list_prefix('DefaultActions.member') + + # Should really move SSL Policies to models + if ssl_policy is not None and ssl_policy not in [item['name'] for item in SSL_POLICIES]: + raise RESTError('SSLPolicyNotFound', 'Policy {0} not found'.format(ssl_policy)) + + listener = self.elbv2_backend.modify_listener(arn, port, protocol, ssl_policy, certificates, default_actions) + + template = self.response_template(MODIFY_LISTENER_TEMPLATE) + return template.render(listener=listener) + def _add_tags(self, resource): tag_values = [] tag_keys = [] @@ -348,14 +611,14 @@ class ELBV2Response(BaseResponse): ADD_TAGS_TEMPLATE = """ - 360e81f7-1100-11e4-b6ed-0f30EXAMPLE + {{ request_id }} """ REMOVE_TAGS_TEMPLATE = """ - 360e81f7-1100-11e4-b6ed-0f30EXAMPLE + {{ request_id }} """ @@ -378,11 +641,10 @@ DESCRIBE_TAGS_TEMPLATE = """ @@ -415,7 +677,7 @@ CREATE_LOAD_BALANCER_TEMPLATE = """ - 1549581b-12b7-11e3-895e-1334aEXAMPLE + {{ request_id }} """ DELETE_RULE_TEMPLATE = """ - 1549581b-12b7-11e3-895e-1334aEXAMPLE + {{ request_id }} """ DELETE_TARGET_GROUP_TEMPLATE = """ - 1549581b-12b7-11e3-895e-1334aEXAMPLE + {{ request_id }} """ DELETE_LISTENER_TEMPLATE = """ - 1549581b-12b7-11e3-895e-1334aEXAMPLE + {{ request_id }} """ @@ -572,6 +841,7 @@ DESCRIBE_LOAD_BALANCERS_TEMPLATE = """ @@ -671,11 +945,10 @@ DESCRIBE_TARGET_GROUP_ATTRIBUTES_TEMPLATE = """ - 70092c0e-f3a9-11e5-ae48-cff02092876b + {{ request_id }} """ - DESCRIBE_LISTENERS_TEMPLATE = """ @@ -706,7 +979,7 @@ 
DESCRIBE_LISTENERS_TEMPLATE = """ - 70092c0e-f3a9-11e5-ae48-cff02092876b + {{ request_id }} """ @@ -782,7 +1055,7 @@ REGISTER_TARGETS_TEMPLATE = """ - 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE + {{ request_id }} """ - DELETE_LOAD_BALANCER_LISTENERS = """ - 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE + {{ request_id }} """ @@ -837,7 +1109,7 @@ DESCRIBE_ATTRIBUTES_TEMPLATE = """ - 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE + {{ request_id }} """ @@ -871,7 +1143,7 @@ MODIFY_ATTRIBUTES_TEMPLATE = """ - 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE + {{ request_id }} """ @@ -887,7 +1159,7 @@ CREATE_LOAD_BALANCER_POLICY_TEMPLATE = """ - 07b1ecbc-1100-11e3-acaf-dd7edEXAMPLE + {{ request_id }} """ @@ -895,7 +1167,7 @@ SET_LOAD_BALANCER_POLICIES_OF_LISTENER_TEMPLATE = """ - 0eb9b381-dde0-11e2-8d78-6ddbaEXAMPLE + {{ request_id }} """ @@ -918,7 +1190,7 @@ DESCRIBE_TARGET_HEALTH_TEMPLATE = """ + + + {% for key, value in limits.items() %} + + {{ key }} + {{ value }} + + {% endfor %} + + + + {{ request_id }} + +""" + +DESCRIBE_SSL_POLICIES_TEMPLATE = """ + + + {% for policy in policies %} + + {{ policy['name'] }} + + {% for cipher in policy['ciphers'] %} + + {{ cipher['name'] }} + {{ cipher['priority'] }} + + {% endfor %} + + + {% for proto in policy['ssl_protocols'] %} + {{ proto }} + {% endfor %} + + + {% endfor %} + + + + {{ request_id }} + +""" + +SET_IP_ADDRESS_TYPE_TEMPLATE = """ + + {{ ip_type }} + + + {{ request_id }} + +""" + +SET_SECURITY_GROUPS_TEMPLATE = """ + + + {% for group in sec_groups %} + {{ group }} + {% endfor %} + + + + {{ request_id }} + +""" + +SET_SUBNETS_TEMPLATE = """ + + + {% for zone_id, subnet_id in subnets %} + + {{ subnet_id }} + {{ zone_id }} + + {% endfor %} + + + + {{ request_id }} + +""" + +MODIFY_LOADBALANCER_ATTRS_TEMPLATE = """ + + + {% for key, value in attrs.items() %} + + {% if value == None %}{% else %}{{ value }}{% endif %} + {{ key }} + + {% endfor %} + + + + {{ request_id }} + +""" + +DESCRIBE_LOADBALANCER_ATTRS_TEMPLATE = """ + + + {% for key, value in attrs.items() %} + + {% if value == None %}{% else %}{{ value }}{% endif %} + {{ key }} + + {% endfor %} + + + + {{ request_id }} + +""" + +MODIFY_TARGET_GROUP_TEMPLATE = """ + + + + {{ target_group.arn }} + {{ target_group.name }} + {{ target_group.protocol }} + {{ target_group.port }} + {{ target_group.vpc_id }} + {{ target_group.healthcheck_protocol }} + {{ target_group.healthcheck_port }} + {{ target_group.healthcheck_path }} + {{ target_group.healthcheck_interval_seconds }} + {{ target_group.healthcheck_timeout_seconds }} + {{ target_group.healthy_threshold_count }} + {{ target_group.unhealthy_threshold_count }} + + {{ target_group.matcher['HttpCode'] }} + + + {% for load_balancer_arn in target_group.load_balancer_arns %} + {{ load_balancer_arn }} + {% endfor %} + + + + + + {{ request_id }} + +""" + +MODIFY_LISTENER_TEMPLATE = """ + + + + {{ listener.load_balancer_arn }} + {{ listener.protocol }} + {% if listener.certificates %} + + {% for cert in listener.certificates %} + + {{ cert }} + + {% endfor %} + + {% endif %} + {{ listener.port }} + {{ listener.ssl_policy }} + {{ listener.arn }} + + {% for action in listener.default_actions %} + + {{ action.type }} + {{ action.target_group_arn }} + + {% endfor %} + + + + + + {{ request_id }} + +""" diff --git a/moto/elbv2/utils.py b/moto/elbv2/utils.py new file mode 100644 index 000000000..47a3e66d5 --- /dev/null +++ b/moto/elbv2/utils.py @@ -0,0 +1,8 @@ +def make_arn_for_load_balancer(account_id, name, region_name): + return 
"arn:aws:elasticloadbalancing:{}:{}:loadbalancer/{}/50dc6c495c0c9188".format( + region_name, account_id, name) + + +def make_arn_for_target_group(account_id, name, region_name): + return "arn:aws:elasticloadbalancing:{}:{}:targetgroup/{}/50dc6c495c0c9188".format( + region_name, account_id, name) diff --git a/moto/events/models.py b/moto/events/models.py index faec7b434..5c1d507ca 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -1,6 +1,7 @@ import os import re +from moto.core.exceptions import JsonRESTError from moto.core import BaseBackend, BaseModel @@ -50,6 +51,8 @@ class Rule(BaseModel): class EventsBackend(BaseBackend): + ACCOUNT_ID = re.compile(r'^(\d{1,12}|\*)$') + STATEMENT_ID = re.compile(r'^[a-zA-Z0-9-_]{1,64}$') def __init__(self): self.rules = {} @@ -58,6 +61,8 @@ class EventsBackend(BaseBackend): self.rules_order = [] self.next_tokens = {} + self.permissions = {} + def _get_rule_by_index(self, i): return self.rules.get(self.rules_order[i]) @@ -181,6 +186,17 @@ class EventsBackend(BaseBackend): return False + def put_events(self, events): + num_events = len(events) + + if num_events < 1: + raise JsonRESTError('ValidationError', 'Need at least 1 event') + elif num_events > 10: + raise JsonRESTError('ValidationError', 'Can only submit 10 events at once') + + # We dont really need to store the events yet + return [] + def remove_targets(self, name, ids): rule = self.rules.get(name) @@ -193,5 +209,40 @@ class EventsBackend(BaseBackend): def test_event_pattern(self): raise NotImplementedError() + def put_permission(self, action, principal, statement_id): + if action is None or action != 'PutEvents': + raise JsonRESTError('InvalidParameterValue', 'Action must be PutEvents') + + if principal is None or self.ACCOUNT_ID.match(principal) is None: + raise JsonRESTError('InvalidParameterValue', 'Principal must match ^(\d{1,12}|\*)$') + + if statement_id is None or self.STATEMENT_ID.match(statement_id) is None: + raise JsonRESTError('InvalidParameterValue', 'StatementId must match ^[a-zA-Z0-9-_]{1,64}$') + + self.permissions[statement_id] = {'action': action, 'principal': principal} + + def remove_permission(self, statement_id): + try: + del self.permissions[statement_id] + except KeyError: + raise JsonRESTError('ResourceNotFoundException', 'StatementId not found') + + def describe_event_bus(self): + arn = "arn:aws:events:us-east-1:000000000000:event-bus/default" + statements = [] + for statement_id, data in self.permissions.items(): + statements.append({ + 'Sid': statement_id, + 'Effect': 'Allow', + 'Principal': {'AWS': 'arn:aws:iam::{0}:root'.format(data['principal'])}, + 'Action': 'events:{0}'.format(data['action']), + 'Resource': arn + }) + return { + 'Policy': {'Version': '2012-10-17', 'Statement': statements}, + 'Name': 'default', + 'Arn': arn + } + events_backend = EventsBackend() diff --git a/moto/events/responses.py b/moto/events/responses.py index 8f433844a..f9cb9b5b5 100644 --- a/moto/events/responses.py +++ b/moto/events/responses.py @@ -18,9 +18,17 @@ class EventsHandler(BaseResponse): 'RoleArn': rule.role_arn } - def load_body(self): - decoded_body = self.body - return json.loads(decoded_body or '{}') + @property + def request_params(self): + if not hasattr(self, '_json_body'): + try: + self._json_body = json.loads(self.body) + except ValueError: + self._json_body = {} + return self._json_body + + def _get_param(self, param, if_none=None): + return self.request_params.get(param, if_none) def error(self, type_, message='', status=400): headers = 
self.response_headers @@ -28,8 +36,7 @@ class EventsHandler(BaseResponse): return json.dumps({'__type': type_, 'message': message}), headers, def delete_rule(self): - body = self.load_body() - name = body.get('Name') + name = self._get_param('Name') if not name: return self.error('ValidationException', 'Parameter Name is required.') @@ -38,8 +45,7 @@ class EventsHandler(BaseResponse): return '', self.response_headers def describe_rule(self): - body = self.load_body() - name = body.get('Name') + name = self._get_param('Name') if not name: return self.error('ValidationException', 'Parameter Name is required.') @@ -53,8 +59,7 @@ class EventsHandler(BaseResponse): return json.dumps(rule_dict), self.response_headers def disable_rule(self): - body = self.load_body() - name = body.get('Name') + name = self._get_param('Name') if not name: return self.error('ValidationException', 'Parameter Name is required.') @@ -65,8 +70,7 @@ class EventsHandler(BaseResponse): return '', self.response_headers def enable_rule(self): - body = self.load_body() - name = body.get('Name') + name = self._get_param('Name') if not name: return self.error('ValidationException', 'Parameter Name is required.') @@ -80,10 +84,9 @@ class EventsHandler(BaseResponse): pass def list_rule_names_by_target(self): - body = self.load_body() - target_arn = body.get('TargetArn') - next_token = body.get('NextToken') - limit = body.get('Limit') + target_arn = self._get_param('TargetArn') + next_token = self._get_param('NextToken') + limit = self._get_param('Limit') if not target_arn: return self.error('ValidationException', 'Parameter TargetArn is required.') @@ -94,10 +97,9 @@ class EventsHandler(BaseResponse): return json.dumps(rule_names), self.response_headers def list_rules(self): - body = self.load_body() - prefix = body.get('NamePrefix') - next_token = body.get('NextToken') - limit = body.get('Limit') + prefix = self._get_param('NamePrefix') + next_token = self._get_param('NextToken') + limit = self._get_param('Limit') rules = events_backend.list_rules(prefix, next_token, limit) rules_obj = {'Rules': []} @@ -111,10 +113,9 @@ class EventsHandler(BaseResponse): return json.dumps(rules_obj), self.response_headers def list_targets_by_rule(self): - body = self.load_body() - rule_name = body.get('Rule') - next_token = body.get('NextToken') - limit = body.get('Limit') + rule_name = self._get_param('Rule') + next_token = self._get_param('NextToken') + limit = self._get_param('Limit') if not rule_name: return self.error('ValidationException', 'Parameter Rule is required.') @@ -128,13 +129,25 @@ class EventsHandler(BaseResponse): return json.dumps(targets), self.response_headers def put_events(self): + events = self._get_param('Entries') + + failed_entries = events_backend.put_events(events) + + if failed_entries: + return json.dumps({ + 'FailedEntryCount': len(failed_entries), + 'Entries': failed_entries + }) + return '', self.response_headers def put_rule(self): - body = self.load_body() - name = body.get('Name') - event_pattern = body.get('EventPattern') - sched_exp = body.get('ScheduleExpression') + name = self._get_param('Name') + event_pattern = self._get_param('EventPattern') + sched_exp = self._get_param('ScheduleExpression') + state = self._get_param('State') + desc = self._get_param('Description') + role_arn = self._get_param('RoleArn') if not name: return self.error('ValidationException', 'Parameter Name is required.') @@ -156,17 +169,16 @@ class EventsHandler(BaseResponse): name, ScheduleExpression=sched_exp, 
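The `put_events` handler above delegates to the backend's 1-to-10 entry validation. A sketch of the failure path, assuming botocore does not enforce the batch limit client-side so the error actually comes from the mock:

import boto3
from botocore.exceptions import ClientError
from moto import mock_events


@mock_events
def test_put_events_batch_limit():
    client = boto3.client('events', region_name='us-east-1')
    entry = {'Source': 'my.app', 'DetailType': 'test', 'Detail': '{}'}
    try:
        # Eleven entries exceeds the backend's limit of ten
        client.put_events(Entries=[entry] * 11)
        raise AssertionError('expected a ValidationError from the backend')
    except ClientError as err:
        assert 'Can only submit 10 events at once' in str(err)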
EventPattern=event_pattern, - State=body.get('State'), - Description=body.get('Description'), - RoleArn=body.get('RoleArn') + State=state, + Description=desc, + RoleArn=role_arn ) return json.dumps({'RuleArn': rule_arn}), self.response_headers def put_targets(self): - body = self.load_body() - rule_name = body.get('Rule') - targets = body.get('Targets') + rule_name = self._get_param('Rule') + targets = self._get_param('Targets') if not rule_name: return self.error('ValidationException', 'Parameter Rule is required.') @@ -180,9 +192,8 @@ class EventsHandler(BaseResponse): return '', self.response_headers def remove_targets(self): - body = self.load_body() - rule_name = body.get('Rule') - ids = body.get('Ids') + rule_name = self._get_param('Rule') + ids = self._get_param('Ids') if not rule_name: return self.error('ValidationException', 'Parameter Rule is required.') @@ -197,3 +208,22 @@ class EventsHandler(BaseResponse): def test_event_pattern(self): pass + + def put_permission(self): + action = self._get_param('Action') + principal = self._get_param('Principal') + statement_id = self._get_param('StatementId') + + events_backend.put_permission(action, principal, statement_id) + + return '' + + def remove_permission(self): + statement_id = self._get_param('StatementId') + + events_backend.remove_permission(statement_id) + + return '' + + def describe_event_bus(self): + return json.dumps(events_backend.describe_event_bus()) diff --git a/moto/iot/__init__.py b/moto/iot/__init__.py new file mode 100644 index 000000000..199b8aeae --- /dev/null +++ b/moto/iot/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import iot_backends +from ..core.models import base_decorator + +iot_backend = iot_backends['us-east-1'] +mock_iot = base_decorator(iot_backends) diff --git a/moto/iot/exceptions.py b/moto/iot/exceptions.py new file mode 100644 index 000000000..4bb01c095 --- /dev/null +++ b/moto/iot/exceptions.py @@ -0,0 +1,24 @@ +from __future__ import unicode_literals +from moto.core.exceptions import JsonRESTError + + +class IoTClientError(JsonRESTError): + code = 400 + + +class ResourceNotFoundException(IoTClientError): + def __init__(self): + self.code = 404 + super(ResourceNotFoundException, self).__init__( + "ResourceNotFoundException", + "The specified resource does not exist" + ) + + +class InvalidRequestException(IoTClientError): + def __init__(self): + self.code = 400 + super(InvalidRequestException, self).__init__( + "InvalidRequestException", + "The request is not valid." + ) diff --git a/moto/iot/models.py b/moto/iot/models.py new file mode 100644 index 000000000..1efa6690e --- /dev/null +++ b/moto/iot/models.py @@ -0,0 +1,364 @@ +from __future__ import unicode_literals +import time +import boto3 +import string +import random +import hashlib +import uuid +from moto.core import BaseBackend, BaseModel +from collections import OrderedDict +from .exceptions import ( + ResourceNotFoundException, + InvalidRequestException +) + + +class FakeThing(BaseModel): + def __init__(self, thing_name, thing_type, attributes, region_name): + self.region_name = region_name + self.thing_name = thing_name + self.thing_type = thing_type + self.attributes = attributes + self.arn = 'arn:aws:iot:%s:1:thing/%s' % (self.region_name, thing_name) + self.version = 1 + # TODO: we need to handle 'version'? 
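A minimal round trip through the new IoT backend; the thing name and attributes here are illustrative:

import boto3
from moto import mock_iot


@mock_iot
def test_create_and_describe_thing():
    client = boto3.client('iot', region_name='us-east-1')
    client.create_thing(thingName='thermostat',
                        attributePayload={'attributes': {'room': 'kitchen'}})
    thing = client.describe_thing(thingName='thermostat')
    assert thing['thingName'] == 'thermostat'
    assert thing['attributes'] == {'room': 'kitchen'}
    # describe_thing includes the default client id, which mirrors the thing name
    assert thing['defaultClientId'] == 'thermostat'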
+ + # for iot-data + self.thing_shadow = None + + def to_dict(self, include_default_client_id=False): + obj = { + 'thingName': self.thing_name, + 'attributes': self.attributes, + 'version': self.version + } + if self.thing_type: + obj['thingTypeName'] = self.thing_type.thing_type_name + if include_default_client_id: + obj['defaultClientId'] = self.thing_name + return obj + + class FakeThingType(BaseModel): + def __init__(self, thing_type_name, thing_type_properties, region_name): + self.region_name = region_name + self.thing_type_name = thing_type_name + self.thing_type_properties = thing_type_properties + t = time.time() + self.metadata = { + 'deprecated': False, + 'creationDate': int(t * 1000) / 1000.0 + } + self.arn = 'arn:aws:iot:%s:1:thingtype/%s' % (self.region_name, thing_type_name) + + def to_dict(self): + return { + 'thingTypeName': self.thing_type_name, + 'thingTypeProperties': self.thing_type_properties, + 'thingTypeMetadata': self.metadata + } + + class FakeCertificate(BaseModel): + def __init__(self, certificate_pem, status, region_name): + m = hashlib.sha256() + m.update(str(uuid.uuid4()).encode('utf-8')) + self.certificate_id = m.hexdigest() + self.arn = 'arn:aws:iot:%s:1:cert/%s' % (region_name, self.certificate_id) + self.certificate_pem = certificate_pem + self.status = status + + # TODO: must adjust + self.owner = '1' + self.transfer_data = {} + self.creation_date = time.time() + self.last_modified_date = self.creation_date + self.ca_certificate_id = None + + def to_dict(self): + return { + 'certificateArn': self.arn, + 'certificateId': self.certificate_id, + 'status': self.status, + 'creationDate': self.creation_date + } + + def to_description_dict(self): + """ + You might need the keys below in some situations + - caCertificateId + - previousOwnedBy + """ + return { + 'certificateArn': self.arn, + 'certificateId': self.certificate_id, + 'status': self.status, + 'certificatePem': self.certificate_pem, + 'ownedBy': self.owner, + 'creationDate': self.creation_date, + 'lastModifiedDate': self.last_modified_date, + 'transferData': self.transfer_data + } + + class FakePolicy(BaseModel): + def __init__(self, name, document, region_name): + self.name = name + self.document = document + self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, name) + self.version = '1' # TODO: handle version + + def to_get_dict(self): + return { + 'policyName': self.name, + 'policyArn': self.arn, + 'policyDocument': self.document, + 'defaultVersionId': self.version + } + + def to_dict_at_creation(self): + return { + 'policyName': self.name, + 'policyArn': self.arn, + 'policyDocument': self.document, + 'policyVersionId': self.version + } + + def to_dict(self): + return { + 'policyName': self.name, + 'policyArn': self.arn, + } + + class IoTBackend(BaseBackend): + def __init__(self, region_name=None): + super(IoTBackend, self).__init__() + self.region_name = region_name + self.things = OrderedDict() + self.thing_types = OrderedDict() + self.certificates = OrderedDict() + self.policies = OrderedDict() + self.principal_policies = OrderedDict() + self.principal_things = OrderedDict() + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + def create_thing(self, thing_name, thing_type_name, attribute_payload): + thing_types = self.list_thing_types() + thing_type = None + if thing_type_name: + filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == thing_type_name] + if len(filtered_thing_types) == 0: + raise ResourceNotFoundException() + 
thing_type = filtered_thing_types[0] + if attribute_payload is None: + attributes = {} + elif 'attributes' not in attribute_payload: + attributes = {} + else: + attributes = attribute_payload['attributes'] + thing = FakeThing(thing_name, thing_type, attributes, self.region_name) + self.things[thing.arn] = thing + return thing.thing_name, thing.arn + + def create_thing_type(self, thing_type_name, thing_type_properties): + if thing_type_properties is None: + thing_type_properties = {} + thing_type = FakeThingType(thing_type_name, thing_type_properties, self.region_name) + self.thing_types[thing_type.arn] = thing_type + return thing_type.thing_type_name, thing_type.arn + + def list_thing_types(self, thing_type_name=None): + if thing_type_name: + # It's weird, but thing_type_name is filtered by prefix match, not exact match + return [_ for _ in self.thing_types.values() if _.thing_type_name.startswith(thing_type_name)] + thing_types = self.thing_types.values() + return thing_types + + def list_things(self, attribute_name, attribute_value, thing_type_name): + # TODO: filter by attributes or thing_type + things = self.things.values() + return things + + def describe_thing(self, thing_name): + things = [_ for _ in self.things.values() if _.thing_name == thing_name] + if len(things) == 0: + raise ResourceNotFoundException() + return things[0] + + def describe_thing_type(self, thing_type_name): + thing_types = [_ for _ in self.thing_types.values() if _.thing_type_name == thing_type_name] + if len(thing_types) == 0: + raise ResourceNotFoundException() + return thing_types[0] + + def delete_thing(self, thing_name, expected_version): + # TODO: handle expected_version + + # can raise ResourceNotFoundException + thing = self.describe_thing(thing_name) + del self.things[thing.arn] + + def delete_thing_type(self, thing_type_name): + # can raise ResourceNotFoundException + thing_type = self.describe_thing_type(thing_type_name) + del self.thing_types[thing_type.arn] + + def update_thing(self, thing_name, thing_type_name, attribute_payload, expected_version, remove_thing_type): + # if attribute_payload is {}, nothing happens + thing = self.describe_thing(thing_name) + thing_type = None + + if remove_thing_type and thing_type_name: + raise InvalidRequestException() + + # thing_type + if thing_type_name: + thing_types = self.list_thing_types() + filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == thing_type_name] + if len(filtered_thing_types) == 0: + raise ResourceNotFoundException() + thing_type = filtered_thing_types[0] + thing.thing_type = thing_type + + if remove_thing_type: + thing.thing_type = None + + # attribute + if attribute_payload is not None and 'attributes' in attribute_payload: + do_merge = attribute_payload.get('merge', False) + attributes = attribute_payload['attributes'] + if not do_merge: + thing.attributes = attributes + else: + thing.attributes.update(attributes) + + def _random_string(self): + n = 20 + random_str = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(n)]) + return random_str + + def create_keys_and_certificate(self, set_as_active): + # implement here + # caCertificate can be blank + key_pair = { + 'PublicKey': self._random_string(), + 'PrivateKey': self._random_string() + } + certificate_pem = self._random_string() + status = 'ACTIVE' if set_as_active else 'INACTIVE' + certificate = FakeCertificate(certificate_pem, status, self.region_name) + self.certificates[certificate.certificate_id] = certificate + return certificate, key_pair + + def
delete_certificate(self, certificate_id): + self.describe_certificate(certificate_id) + del self.certificates[certificate_id] + + def describe_certificate(self, certificate_id): + certs = [_ for _ in self.certificates.values() if _.certificate_id == certificate_id] + if len(certs) == 0: + raise ResourceNotFoundException() + return certs[0] + + def list_certificates(self): + return self.certificates.values() + + def update_certificate(self, certificate_id, new_status): + cert = self.describe_certificate(certificate_id) + # TODO: validate new_status + cert.status = new_status + + def create_policy(self, policy_name, policy_document): + policy = FakePolicy(policy_name, policy_document, self.region_name) + self.policies[policy.name] = policy + return policy + + def list_policies(self): + policies = self.policies.values() + return policies + + def get_policy(self, policy_name): + policies = [_ for _ in self.policies.values() if _.name == policy_name] + if len(policies) == 0: + raise ResourceNotFoundException() + return policies[0] + + def delete_policy(self, policy_name): + policy = self.get_policy(policy_name) + del self.policies[policy.name] + + def _get_principal(self, principal_arn): + """ + raise ResourceNotFoundException + """ + if ':cert/' in principal_arn: + certs = [_ for _ in self.certificates.values() if _.arn == principal_arn] + if len(certs) == 0: + raise ResourceNotFoundException() + principal = certs[0] + return principal + else: + # TODO: search for cognito_ids + pass + raise ResourceNotFoundException() + + def attach_principal_policy(self, policy_name, principal_arn): + principal = self._get_principal(principal_arn) + policy = self.get_policy(policy_name) + k = (principal_arn, policy_name) + if k in self.principal_policies: + return + self.principal_policies[k] = (principal, policy) + + def detach_principal_policy(self, policy_name, principal_arn): + # this may raise ResourceNotFoundException + self._get_principal(principal_arn) + self.get_policy(policy_name) + + k = (principal_arn, policy_name) + if k not in self.principal_policies: + raise ResourceNotFoundException() + del self.principal_policies[k] + + def list_principal_policies(self, principal_arn): + policies = [v[1] for k, v in self.principal_policies.items() if k[0] == principal_arn] + return policies + + def list_policy_principals(self, policy_name): + principals = [k[0] for k, v in self.principal_policies.items() if k[1] == policy_name] + return principals + + def attach_thing_principal(self, thing_name, principal_arn): + principal = self._get_principal(principal_arn) + thing = self.describe_thing(thing_name) + k = (principal_arn, thing_name) + if k in self.principal_things: + return + self.principal_things[k] = (principal, thing) + + def detach_thing_principal(self, thing_name, principal_arn): + # this may raise ResourceNotFoundException + self._get_principal(principal_arn) + self.describe_thing(thing_name) + + k = (principal_arn, thing_name) + if k not in self.principal_things: + raise ResourceNotFoundException() + del self.principal_things[k] + + def list_principal_things(self, principal_arn): + thing_names = [k[1] for k, v in self.principal_things.items() if k[0] == principal_arn] + return thing_names + + def list_thing_principals(self, thing_name): + principals = [k[0] for k, v in self.principal_things.items() if k[1] == thing_name] + return principals + + available_regions = boto3.session.Session().get_available_regions("iot") + iot_backends = {region: IoTBackend(region) for region in available_regions} diff
--git a/moto/iot/responses.py b/moto/iot/responses.py new file mode 100644 index 000000000..bbe2bb016 --- /dev/null +++ b/moto/iot/responses.py @@ -0,0 +1,258 @@ +from __future__ import unicode_literals +from moto.core.responses import BaseResponse +from .models import iot_backends +import json + + +class IoTResponse(BaseResponse): + SERVICE_NAME = 'iot' + + @property + def iot_backend(self): + return iot_backends[self.region] + + def create_thing(self): + thing_name = self._get_param("thingName") + thing_type_name = self._get_param("thingTypeName") + attribute_payload = self._get_param("attributePayload") + thing_name, thing_arn = self.iot_backend.create_thing( + thing_name=thing_name, + thing_type_name=thing_type_name, + attribute_payload=attribute_payload, + ) + return json.dumps(dict(thingName=thing_name, thingArn=thing_arn)) + + def create_thing_type(self): + thing_type_name = self._get_param("thingTypeName") + thing_type_properties = self._get_param("thingTypeProperties") + thing_type_name, thing_type_arn = self.iot_backend.create_thing_type( + thing_type_name=thing_type_name, + thing_type_properties=thing_type_properties, + ) + return json.dumps(dict(thingTypeName=thing_type_name, thingTypeArn=thing_type_arn)) + + def list_thing_types(self): + # previous_next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + thing_type_name = self._get_param("thingTypeName") + thing_types = self.iot_backend.list_thing_types( + thing_type_name=thing_type_name + ) + + # TODO: support next_token and max_results + next_token = None + return json.dumps(dict(thingTypes=[_.to_dict() for _ in thing_types], nextToken=next_token)) + + def list_things(self): + # previous_next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + attribute_name = self._get_param("attributeName") + attribute_value = self._get_param("attributeValue") + thing_type_name = self._get_param("thingTypeName") + things = self.iot_backend.list_things( + attribute_name=attribute_name, + attribute_value=attribute_value, + thing_type_name=thing_type_name, + ) + # TODO: support next_token and max_results + next_token = None + return json.dumps(dict(things=[_.to_dict() for _ in things], nextToken=next_token)) + + def describe_thing(self): + thing_name = self._get_param("thingName") + thing = self.iot_backend.describe_thing( + thing_name=thing_name, + ) + return json.dumps(thing.to_dict(include_default_client_id=True)) + + def describe_thing_type(self): + thing_type_name = self._get_param("thingTypeName") + thing_type = self.iot_backend.describe_thing_type( + thing_type_name=thing_type_name, + ) + return json.dumps(thing_type.to_dict()) + + def delete_thing(self): + thing_name = self._get_param("thingName") + expected_version = self._get_param("expectedVersion") + self.iot_backend.delete_thing( + thing_name=thing_name, + expected_version=expected_version, + ) + return json.dumps(dict()) + + def delete_thing_type(self): + thing_type_name = self._get_param("thingTypeName") + self.iot_backend.delete_thing_type( + thing_type_name=thing_type_name, + ) + return json.dumps(dict()) + + def update_thing(self): + thing_name = self._get_param("thingName") + thing_type_name = self._get_param("thingTypeName") + attribute_payload = self._get_param("attributePayload") + expected_version = self._get_param("expectedVersion") + remove_thing_type = self._get_param("removeThingType") + self.iot_backend.update_thing( + 
thing_name=thing_name, + thing_type_name=thing_type_name, + attribute_payload=attribute_payload, + expected_version=expected_version, + remove_thing_type=remove_thing_type, + ) + return json.dumps(dict()) + + def create_keys_and_certificate(self): + set_as_active = self._get_param("setAsActive") + cert, key_pair = self.iot_backend.create_keys_and_certificate( + set_as_active=set_as_active, + ) + return json.dumps(dict( + certificateArn=cert.arn, + certificateId=cert.certificate_id, + certificatePem=cert.certificate_pem, + keyPair=key_pair + )) + + def delete_certificate(self): + certificate_id = self._get_param("certificateId") + self.iot_backend.delete_certificate( + certificate_id=certificate_id, + ) + return json.dumps(dict()) + + def describe_certificate(self): + certificate_id = self._get_param("certificateId") + certificate = self.iot_backend.describe_certificate( + certificate_id=certificate_id, + ) + return json.dumps(dict(certificateDescription=certificate.to_description_dict())) + + def list_certificates(self): + # page_size = self._get_int_param("pageSize") + # marker = self._get_param("marker") + # ascending_order = self._get_param("ascendingOrder") + certificates = self.iot_backend.list_certificates() + # TODO: handle pagination + return json.dumps(dict(certificates=[_.to_dict() for _ in certificates])) + + def update_certificate(self): + certificate_id = self._get_param("certificateId") + new_status = self._get_param("newStatus") + self.iot_backend.update_certificate( + certificate_id=certificate_id, + new_status=new_status, + ) + return json.dumps(dict()) + + def create_policy(self): + policy_name = self._get_param("policyName") + policy_document = self._get_param("policyDocument") + policy = self.iot_backend.create_policy( + policy_name=policy_name, + policy_document=policy_document, + ) + return json.dumps(policy.to_dict_at_creation()) + + def list_policies(self): + # marker = self._get_param("marker") + # page_size = self._get_int_param("pageSize") + # ascending_order = self._get_param("ascendingOrder") + policies = self.iot_backend.list_policies() + + # TODO: handle pagination + return json.dumps(dict(policies=[_.to_dict() for _ in policies])) + + def get_policy(self): + policy_name = self._get_param("policyName") + policy = self.iot_backend.get_policy( + policy_name=policy_name, + ) + return json.dumps(policy.to_get_dict()) + + def delete_policy(self): + policy_name = self._get_param("policyName") + self.iot_backend.delete_policy( + policy_name=policy_name, + ) + return json.dumps(dict()) + + def attach_principal_policy(self): + policy_name = self._get_param("policyName") + principal = self.headers.get('x-amzn-iot-principal') + self.iot_backend.attach_principal_policy( + policy_name=policy_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def detach_principal_policy(self): + policy_name = self._get_param("policyName") + principal = self.headers.get('x-amzn-iot-principal') + self.iot_backend.detach_principal_policy( + policy_name=policy_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def list_principal_policies(self): + principal = self.headers.get('x-amzn-iot-principal') + # marker = self._get_param("marker") + # page_size = self._get_int_param("pageSize") + # ascending_order = self._get_param("ascendingOrder") + policies = self.iot_backend.list_principal_policies( + principal_arn=principal + ) + # TODO: handle pagination + next_marker = None + return json.dumps(dict(policies=[_.to_dict() for _ in policies], 
nextMarker=next_marker)) + + def list_policy_principals(self): + policy_name = self.headers.get('x-amzn-iot-policy') + # marker = self._get_param("marker") + # page_size = self._get_int_param("pageSize") + # ascending_order = self._get_param("ascendingOrder") + principals = self.iot_backend.list_policy_principals( + policy_name=policy_name, + ) + # TODO: handle pagination + next_marker = None + return json.dumps(dict(principals=principals, nextMarker=next_marker)) + + def attach_thing_principal(self): + thing_name = self._get_param("thingName") + principal = self.headers.get('x-amzn-principal') + self.iot_backend.attach_thing_principal( + thing_name=thing_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def detach_thing_principal(self): + thing_name = self._get_param("thingName") + principal = self.headers.get('x-amzn-principal') + self.iot_backend.detach_thing_principal( + thing_name=thing_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def list_principal_things(self): + next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + principal = self.headers.get('x-amzn-principal') + things = self.iot_backend.list_principal_things( + principal_arn=principal, + ) + # TODO: handle pagination + next_token = None + return json.dumps(dict(things=things, nextToken=next_token)) + + def list_thing_principals(self): + thing_name = self._get_param("thingName") + principals = self.iot_backend.list_thing_principals( + thing_name=thing_name, + ) + return json.dumps(dict(principals=principals)) diff --git a/moto/iot/urls.py b/moto/iot/urls.py new file mode 100644 index 000000000..6d11c15a5 --- /dev/null +++ b/moto/iot/urls.py @@ -0,0 +1,14 @@ +from __future__ import unicode_literals +from .responses import IoTResponse + +url_bases = [ + "https?://iot.(.+).amazonaws.com", +] + + +response = IoTResponse() + + +url_paths = { + '{0}/.*$': response.dispatch, +} diff --git a/moto/iotdata/__init__.py b/moto/iotdata/__init__.py new file mode 100644 index 000000000..214f2e575 --- /dev/null +++ b/moto/iotdata/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import iotdata_backends +from ..core.models import base_decorator + +iotdata_backend = iotdata_backends['us-east-1'] +mock_iotdata = base_decorator(iotdata_backends) diff --git a/moto/iotdata/exceptions.py b/moto/iotdata/exceptions.py new file mode 100644 index 000000000..ddc6b37fd --- /dev/null +++ b/moto/iotdata/exceptions.py @@ -0,0 +1,23 @@ +from __future__ import unicode_literals +from moto.core.exceptions import JsonRESTError + + +class IoTDataPlaneClientError(JsonRESTError): + code = 400 + + +class ResourceNotFoundException(IoTDataPlaneClientError): + def __init__(self): + self.code = 404 + super(ResourceNotFoundException, self).__init__( + "ResourceNotFoundException", + "The specified resource does not exist" + ) + + +class InvalidRequestException(IoTDataPlaneClientError): + def __init__(self, message): + self.code = 400 + super(InvalidRequestException, self).__init__( + "InvalidRequestException", message + ) diff --git a/moto/iotdata/models.py b/moto/iotdata/models.py new file mode 100644 index 000000000..7ae517109 --- /dev/null +++ b/moto/iotdata/models.py @@ -0,0 +1,189 @@ +from __future__ import unicode_literals +import json +import time +import boto3 +import jsondiff +from moto.core import BaseBackend, BaseModel +from moto.iot import iot_backends +from .exceptions import ( + ResourceNotFoundException, + InvalidRequestException +) + + +class 
FakeShadow(BaseModel): + """See the specification: + http://docs.aws.amazon.com/iot/latest/developerguide/thing-shadow-document-syntax.html + """ + def __init__(self, desired, reported, requested_payload, version, deleted=False): + self.desired = desired + self.reported = reported + self.requested_payload = requested_payload + self.version = version + self.timestamp = int(time.time()) + self.deleted = deleted + + self.metadata_desired = self._create_metadata_from_state(self.desired, self.timestamp) + self.metadata_reported = self._create_metadata_from_state(self.reported, self.timestamp) + + @classmethod + def create_from_previous_version(cls, previous_shadow, payload): + """ + Pass None as the payload when you want to delete the shadow. + """ + version, previous_payload = (previous_shadow.version + 1, previous_shadow.to_dict(include_delta=False)) if previous_shadow else (1, {}) + + if payload is None: + # if the given payload is None, delete the existing payload + # this means the request was delete_thing_shadow + shadow = FakeShadow(None, None, None, version, deleted=True) + return shadow + + # update_thing_shadow has already validated that payload has a 'state' key + desired = payload['state'].get( + 'desired', + previous_payload.get('state', {}).get('desired', None) + ) + reported = payload['state'].get( + 'reported', + previous_payload.get('state', {}).get('reported', None) + ) + shadow = FakeShadow(desired, reported, payload, version) + return shadow + + @classmethod + def parse_payload(cls, desired, reported): + if desired is None: + delta = reported + elif reported is None: + delta = desired + else: + delta = jsondiff.diff(desired, reported) + return delta + + def _create_metadata_from_state(self, state, ts): + """ + state must be a desired- or reported-style dict object; + replaces each primitive value with {"timestamp": ts} in the dict + """ + if state is None: + return None + + def _f(elem, ts): + if isinstance(elem, dict): + return {_: _f(elem[_], ts) for _ in elem.keys()} + if isinstance(elem, list): + return [_f(_, ts) for _ in elem] + return {"timestamp": ts} + return _f(state, ts) + + def to_response_dict(self): + desired = self.requested_payload['state'].get('desired', None) + reported = self.requested_payload['state'].get('reported', None) + + payload = {} + if desired is not None: + payload['desired'] = desired + if reported is not None: + payload['reported'] = reported + + metadata = {} + if desired is not None: + metadata['desired'] = self._create_metadata_from_state(desired, self.timestamp) + if reported is not None: + metadata['reported'] = self._create_metadata_from_state(reported, self.timestamp) + return { + 'state': payload, + 'metadata': metadata, + 'timestamp': self.timestamp, + 'version': self.version + } + + def to_dict(self, include_delta=True): + """Returns only the documented top-level keys for now. 
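+ A 'delta' node is included under 'state' only when include_delta is set
+ and the jsondiff between desired and reported is non-empty.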
+ """ + if self.deleted: + return { + 'timestamp': self.timestamp, + 'version': self.version + } + delta = self.parse_payload(self.desired, self.reported) + payload = {} + if self.desired is not None: + payload['desired'] = self.desired + if self.reported is not None: + payload['reported'] = self.reported + if include_delta and (delta is not None and len(delta.keys()) != 0): + payload['delta'] = delta + + metadata = {} + if self.metadata_desired is not None: + metadata['desired'] = self.metadata_desired + if self.metadata_reported is not None: + metadata['reported'] = self.metadata_reported + + return { + 'state': payload, + 'metadata': metadata, + 'timestamp': self.timestamp, + 'version': self.version + } + + +class IoTDataPlaneBackend(BaseBackend): + def __init__(self, region_name=None): + super(IoTDataPlaneBackend, self).__init__() + self.region_name = region_name + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + def update_thing_shadow(self, thing_name, payload): + """ + spec of payload: + - need node `state` + - state node must be an Object + - State contains an invalid node: 'foo' + """ + thing = iot_backends[self.region_name].describe_thing(thing_name) + + # validate + try: + payload = json.loads(payload) + except ValueError: + raise InvalidRequestException('invalid json') + if 'state' not in payload: + raise InvalidRequestException('need node `state`') + if not isinstance(payload['state'], dict): + raise InvalidRequestException('state node must be an Object') + if any(_ for _ in payload['state'].keys() if _ not in ['desired', 'reported']): + raise InvalidRequestException('State contains an invalid node') + + new_shadow = FakeShadow.create_from_previous_version(thing.thing_shadow, payload) + thing.thing_shadow = new_shadow + return thing.thing_shadow + + def get_thing_shadow(self, thing_name): + thing = iot_backends[self.region_name].describe_thing(thing_name) + + if thing.thing_shadow is None or thing.thing_shadow.deleted: + raise ResourceNotFoundException() + return thing.thing_shadow + + def delete_thing_shadow(self, thing_name): + """after deleting, get_thing_shadow will raise ResourceNotFound. + But version of the shadow keep increasing... 
+ """ + thing = iot_backends[self.region_name].describe_thing(thing_name) + if thing.thing_shadow is None: + raise ResourceNotFoundException() + payload = None + new_shadow = FakeShadow.create_from_previous_version(thing.thing_shadow, payload) + thing.thing_shadow = new_shadow + return thing.thing_shadow + + +available_regions = boto3.session.Session().get_available_regions("iot-data") +iotdata_backends = {region: IoTDataPlaneBackend(region) for region in available_regions} diff --git a/moto/iotdata/responses.py b/moto/iotdata/responses.py new file mode 100644 index 000000000..d87479011 --- /dev/null +++ b/moto/iotdata/responses.py @@ -0,0 +1,35 @@ +from __future__ import unicode_literals +from moto.core.responses import BaseResponse +from .models import iotdata_backends +import json + + +class IoTDataPlaneResponse(BaseResponse): + SERVICE_NAME = 'iot-data' + + @property + def iotdata_backend(self): + return iotdata_backends[self.region] + + def update_thing_shadow(self): + thing_name = self._get_param("thingName") + payload = self.body + payload = self.iotdata_backend.update_thing_shadow( + thing_name=thing_name, + payload=payload, + ) + return json.dumps(payload.to_response_dict()) + + def get_thing_shadow(self): + thing_name = self._get_param("thingName") + payload = self.iotdata_backend.get_thing_shadow( + thing_name=thing_name, + ) + return json.dumps(payload.to_dict()) + + def delete_thing_shadow(self): + thing_name = self._get_param("thingName") + payload = self.iotdata_backend.delete_thing_shadow( + thing_name=thing_name, + ) + return json.dumps(payload.to_dict()) diff --git a/moto/iotdata/urls.py b/moto/iotdata/urls.py new file mode 100644 index 000000000..a3bcb0a52 --- /dev/null +++ b/moto/iotdata/urls.py @@ -0,0 +1,14 @@ +from __future__ import unicode_literals +from .responses import IoTDataPlaneResponse + +url_bases = [ + "https?://data.iot.(.+).amazonaws.com", +] + + +response = IoTDataPlaneResponse() + + +url_paths = { + '{0}/.*$': response.dispatch, +} diff --git a/moto/rds2/models.py b/moto/rds2/models.py index bb66ead57..cf83733ce 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -704,7 +704,8 @@ class RDS2Backend(BaseBackend): if self.arn_regex.match(source_database_id): db_kwargs['region'] = self.region - replica = copy.deepcopy(primary) + # Shouldn't really copy here as the instance is duplicated. RDS replicas have different instances. 
+ replica = copy.copy(primary) replica.update(db_kwargs) replica.set_as_replica() self.databases[database_id] = replica diff --git a/moto/route53/models.py b/moto/route53/models.py index f0e52086d..af8bb690a 100644 --- a/moto/route53/models.py +++ b/moto/route53/models.py @@ -196,14 +196,14 @@ class FakeZone(BaseModel): self.rrsets = [ record_set for record_set in self.rrsets if record_set.set_identifier != set_identifier] - def get_record_sets(self, type_filter, name_filter): + def get_record_sets(self, start_type, start_name): record_sets = list(self.rrsets) # Copy the list - if type_filter: + if start_type: record_sets = [ - record_set for record_set in record_sets if record_set._type == type_filter] - if name_filter: + record_set for record_set in record_sets if record_set._type >= start_type] + if start_name: record_sets = [ - record_set for record_set in record_sets if record_set.name == name_filter] + record_set for record_set in record_sets if record_set.name >= start_name] return record_sets diff --git a/moto/route53/responses.py b/moto/route53/responses.py index 2419f896d..00e5c60a5 100644 --- a/moto/route53/responses.py +++ b/moto/route53/responses.py @@ -151,9 +151,9 @@ class Route53(BaseResponse): elif method == "GET": querystring = parse_qs(parsed_url.query) template = Template(LIST_RRSET_REPONSE) - type_filter = querystring.get("type", [None])[0] - name_filter = querystring.get("name", [None])[0] - record_sets = the_zone.get_record_sets(type_filter, name_filter) + start_type = querystring.get("type", [None])[0] + start_name = querystring.get("name", [None])[0] + record_sets = the_zone.get_record_sets(start_type, start_name) return 200, headers, template.render(record_sets=record_sets) def health_check_response(self, request, full_url, headers): diff --git a/moto/s3/responses.py b/moto/s3/responses.py index b04cb9496..fb1735a5c 100755 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -8,6 +8,7 @@ from six.moves.urllib.parse import parse_qs, urlparse import xmltodict +from moto.packages.httpretty.core import HTTPrettyRequest from moto.core.responses import _TemplateEnvironmentMixin from moto.s3bucket_path.utils import bucket_name_from_url as bucketpath_bucket_name_from_url, parse_key_name as bucketpath_parse_key_name, is_delete_keys as bucketpath_is_delete_keys @@ -113,7 +114,10 @@ class ResponseObject(_TemplateEnvironmentMixin): return 200, {}, response.encode("utf-8") else: status_code, headers, response_content = response - return status_code, headers, response_content.encode("utf-8") + if not isinstance(response_content, six.binary_type): + response_content = response_content.encode("utf-8") + + return status_code, headers, response_content def _bucket_response(self, request, full_url, headers): parsed_url = urlparse(full_url) @@ -139,6 +143,7 @@ class ResponseObject(_TemplateEnvironmentMixin): body = b'' if isinstance(body, six.binary_type): body = body.decode('utf-8') + body = u'{0}'.format(body).encode('utf-8') if method == 'HEAD': return self._bucket_response_head(bucket_name, headers) @@ -209,7 +214,7 @@ class ResponseObject(_TemplateEnvironmentMixin): if not website_configuration: template = self.response_template(S3_NO_BUCKET_WEBSITE_CONFIG) return 404, {}, template.render(bucket_name=bucket_name) - return website_configuration + return 200, {}, website_configuration elif 'acl' in querystring: bucket = self.backend.get_bucket(bucket_name) template = self.response_template(S3_OBJECT_ACL_RESPONSE) @@ -355,7 +360,7 @@ class 
ResponseObject(_TemplateEnvironmentMixin): if not request.headers.get('Content-Length'): return 411, {}, "Content-Length required" if 'versioning' in querystring: - ver = re.search('([A-Za-z]+)', body) + ver = re.search('([A-Za-z]+)', body.decode()) if ver: self.backend.set_bucket_versioning(bucket_name, ver.group(1)) template = self.response_template(S3_BUCKET_VERSIONING) @@ -444,7 +449,12 @@ class ResponseObject(_TemplateEnvironmentMixin): def _bucket_response_post(self, request, body, bucket_name, headers): if not request.headers.get('Content-Length'): return 411, {}, "Content-Length required" - path = request.path if hasattr(request, 'path') else request.path_url + + if isinstance(request, HTTPrettyRequest): + path = request.path + else: + path = request.full_path if hasattr(request, 'full_path') else request.path_url + if self.is_delete_keys(request, path, bucket_name): return self._bucket_response_delete_keys(request, body, bucket_name, headers) @@ -454,6 +464,8 @@ class ResponseObject(_TemplateEnvironmentMixin): form = request.form else: # HTTPretty, build new form object + body = body.decode() + form = {} for kv in body.split('&'): k, v = kv.split('=') @@ -764,7 +776,7 @@ class ResponseObject(_TemplateEnvironmentMixin): return FakeTagging() def _tagging_from_xml(self, xml): - parsed_xml = xmltodict.parse(xml) + parsed_xml = xmltodict.parse(xml, force_list={'Tag': True}) tags = [] for tag in parsed_xml['Tagging']['TagSet']['Tag']: diff --git a/moto/sns/exceptions.py b/moto/sns/exceptions.py index 95b91acca..0e7a0bdcf 100644 --- a/moto/sns/exceptions.py +++ b/moto/sns/exceptions.py @@ -32,3 +32,11 @@ class SNSInvalidParameter(RESTError): def __init__(self, message): super(SNSInvalidParameter, self).__init__( "InvalidParameter", message) + + +class InvalidParameterValue(RESTError): + code = 400 + + def __init__(self, message): + super(InvalidParameterValue, self).__init__( + "InvalidParameterValue", message) diff --git a/moto/sns/models.py b/moto/sns/models.py index 856255be5..80da5f92f 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -7,6 +7,7 @@ import json import boto.sns import requests import six +import re from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel @@ -15,7 +16,8 @@ from moto.sqs import sqs_backends from moto.awslambda import lambda_backends from .exceptions import ( - SNSNotFoundError, DuplicateSnsEndpointError, SnsEndpointDisabled, SNSInvalidParameter + SNSNotFoundError, DuplicateSnsEndpointError, SnsEndpointDisabled, SNSInvalidParameter, + InvalidParameterValue ) from .utils import make_arn_for_topic, make_arn_for_subscription @@ -193,9 +195,15 @@ class SNSBackend(BaseBackend): self.sms_attributes.update(attrs) def create_topic(self, name): - topic = Topic(name, self) - self.topics[topic.arn] = topic - return topic + fails_constraints = not re.match(r'^[a-zA-Z0-9](?:[A-Za-z0-9_-]{0,253}[a-zA-Z0-9])?$', name) + if fails_constraints: + raise InvalidParameterValue("Topic names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long.") + candidate_topic = Topic(name, self) + if candidate_topic.arn in self.topics: + return self.topics[candidate_topic.arn] + else: + self.topics[candidate_topic.arn] = candidate_topic + return candidate_topic def _get_values_nexttoken(self, values_map, next_token=None): if next_token is None: diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 22f310228..85b69ab0e 100644 --- a/moto/sqs/models.py +++ 
b/moto/sqs/models.py @@ -2,6 +2,7 @@ from __future__ import unicode_literals import base64 import hashlib +import json import re import six import struct @@ -9,6 +10,7 @@ from xml.sax.saxutils import escape import boto.sqs +from moto.core.exceptions import RESTError from moto.core import BaseBackend, BaseModel from moto.core.utils import camelcase_to_underscores, get_random_message_id, unix_time, unix_time_millis from .utils import generate_receipt_handle @@ -166,11 +168,14 @@ class Queue(BaseModel): 'ReceiveMessageWaitTimeSeconds', 'VisibilityTimeout', 'WaitTimeSeconds'] + ALLOWED_PERMISSIONS = ('*', 'ChangeMessageVisibility', 'DeleteMessage', 'GetQueueAttributes', + 'GetQueueUrl', 'ReceiveMessage', 'SendMessage') def __init__(self, name, region, **kwargs): self.name = name self.visibility_timeout = int(kwargs.get('VisibilityTimeout', 30)) self.region = region + self.tags = {} self._messages = [] @@ -189,14 +194,42 @@ class Queue(BaseModel): self.message_retention_period = int(kwargs.get('MessageRetentionPeriod', 86400 * 4)) # four days self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format(self.region, self.name) self.receive_message_wait_time_seconds = int(kwargs.get('ReceiveMessageWaitTimeSeconds', 0)) + self.permissions = {} # wait_time_seconds will be set to immediate return messages self.wait_time_seconds = int(kwargs.get('WaitTimeSeconds', 0)) + self.redrive_policy = {} + self.dead_letter_queue = None + + if 'RedrivePolicy' in kwargs: + self._setup_dlq(kwargs['RedrivePolicy']) + # Check some conditions if self.fifo_queue and not self.name.endswith('.fifo'): raise MessageAttributesInvalid('Queue name must end in .fifo for FIFO queues') + def _setup_dlq(self, policy_json): + try: + self.redrive_policy = json.loads(policy_json) + except ValueError: + raise RESTError('InvalidParameterValue', 'Redrive policy does not contain valid json') + + if 'deadLetterTargetArn' not in self.redrive_policy: + raise RESTError('InvalidParameterValue', 'Redrive policy does not contain deadLetterTargetArn') + if 'maxReceiveCount' not in self.redrive_policy: + raise RESTError('InvalidParameterValue', 'Redrive policy does not contain maxReceiveCount') + + for queue in sqs_backends[self.region].queues.values(): + if queue.queue_arn == self.redrive_policy['deadLetterTargetArn']: + self.dead_letter_queue = queue + + if self.fifo_queue and not queue.fifo_queue: + raise RESTError('InvalidParameterCombination', 'Fifo queues cannot use non fifo dead letter queues') + break + else: + raise RESTError('AWS.SimpleQueueService.NonExistentQueue', 'Could not find DLQ for {0}'.format(self.redrive_policy['deadLetterTargetArn'])) + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] @@ -382,9 +415,14 @@ class SQSBackend(BaseBackend): time.sleep(0.001) continue + messages_to_dlq = [] for message in queue.messages: if not message.visible: continue + if queue.dead_letter_queue is not None and message.approximate_receive_count >= queue.redrive_policy['maxReceiveCount']: + messages_to_dlq.append(message) + continue + message.mark_received( visibility_timeout=visibility_timeout ) @@ -392,6 +430,10 @@ class SQSBackend(BaseBackend): if len(result) >= count: break + for message in messages_to_dlq: + queue._messages.remove(message) + queue.dead_letter_queue.add_message(message) + return result def delete_message(self, queue_name, receipt_handle): @@ -419,6 +461,49 @@ class SQSBackend(BaseBackend): queue = self.get_queue(queue_name) 
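The redrive handling above only triggers inside receive_messages, so a queue with a deliberately low maxReceiveCount shows it end to end. A hedged sketch (queue names are illustrative, and it assumes mark_received increments approximate_receive_count as in the existing Message model):

import json
import boto3
from moto import mock_sqs

@mock_sqs
def test_message_redrives_to_dlq():
    client = boto3.client('sqs', region_name='us-east-1')
    dlq_url = client.create_queue(QueueName='dlq')['QueueUrl']
    main_url = client.create_queue(QueueName='main', Attributes={
        'VisibilityTimeout': '0',  # message becomes visible again immediately
        'RedrivePolicy': json.dumps({
            # matches the queue_arn format used by the Queue model above
            'deadLetterTargetArn': 'arn:aws:sqs:us-east-1:123456789012:dlq',
            'maxReceiveCount': 1,
        }),
    })['QueueUrl']

    client.send_message(QueueUrl=main_url, MessageBody='hello')
    client.receive_message(QueueUrl=main_url)  # first delivery, receive count -> 1
    client.receive_message(QueueUrl=main_url)  # count >= maxReceiveCount: moved to DLQ
    moved = client.receive_message(QueueUrl=dlq_url)
    assert moved['Messages'][0]['Body'] == 'hello'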
queue._messages = [] + def list_dead_letter_source_queues(self, queue_name): + dlq = self.get_queue(queue_name) + + queues = [] + for queue in self.queues.values(): + if queue.dead_letter_queue is dlq: + queues.append(queue) + + return queues + + def add_permission(self, queue_name, actions, account_ids, label): + queue = self.get_queue(queue_name) + + if actions is None or len(actions) == 0: + raise RESTError('InvalidParameterValue', 'Need at least one Action') + if account_ids is None or len(account_ids) == 0: + raise RESTError('InvalidParameterValue', 'Need at least one Account ID') + + if not all([item in Queue.ALLOWED_PERMISSIONS for item in actions]): + raise RESTError('InvalidParameterValue', 'Invalid permissions') + + queue.permissions[label] = (account_ids, actions) + + def remove_permission(self, queue_name, label): + queue = self.get_queue(queue_name) + + if label not in queue.permissions: + raise RESTError('InvalidParameterValue', 'Permission doesnt exist for the given label') + + del queue.permissions[label] + + def tag_queue(self, queue_name, tags): + queue = self.get_queue(queue_name) + queue.tags.update(tags) + + def untag_queue(self, queue_name, tag_keys): + queue = self.get_queue(queue_name) + for key in tag_keys: + try: + del queue.tags[key] + except KeyError: + pass + sqs_backends = {} for region in boto.sqs.regions(): diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index 63a5036d6..bb21c1e2a 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -40,12 +40,15 @@ class SQSResponse(BaseResponse): queue_name = self.path.split("/")[-1] return queue_name - def _get_validated_visibility_timeout(self): + def _get_validated_visibility_timeout(self, timeout=None): """ :raises ValueError: If specified visibility timeout exceeds MAXIMUM_VISIBILTY_TIMEOUT :raises TypeError: If visibility timeout was not specified """ - visibility_timeout = int(self.querystring.get("VisibilityTimeout")[0]) + if timeout is not None: + visibility_timeout = int(timeout) + else: + visibility_timeout = int(self.querystring.get("VisibilityTimeout")[0]) if visibility_timeout > MAXIMUM_VISIBILTY_TIMEOUT: raise ValueError @@ -119,6 +122,49 @@ class SQSResponse(BaseResponse): template = self.response_template(CHANGE_MESSAGE_VISIBILITY_RESPONSE) return template.render() + def change_message_visibility_batch(self): + queue_name = self._get_queue_name() + entries = self._get_list_prefix('ChangeMessageVisibilityBatchRequestEntry') + + success = [] + error = [] + for entry in entries: + try: + visibility_timeout = self._get_validated_visibility_timeout(entry['visibility_timeout']) + except ValueError: + error.append({ + 'Id': entry['id'], + 'SenderFault': 'true', + 'Code': 'InvalidParameterValue', + 'Message': 'Visibility timeout invalid' + }) + continue + + try: + self.sqs_backend.change_message_visibility( + queue_name=queue_name, + receipt_handle=entry['receipt_handle'], + visibility_timeout=visibility_timeout + ) + success.append(entry['id']) + except ReceiptHandleIsInvalid as e: + error.append({ + 'Id': entry['id'], + 'SenderFault': 'true', + 'Code': 'ReceiptHandleIsInvalid', + 'Message': e.description + }) + except MessageNotInflight as e: + error.append({ + 'Id': entry['id'], + 'SenderFault': 'false', + 'Code': 'AWS.SimpleQueueService.MessageNotInflight', + 'Message': e.description + }) + + template = self.response_template(CHANGE_MESSAGE_VISIBILITY_BATCH_RESPONSE) + return template.render(success=success, errors=error) + def get_queue_attributes(self): queue_name = 
self._get_queue_name() try: @@ -288,8 +334,62 @@ class SQSResponse(BaseResponse): messages = self.sqs_backend.receive_messages( queue_name, message_count, wait_time, visibility_timeout) template = self.response_template(RECEIVE_MESSAGE_RESPONSE) - output = template.render(messages=messages) - return output + return template.render(messages=messages) + + def list_dead_letter_source_queues(self): + request_url = urlparse(self.uri) + queue_name = self._get_queue_name() + + source_queue_urls = self.sqs_backend.list_dead_letter_source_queues(queue_name) + + template = self.response_template(LIST_DEAD_LETTER_SOURCE_QUEUES_RESPONSE) + return template.render(queues=source_queue_urls, request_url=request_url) + + def add_permission(self): + queue_name = self._get_queue_name() + actions = self._get_multi_param('ActionName') + account_ids = self._get_multi_param('AWSAccountId') + label = self._get_param('Label') + + self.sqs_backend.add_permission(queue_name, actions, account_ids, label) + + template = self.response_template(ADD_PERMISSION_RESPONSE) + return template.render() + + def remove_permission(self): + queue_name = self._get_queue_name() + label = self._get_param('Label') + + self.sqs_backend.remove_permission(queue_name, label) + + template = self.response_template(REMOVE_PERMISSION_RESPONSE) + return template.render() + + def tag_queue(self): + queue_name = self._get_queue_name() + tags = self._get_map_prefix('Tag', key_end='.Key', value_end='.Value') + + self.sqs_backend.tag_queue(queue_name, tags) + + template = self.response_template(TAG_QUEUE_RESPONSE) + return template.render() + + def untag_queue(self): + queue_name = self._get_queue_name() + tag_keys = self._get_multi_param('TagKey') + + self.sqs_backend.untag_queue(queue_name, tag_keys) + + template = self.response_template(UNTAG_QUEUE_RESPONSE) + return template.render() + + def list_queue_tags(self): + queue_name = self._get_queue_name() + + queue = self.sqs_backend.get_queue(queue_name) + + template = self.response_template(LIST_QUEUE_TAGS_RESPONSE) + return template.render(tags=queue.tags) CREATE_QUEUE_RESPONSE = """ @@ -307,7 +407,7 @@ GET_QUEUE_URL_RESPONSE = """ {{ queue.url(request_url) }} - 470a6f13-2ed9-4181-ad8a-2fdea142988e + {{ requestid }} """ @@ -318,13 +418,13 @@ LIST_QUEUES_RESPONSE = """ {% endfor %} - 725275ae-0b9b-4762-b238-436d7c65a1ac + {{ requestid }} """ DELETE_QUEUE_RESPONSE = """ - 6fde8d1e-52cd-4581-8cd9-c512f4c64223 + {{ requestid }} """ @@ -338,13 +438,13 @@ GET_QUEUE_ATTRIBUTES_RESPONSE = """ {% endfor %} - 1ea71be5-b5a2-4f9d-b85a-945d8d08cd0b + {{ requestid }} """ SET_QUEUE_ATTRIBUTE_RESPONSE = """ - e5cca473-4fc0-4198-a451-8abb94d02c75 + {{ requestid }} """ @@ -361,7 +461,7 @@ SEND_MESSAGE_RESPONSE = """ - 27daac76-34dd-47df-bd01-1f6e873584a0 + {{ requestid }} """ @@ -409,7 +509,7 @@ RECEIVE_MESSAGE_RESPONSE = """ {% endfor %} - b6633655-283d-45b4-aee4-4e84e0ae6afa + {{ requestid }} """ @@ -427,13 +527,13 @@ SEND_MESSAGE_BATCH_RESPONSE = """ {% endfor %} - ca1ad5d0-8271-408b-8d0f-1351bf547e74 + {{ requestid }} """ DELETE_MESSAGE_RESPONSE = """ - b5293cb5-d306-4a17-9048-b263635abe42 + {{ requestid }} """ @@ -446,22 +546,92 @@ DELETE_MESSAGE_BATCH_RESPONSE = """ {% endfor %} - d6f86b7a-74d1-4439-b43f-196a1e29cd85 + {{ requestid }} """ CHANGE_MESSAGE_VISIBILITY_RESPONSE = """ - 6a7a282a-d013-4a59-aba9-335b0fa48bed + {{ requestid }} """ +CHANGE_MESSAGE_VISIBILITY_BATCH_RESPONSE = """ + + {% for success_id in success %} + + {{ success_id }} + + {% endfor %} + {% for error_dict in errors %} + + {{ 
error_dict['Id'] }} + {{ error_dict['Code'] }} + {{ error_dict['Message'] }} + {{ error_dict['SenderFault'] }} + + {% endfor %} + + + {{ request_id }} + +""" + PURGE_QUEUE_RESPONSE = """ - 6fde8d1e-52cd-4581-8cd9-c512f4c64223 + {{ requestid }} """ +LIST_DEAD_LETTER_SOURCE_QUEUES_RESPONSE = """ + + {% for queue in queues %} + {{ queue.url(request_url) }} + {% endfor %} + + + 8ffb921f-b85e-53d9-abcf-d8d0057f38fc + +""" + +ADD_PERMISSION_RESPONSE = """ + + {{ request_id }} + +""" + +REMOVE_PERMISSION_RESPONSE = """ + + {{ request_id }} + +""" + +TAG_QUEUE_RESPONSE = """ + + {{ request_id }} + +""" + +UNTAG_QUEUE_RESPONSE = """ + + {{ request_id }} + +""" + +LIST_QUEUE_TAGS_RESPONSE = """ + + {% for key, value in tags.items() %} + + {{ key }} + {{ value }} + + {% endfor %} + + + {{ request_id }} + +""" + ERROR_TOO_LONG_RESPONSE = """ Sender diff --git a/moto/ssm/models.py b/moto/ssm/models.py index a0e4a2155..c8c428b64 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -75,6 +75,21 @@ class SimpleSystemManagerBackend(BaseBackend): result.append(self._parameters[name]) return result + def get_parameters_by_path(self, path, with_decryption, recursive): + """Implement the get-parameters-by-path-API in the backend.""" + result = [] + # path could be given with or without a trailing /; we handle + # that difference here. + path = path.rstrip('/') + '/' + for param in self._parameters: + if not param.startswith(path): + continue + if '/' in param[len(path) + 1:] and not recursive: + continue + result.append(self._parameters[param]) + + return result + def get_parameter(self, name, with_decryption): if name in self._parameters: return self._parameters[name] diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index 3b75ada09..3227839b9 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -81,6 +81,25 @@ class SimpleSystemManagerResponse(BaseResponse): response['InvalidParameters'].append(name) return json.dumps(response) + def get_parameters_by_path(self): + path = self._get_param('Path') + with_decryption = self._get_param('WithDecryption') + recursive = self._get_param('Recursive', False) + + result = self.ssm_backend.get_parameters_by_path( + path, with_decryption, recursive + ) + + response = { + 'Parameters': [], + } + + for parameter in result: + param_data = parameter.response_object(with_decryption) + response['Parameters'].append(param_data) + + return json.dumps(response) + def describe_parameters(self): page_size = 10 filters = self._get_param('Filters') diff --git a/scripts/get_amis.py b/scripts/get_amis.py new file mode 100644 index 000000000..81f69c5dd --- /dev/null +++ b/scripts/get_amis.py @@ -0,0 +1,40 @@ +import boto3 +import json + +# Taken from the free tier list when creating an instance +instances = [ + 'ami-760aaa0f', 'ami-bb9a6bc2', 'ami-35e92e4c', 'ami-785db401', 'ami-b7e93bce', 'ami-dca37ea5', 'ami-999844e0', + 'ami-9b32e8e2', 'ami-f8e54081', 'ami-bceb39c5', 'ami-03cf127a', 'ami-1ecc1e67', 'ami-c2ff2dbb', 'ami-12c6146b', + 'ami-d1cb19a8', 'ami-61db0918', 'ami-56ec3e2f', 'ami-84ee3cfd', 'ami-86ee3cff', 'ami-f0e83a89', 'ami-1f12c066', + 'ami-afee3cd6', 'ami-1812c061', 'ami-77ed3f0e', 'ami-3bf32142', 'ami-6ef02217', 'ami-f4cf1d8d', 'ami-3df32144', + 'ami-c6f321bf', 'ami-24f3215d', 'ami-fa7cdd89', 'ami-1e749f67', 'ami-a9cc1ed0', 'ami-8104a4f8' +] + +client = boto3.client('ec2', region_name='eu-west-1') + +test = client.describe_images(ImageIds=instances) + +result = [] +for image in test['Images']: + try: + tmp = { + 'ami_id': image['ImageId'], + 'name': 
image['Name'], + 'description': image['Description'], + 'owner_id': image['OwnerId'], + 'public': image['Public'], + 'virtualization_type': image['VirtualizationType'], + 'architecture': image['Architecture'], + 'state': image['State'], + 'platform': image.get('Platform'), + 'image_type': image['ImageType'], + 'hypervisor': image['Hypervisor'], + 'root_device_name': image['RootDeviceName'], + 'root_device_type': image['RootDeviceType'], + 'sriov': image.get('SriovNetSupport', 'simple') + } + result.append(tmp) + except Exception as err: + pass + +print(json.dumps(result, indent=2)) diff --git a/scripts/implementation_coverage.py b/scripts/implementation_coverage.py index f0d22fc95..245784cb0 100755 --- a/scripts/implementation_coverage.py +++ b/scripts/implementation_coverage.py @@ -56,14 +56,14 @@ def print_implementation_coverage(): else: percentage_implemented = 0 - print("-----------------------") - print("{} - {}% implemented".format(service_name, percentage_implemented)) - print("-----------------------") + print("") + print("## {} - {}% implemented".format(service_name, percentage_implemented)) for op in operations: if op in implemented: - print("[X] {}".format(op)) + print("- [X] {}".format(op)) else: - print("[ ] {}".format(op)) + print("- [ ] {}".format(op)) + if __name__ == '__main__': print_implementation_coverage() diff --git a/scripts/scaffold.py b/scripts/scaffold.py index b1c9f3a0f..6c83eeb50 100755 --- a/scripts/scaffold.py +++ b/scripts/scaffold.py @@ -81,12 +81,14 @@ def select_service_and_operation(): raise click.Abort() return service_name, operation_name +def get_escaped_service(service): + return service.replace('-', '') def get_lib_dir(service): - return os.path.join('moto', service) + return os.path.join('moto', get_escaped_service(service)) def get_test_dir(service): - return os.path.join('tests', 'test_{}'.format(service)) + return os.path.join('tests', 'test_{}'.format(get_escaped_service(service))) def render_template(tmpl_dir, tmpl_filename, context, service, alt_filename=None): @@ -117,7 +119,7 @@ def append_mock_to_init_py(service): filtered_lines = [_ for _ in lines if re.match('^from.*mock.*$', _)] last_import_line_index = lines.index(filtered_lines[-1]) - new_line = 'from .{} import mock_{} # flake8: noqa'.format(service, service) + new_line = 'from .{} import mock_{} # flake8: noqa'.format(get_escaped_service(service), get_escaped_service(service)) lines.insert(last_import_line_index + 1, new_line) body = '\n'.join(lines) + '\n' @@ -135,7 +137,7 @@ def append_mock_import_to_backends_py(service): filtered_lines = [_ for _ in lines if re.match('^from.*backends.*$', _)] last_import_line_index = lines.index(filtered_lines[-1]) - new_line = 'from moto.{} import {}_backends'.format(service, service) + new_line = 'from moto.{} import {}_backends'.format(get_escaped_service(service), get_escaped_service(service)) lines.insert(last_import_line_index + 1, new_line) body = '\n'.join(lines) + '\n' @@ -147,13 +149,12 @@ def append_mock_dict_to_backends_py(service): with open(path) as f: lines = [_.replace('\n', '') for _ in f.readlines()] - # 'xray': xray_backends if any(_ for _ in lines if re.match(".*'{}': {}_backends.*".format(service, service), _)): return filtered_lines = [_ for _ in lines if re.match(".*'.*':.*_backends.*", _)] last_elem_line_index = lines.index(filtered_lines[-1]) - new_line = " '{}': {}_backends,".format(service, service) + new_line = " '{}': {}_backends,".format(service, get_escaped_service(service)) prev_line = lines[last_elem_line_index] 
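The escaping helper added above is small but load-bearing: it is what lets a hyphenated API name map onto an importable module name. For illustration:

get_escaped_service('iot-data')  # -> 'iotdata', the module name used by moto/iotdata above
get_escaped_service('iot')       # -> 'iot' (names without hyphens pass through unchanged)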
if not prev_line.endswith('{') and not prev_line.endswith(','): lines[last_elem_line_index] += ',' @@ -166,8 +167,8 @@ def append_mock_dict_to_backends_py(service): def initialize_service(service, operation, api_protocol): """create lib and test dirs if not exist """ - lib_dir = os.path.join('moto', service) - test_dir = os.path.join('tests', 'test_{}'.format(service)) + lib_dir = get_lib_dir(service) + test_dir = get_test_dir(service) print_progress('Initializing service', service, 'green') @@ -178,7 +179,9 @@ def initialize_service(service, operation, api_protocol): tmpl_context = { 'service': service, 'service_class': service_class, - 'endpoint_prefix': endpoint_prefix + 'endpoint_prefix': endpoint_prefix, + 'api_protocol': api_protocol, + 'escaped_service': get_escaped_service(service) } # initialize service directory @@ -202,7 +205,7 @@ def initialize_service(service, operation, api_protocol): os.makedirs(test_dir) tmpl_dir = os.path.join(TEMPLATE_DIR, 'test') for tmpl_filename in os.listdir(tmpl_dir): - alt_filename = 'test_{}.py'.format(service) if tmpl_filename == 'test_service.py.j2' else None + alt_filename = 'test_{}.py'.format(get_escaped_service(service)) if tmpl_filename == 'test_service.py.j2' else None render_template( tmpl_dir, tmpl_filename, tmpl_context, service, alt_filename ) @@ -212,9 +215,16 @@ def initialize_service(service, operation, api_protocol): append_mock_import_to_backends_py(service) append_mock_dict_to_backends_py(service) + def to_upper_camel_case(s): return ''.join([_.title() for _ in s.split('_')]) + +def to_lower_camel_case(s): + words = s.split('_') + return ''.join(words[:1] + [_.title() for _ in words[1:]]) + + def to_snake_case(s): s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() @@ -229,25 +239,28 @@ def get_function_in_responses(service, operation, protocol): aws_operation_name = to_upper_camel_case(operation) op_model = client._service_model.operation_model(aws_operation_name) - outputs = op_model.output_shape.members + if not hasattr(op_model.output_shape, 'members'): + outputs = {} + else: + outputs = op_model.output_shape.members inputs = op_model.input_shape.members input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND] output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND] - body = 'def {}(self):\n'.format(operation) + body = '\ndef {}(self):\n'.format(operation) for input_name, input_type in inputs.items(): type_name = input_type.type_name if type_name == 'integer': - arg_line_tmpl = ' {} = _get_int_param("{}")\n' + arg_line_tmpl = ' {} = self._get_int_param("{}")\n' elif type_name == 'list': arg_line_tmpl = ' {} = self._get_list_prefix("{}.member")\n' else: arg_line_tmpl = ' {} = self._get_param("{}")\n' body += arg_line_tmpl.format(to_snake_case(input_name), input_name) if output_names: - body += ' {} = self.{}_backend.{}(\n'.format(','.join(output_names), service, operation) + body += ' {} = self.{}_backend.{}(\n'.format(', '.join(output_names), get_escaped_service(service), operation) else: - body += ' self.{}_backend.{}(\n'.format(service, operation) + body += ' self.{}_backend.{}(\n'.format(get_escaped_service(service), operation) for input_name in input_names: body += ' {}={},\n'.format(input_name, input_name) @@ -255,11 +268,11 @@ def get_function_in_responses(service, operation, protocol): if protocol == 'query': body += ' template = self.response_template({}_TEMPLATE)\n'.format(operation.upper()) 
body += ' return template.render({})\n'.format( - ','.join(['{}={}'.format(_, _) for _ in output_names]) + ', '.join(['{}={}'.format(_, _) for _ in output_names]) ) - elif protocol == 'json': - body += ' # TODO: adjust reponse\n' - body += ' return json.dumps({})\n'.format(','.join(['{}={}'.format(_, _) for _ in output_names])) + elif protocol in ['json', 'rest-json']: + body += ' # TODO: adjust response\n' + body += ' return json.dumps(dict({}))\n'.format(', '.join(['{}={}'.format(to_lower_camel_case(_), _) for _ in output_names])) return body @@ -272,7 +285,10 @@ def get_function_in_models(service, operation): aws_operation_name = to_upper_camel_case(operation) op_model = client._service_model.operation_model(aws_operation_name) inputs = op_model.input_shape.members - outputs = op_model.output_shape.members + if not hasattr(op_model.output_shape, 'members'): + outputs = {} + else: + outputs = op_model.output_shape.members input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND] output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND] if input_names: @@ -280,7 +296,7 @@ def get_function_in_models(service, operation): else: body = 'def {}(self)\n' body += ' # implement here\n' - body += ' return {}\n'.format(', '.join(output_names)) + body += ' return {}\n\n'.format(', '.join(output_names)) return body @@ -388,13 +404,13 @@ def insert_code_to_class(path, base_class, new_code): f.write(body) -def insert_url(service, operation): +def insert_url(service, operation, api_protocol): client = boto3.client(service) service_class = client.__class__.__name__ aws_operation_name = to_upper_camel_case(operation) uri = client._service_model.operation_model(aws_operation_name).http['requestUri'] - path = os.path.join(os.path.dirname(__file__), '..', 'moto', service, 'urls.py') + path = os.path.join(os.path.dirname(__file__), '..', 'moto', get_escaped_service(service), 'urls.py') with open(path) as f: lines = [_.replace('\n', '') for _ in f.readlines()] @@ -413,81 +429,55 @@ def insert_url(service, operation): if not prev_line.endswith('{') and not prev_line.endswith(','): lines[last_elem_line_index] += ',' - new_line = " '{0}%s$': %sResponse.dispatch," % ( - uri, service_class - ) + # generate url pattern + if api_protocol == 'rest-json': + new_line = " '{0}/.*$': response.dispatch," + else: + new_line = " '{0}%s$': %sResponse.dispatch," % ( + uri, service_class + ) + if new_line in lines: + return lines.insert(last_elem_line_index + 1, new_line) body = '\n'.join(lines) + '\n' with open(path, 'w') as f: f.write(body) - -def insert_query_codes(service, operation): - func_in_responses = get_function_in_responses(service, operation, 'query') +def insert_codes(service, operation, api_protocol): + func_in_responses = get_function_in_responses(service, operation, api_protocol) func_in_models = get_function_in_models(service, operation) - template = get_response_query_template(service, operation) - # edit responses.py - responses_path = 'moto/{}/responses.py'.format(service) + responses_path = 'moto/{}/responses.py'.format(get_escaped_service(service)) print_progress('inserting code', responses_path, 'green') insert_code_to_class(responses_path, BaseResponse, func_in_responses) # insert template - with open(responses_path) as f: - lines = [_[:-1] for _ in f.readlines()] - lines += template.splitlines() - with open(responses_path, 'w') as f: - f.write('\n'.join(lines)) + if api_protocol == 'query': + template = 
get_response_query_template(service, operation) + with open(responses_path) as f: + lines = [_[:-1] for _ in f.readlines()] + lines += template.splitlines() + with open(responses_path, 'w') as f: + f.write('\n'.join(lines)) # edit models.py - models_path = 'moto/{}/models.py'.format(service) + models_path = 'moto/{}/models.py'.format(get_escaped_service(service)) print_progress('inserting code', models_path, 'green') insert_code_to_class(models_path, BaseBackend, func_in_models) # edit urls.py - insert_url(service, operation) + insert_url(service, operation, api_protocol) -def insert_json_codes(service, operation): - func_in_responses = get_function_in_responses(service, operation, 'json') - func_in_models = get_function_in_models(service, operation) - - # edit responses.py - responses_path = 'moto/{}/responses.py'.format(service) - print_progress('inserting code', responses_path, 'green') - insert_code_to_class(responses_path, BaseResponse, func_in_responses) - - # edit models.py - models_path = 'moto/{}/models.py'.format(service) - print_progress('inserting code', models_path, 'green') - insert_code_to_class(models_path, BaseBackend, func_in_models) - - # edit urls.py - insert_url(service, operation) - -def insert_restjson_codes(service, operation): - func_in_models = get_function_in_models(service, operation) - - print_progress('skipping inserting code to responses.py', "dont't know how to implement", 'yellow') - # edit models.py - models_path = 'moto/{}/models.py'.format(service) - print_progress('inserting code', models_path, 'green') - insert_code_to_class(models_path, BaseBackend, func_in_models) - - # edit urls.py - insert_url(service, operation) @click.command() def main(): service, operation = select_service_and_operation() api_protocol = boto3.client(service)._service_model.metadata['protocol'] initialize_service(service, operation, api_protocol) - if api_protocol == 'query': - insert_query_codes(service, operation) - elif api_protocol == 'json': - insert_json_codes(service, operation) - elif api_protocol == 'rest-json': - insert_restjson_codes(service, operation) + + if api_protocol in ['query', 'json', 'rest-json']: + insert_codes(service, operation, api_protocol) else: print_progress('skip inserting code', 'api protocol "{}" is not supported'.format(api_protocol), 'yellow') diff --git a/scripts/template/lib/__init__.py.j2 b/scripts/template/lib/__init__.py.j2 index 8e5bf50c7..5aade5706 100644 --- a/scripts/template/lib/__init__.py.j2 +++ b/scripts/template/lib/__init__.py.j2 @@ -1,7 +1,7 @@ from __future__ import unicode_literals -from .models import {{ service }}_backends +from .models import {{ escaped_service }}_backends from ..core.models import base_decorator -{{ service }}_backend = {{ service }}_backends['us-east-1'] -mock_{{ service }} = base_decorator({{ service }}_backends) +{{ escaped_service }}_backend = {{ escaped_service }}_backends['us-east-1'] +mock_{{ escaped_service }} = base_decorator({{ escaped_service }}_backends) diff --git a/scripts/template/lib/models.py.j2 b/scripts/template/lib/models.py.j2 index 623321884..28fa4a4e1 100644 --- a/scripts/template/lib/models.py.j2 +++ b/scripts/template/lib/models.py.j2 @@ -17,4 +17,4 @@ class {{ service_class }}Backend(BaseBackend): available_regions = boto3.session.Session().get_available_regions("{{ service }}") -{{ service }}_backends = {region: {{ service_class }}Backend(region) for region in available_regions} +{{ escaped_service }}_backends = {region: {{ service_class }}Backend(region) for region in 
available_regions} diff --git a/scripts/template/lib/responses.py.j2 b/scripts/template/lib/responses.py.j2 index 85827e651..60d0048e3 100644 --- a/scripts/template/lib/responses.py.j2 +++ b/scripts/template/lib/responses.py.j2 @@ -1,12 +1,14 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse -from .models import {{ service }}_backends +from .models import {{ escaped_service }}_backends +import json class {{ service_class }}Response(BaseResponse): + SERVICE_NAME = '{{ service }}' @property - def {{ service }}_backend(self): - return {{ service }}_backends[self.region] + def {{ escaped_service }}_backend(self): + return {{ escaped_service }}_backends[self.region] # add methods from here diff --git a/scripts/template/lib/urls.py.j2 b/scripts/template/lib/urls.py.j2 index 53cc03c0e..47ae52f2d 100644 --- a/scripts/template/lib/urls.py.j2 +++ b/scripts/template/lib/urls.py.j2 @@ -5,5 +5,9 @@ url_bases = [ "https?://{{ endpoint_prefix }}.(.+).amazonaws.com", ] +{% if api_protocol == 'rest-json' %} +response = {{ service_class }}Response() +{% endif %} + url_paths = { } diff --git a/scripts/template/test/test_server.py.j2 b/scripts/template/test/test_server.py.j2 index f3963a743..c85dbf01c 100644 --- a/scripts/template/test/test_server.py.j2 +++ b/scripts/template/test/test_server.py.j2 @@ -3,14 +3,14 @@ from __future__ import unicode_literals import sure # noqa import moto.server as server -from moto import mock_{{ service }} +from moto import mock_{{ escaped_service }} ''' Test the different server responses ''' -@mock_{{ service }} -def test_{{ service }}_list(): +@mock_{{ escaped_service }} +def test_{{ escaped_service }}_list(): backend = server.create_backend_app("{{ service }}") test_client = backend.test_client() # do test diff --git a/scripts/template/test/test_service.py.j2 b/scripts/template/test/test_service.py.j2 index 076f92e27..799f6079f 100644 --- a/scripts/template/test/test_service.py.j2 +++ b/scripts/template/test/test_service.py.j2 @@ -2,10 +2,10 @@ from __future__ import unicode_literals import boto3 import sure # noqa -from moto import mock_{{ service }} +from moto import mock_{{ escaped_service }} -@mock_{{ service }} +@mock_{{ escaped_service }} def test_list(): # do test pass diff --git a/setup.py b/setup.py index bdb8a1dd6..fdd5b5a48 100755 --- a/setup.py +++ b/setup.py @@ -21,7 +21,8 @@ install_requires = [ "python-dateutil<3.0.0,>=2.1", "mock", "docker>=2.5.1", - "aws-xray-sdk>=0.93" + "jsondiff==1.1.1", + "aws-xray-sdk>=0.93", ] extras_require = { @@ -38,7 +39,7 @@ else: setup( name='moto', - version='1.1.23', + version='1.1.24', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', diff --git a/tests/test_acm/test_acm.py b/tests/test_acm/test_acm.py index db1969645..ccac48181 100644 --- a/tests/test_acm/test_acm.py +++ b/tests/test_acm/test_acm.py @@ -4,6 +4,7 @@ import os import boto3 from freezegun import freeze_time import sure # noqa +import uuid from botocore.exceptions import ClientError @@ -281,11 +282,23 @@ def test_resend_validation_email_invalid(): def test_request_certificate(): client = boto3.client('acm', region_name='eu-central-1') + token = str(uuid.uuid4()) + resp = client.request_certificate( DomainName='google.com', + IdempotencyToken=token, SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], ) resp.should.contain('CertificateArn') + arn = resp['CertificateArn'] + + resp = client.request_certificate( + DomainName='google.com', 
+ IdempotencyToken=token, + SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], + ) + resp['CertificateArn'].should.equal(arn) + @mock_acm def test_request_certificate_no_san(): diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 317e9f4a2..7bdfe3256 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -488,6 +488,7 @@ def lambda_handler(event, context): assert 'FunctionError' in result assert result['FunctionError'] == 'Handled' + @mock_lambda @mock_s3 def test_tags(): @@ -554,6 +555,7 @@ def test_tags(): TagKeys=['spam'] )['ResponseMetadata']['HTTPStatusCode'].should.equal(204) + @mock_lambda def test_tags_not_found(): """ @@ -574,6 +576,7 @@ TagKeys=['spam'] ).should.throw(botocore.client.ClientError) + @mock_lambda def test_invoke_async_function(): conn = boto3.client('lambda', 'us-west-2') @@ -581,10 +584,8 @@ FunctionName='testFunction', Runtime='python2.7', Role='test-iam-role', - Handler='lambda_function.handler', - Code={ - 'ZipFile': get_test_zip_file1(), - }, + Handler='lambda_function.lambda_handler', + Code={'ZipFile': get_test_zip_file1()}, Description='test lambda function', Timeout=3, MemorySize=128, @@ -593,11 +594,12 @@ success_result = conn.invoke_async( FunctionName='testFunction', - InvokeArgs=json.dumps({ 'test': 'event' }) + InvokeArgs=json.dumps({'test': 'event'}) ) success_result['Status'].should.equal(202) + @mock_lambda @freeze_time('2015-01-01 00:00:00') def test_get_function_created_with_zipfile(): @@ -646,6 +648,7 @@ }, ) + @mock_lambda def add_function_permission(): conn = boto3.client('lambda', 'us-west-2') diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index df696d879..711d9153f 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -38,7 +38,7 @@ from moto import ( mock_sns_deprecated, mock_sqs, mock_sqs_deprecated, -) + mock_elbv2) from .fixtures import ( ec2_classic_eip, @@ -2111,3 +2111,158 @@ def test_stack_spot_fleet(): launch_spec['SubnetId'].should.equal(subnet_id) launch_spec['SpotPrice'].should.equal("0.13") launch_spec['WeightedCapacity'].should.equal(2.0) + + +@mock_ec2 +@mock_elbv2 +@mock_cloudformation +def test_stack_elbv2_resources_integration(): + alb_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Outputs": { + "albdns": { + "Description": "Load balancer DNS", + "Value": {"Fn::GetAtt": ["alb", "DNSName"]}, + }, + "albname": { + "Description": "Load balancer name", + "Value": {"Fn::GetAtt": ["alb", "LoadBalancerName"]}, + }, + }, + "Resources": { + "alb": { + "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", + "Properties": { + "Name": "myelbv2", + "Scheme": "internet-facing", + "Subnets": [{ + "Ref": "mysubnet", + }], + "SecurityGroups": [{ + "Ref": "mysg", + }], + "Type": "application", + "IpAddressType": "ipv4", + } + }, + "mytargetgroup": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "HealthCheckIntervalSeconds": 30, + "HealthCheckPath": "/status", + "HealthCheckPort": 80, + "HealthCheckProtocol": "HTTP", + "HealthCheckTimeoutSeconds": 5, + "HealthyThresholdCount": 30, + "UnhealthyThresholdCount": 5, + "Matcher": { + "HttpCode": "200,201" + }, + 
"Name": "mytargetgroup", + "Port": 80, + "Protocol": "HTTP", + "TargetType": "instance", + "Targets": [{ + "Id": { + "Ref": "ec2instance", + "Port": 80, + }, + }], + "VpcId": { + "Ref": "myvpc", + } + } + }, + "listener": { + "Type": "AWS::ElasticLoadBalancingV2::Listener", + "Properties": { + "DefaultActions": [{ + "Type": "forward", + "TargetGroupArn": {"Ref": "mytargetgroup"} + }], + "LoadBalancerArn": {"Ref": "alb"}, + "Port": "80", + "Protocol": "HTTP" + } + }, + "myvpc": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + } + }, + "mysubnet": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.0.0/27", + "VpcId": {"Ref": "myvpc"}, + } + }, + "mysg": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupName": "mysg", + "GroupDescription": "test security group", + "VpcId": {"Ref": "myvpc"} + } + }, + "ec2instance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-1234abcd", + "UserData": "some user data", + } + }, + }, + } + alb_template_json = json.dumps(alb_template) + + cfn_conn = boto3.client("cloudformation", "us-west-1") + cfn_conn.create_stack( + StackName="elb_stack", + TemplateBody=alb_template_json, + ) + + elbv2_conn = boto3.client("elbv2", "us-west-1") + + load_balancers = elbv2_conn.describe_load_balancers()['LoadBalancers'] + len(load_balancers).should.equal(1) + load_balancers[0]['LoadBalancerName'].should.equal('myelbv2') + load_balancers[0]['Scheme'].should.equal('internet-facing') + load_balancers[0]['Type'].should.equal('application') + load_balancers[0]['IpAddressType'].should.equal('ipv4') + + target_groups = elbv2_conn.describe_target_groups()['TargetGroups'] + len(target_groups).should.equal(1) + target_groups[0]['HealthCheckIntervalSeconds'].should.equal(30) + target_groups[0]['HealthCheckPath'].should.equal('/status') + target_groups[0]['HealthCheckPort'].should.equal('80') + target_groups[0]['HealthCheckProtocol'].should.equal('HTTP') + target_groups[0]['HealthCheckTimeoutSeconds'].should.equal(5) + target_groups[0]['HealthyThresholdCount'].should.equal(30) + target_groups[0]['UnhealthyThresholdCount'].should.equal(5) + target_groups[0]['Matcher'].should.equal({'HttpCode': '200,201'}) + target_groups[0]['TargetGroupName'].should.equal('mytargetgroup') + target_groups[0]['Port'].should.equal(80) + target_groups[0]['Protocol'].should.equal('HTTP') + target_groups[0]['TargetType'].should.equal('instance') + + listeners = elbv2_conn.describe_listeners(LoadBalancerArn=load_balancers[0]['LoadBalancerArn'])['Listeners'] + len(listeners).should.equal(1) + listeners[0]['LoadBalancerArn'].should.equal(load_balancers[0]['LoadBalancerArn']) + listeners[0]['Port'].should.equal(80) + listeners[0]['Protocol'].should.equal('HTTP') + listeners[0]['DefaultActions'].should.equal([{ + "Type": "forward", + "TargetGroupArn": target_groups[0]['TargetGroupArn'] + }]) + + # test outputs + stacks = cfn_conn.describe_stacks(StackName='elb_stack')['Stacks'] + len(stacks).should.equal(1) + + dns = list(filter(lambda item: item['OutputKey'] == 'albdns', stacks[0]['Outputs']))[0] + name = list(filter(lambda item: item['OutputKey'] == 'albname', stacks[0]['Outputs']))[0] + + dns['OutputValue'].should.equal(load_balancers[0]['DNSName']) + name['OutputValue'].should.equal(load_balancers[0]['LoadBalancerName']) diff --git a/tests/test_cloudwatch/test_cloudwatch.py b/tests/test_cloudwatch/test_cloudwatch.py index 9b3f76c36..2f8528855 100644 --- a/tests/test_cloudwatch/test_cloudwatch.py +++ 
b/tests/test_cloudwatch/test_cloudwatch.py @@ -118,12 +118,3 @@ def test_describe_alarms(): alarms = conn.describe_alarms() alarms.should.have.length_of(0) - - -@mock_cloudwatch_deprecated -def test_describe_state_value_unimplemented(): - conn = boto.connect_cloudwatch() - - conn.describe_alarms() - conn.describe_alarms.when.called_with( - state_value="foo").should.throw(NotImplementedError) diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py index 923ba0b75..e621a642a 100644 --- a/tests/test_cloudwatch/test_cloudwatch_boto3.py +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -87,6 +87,54 @@ def test_get_dashboard_fail(): raise RuntimeError('Should have raised error') +@mock_cloudwatch +def test_alarm_state(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + + client.put_metric_alarm( + AlarmName='testalarm1', + MetricName='cpu', + Namespace='blah', + Period=10, + EvaluationPeriods=5, + Statistic='Average', + Threshold=2, + ComparisonOperator='GreaterThanThreshold', + ) + client.put_metric_alarm( + AlarmName='testalarm2', + MetricName='cpu', + Namespace='blah', + Period=10, + EvaluationPeriods=5, + Statistic='Average', + Threshold=2, + ComparisonOperator='GreaterThanThreshold', + ) + + # This is tested implicitly: if it doesn't work, the rest will die + client.set_alarm_state( + AlarmName='testalarm1', + StateValue='ALARM', + StateReason='testreason', + StateReasonData='{"some": "json_data"}' + ) + + resp = client.describe_alarms( + StateValue='ALARM' + ) + len(resp['MetricAlarms']).should.equal(1) + resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm1') + + resp = client.describe_alarms( + StateValue='OK' + ) + len(resp['MetricAlarms']).should.equal(1) + resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm2') + + # Just for sanity + resp = client.describe_alarms() + len(resp['MetricAlarms']).should.equal(2) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 5df03f8d8..17c5310d4 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -28,13 +28,13 @@ except ImportError: @mock_dynamodb2_deprecated def test_list_tables(): name = 'TestTable' - #{'schema': } + # Should make tables properly with boto dynamodb_backend2.create_table(name, schema=[ {u'KeyType': u'HASH', u'AttributeName': u'forum_name'}, {u'KeyType': u'RANGE', u'AttributeName': u'subject'} ]) conn = boto.dynamodb2.connect_to_region( - 'us-west-2', + 'us-east-1', aws_access_key_id="ak", aws_secret_access_key="sk") assert conn.list_tables()["TableNames"] == [name] @@ -43,6 +43,7 @@ def test_list_tables(): @requires_boto_gte("2.9") @mock_dynamodb2_deprecated def test_list_tables_layer_1(): + # Should make tables properly with boto dynamodb_backend2.create_table("test_1", schema=[ {u'KeyType': u'HASH', u'AttributeName': u'name'} ]) @@ -50,7 +51,7 @@ def test_list_tables_layer_1(): {u'KeyType': u'HASH', u'AttributeName': u'name'} ]) conn = boto.dynamodb2.connect_to_region( - 'us-west-2', + 'us-east-1', aws_access_key_id="ak", aws_secret_access_key="sk") @@ -88,12 +89,22 @@ def test_list_table_tags(): ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) table_description = conn.describe_table(TableName=name) arn = table_description['Table']['TableArn'] - tags = [{'Key':'TestTag', 'Value': 'TestValue'}] - conn.tag_resource(ResourceArn=arn, - Tags=tags) + + # Tag table + tags = [{'Key': 'TestTag', 'Value': 'TestValue'}, {'Key': 'TestTag2',
'Value': 'TestValue2'}] + conn.tag_resource(ResourceArn=arn, Tags=tags) + + # Check tags resp = conn.list_tags_of_resource(ResourceArn=arn) assert resp["Tags"] == tags + # Remove 1 tag + conn.untag_resource(ResourceArn=arn, TagKeys=['TestTag']) + + # Check tags + resp = conn.list_tags_of_resource(ResourceArn=arn) + assert resp["Tags"] == [{'Key': 'TestTag2', 'Value': 'TestValue2'}] + @requires_boto_gte("2.9") @mock_dynamodb2 @@ -356,10 +367,21 @@ def test_basic_projection_expressions(): ) assert 'body' in results['Items'][0] + assert 'subject' not in results['Items'][0] assert results['Items'][0]['body'] == 'some test message' assert 'body' in results['Items'][1] + assert 'subject' not in results['Items'][1] assert results['Items'][1]['body'] == 'yet another test message' + # The projection expression should not remove data from storage + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key'), + ) + assert 'subject' in results['Items'][0] + assert 'body' in results['Items'][1] + assert 'forum_name' in results['Items'][1] + @mock_dynamodb2 def test_basic_projection_expressions_with_attr_expression_names(): @@ -638,6 +660,47 @@ def test_filter_expression(): filter_expr.expr(row1).should.be(True) +@mock_dynamodb2 +def test_query_filter(): + client = boto3.client('dynamodb', region_name='us-east-1') + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'} + } + ) + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app2'} + } + ) + + table = dynamodb.Table('test1') + response = table.query( + KeyConditionExpression=Key('client').eq('client1') + ) + assert response['Count'] == 2 + + response = table.query( + KeyConditionExpression=Key('client').eq('client1'), + FilterExpression=Attr('app').eq('app2') + ) + assert response['Count'] == 1 + assert response['Items'][0]['app'] == 'app2' + + @mock_dynamodb2 def test_scan_filter(): client = boto3.client('dynamodb', region_name='us-east-1') @@ -868,3 +931,78 @@ def test_delete_item(): response = table.scan() assert response['Count'] == 0 + + +@mock_dynamodb2 +def test_describe_limits(): + client = boto3.client('dynamodb', region_name='eu-central-1') + resp = client.describe_limits() + + resp['AccountMaxReadCapacityUnits'].should.equal(20000) + resp['AccountMaxWriteCapacityUnits'].should.equal(20000) + resp['TableMaxWriteCapacityUnits'].should.equal(10000) + resp['TableMaxReadCapacityUnits'].should.equal(10000) + + +@mock_dynamodb2 +def test_set_ttl(): + client = boto3.client('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
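+ # Aside (editor's sketch, not part of the original patch): update_time_to_live, exercised below, takes a TimeToLiveSpecification naming the number attribute that holds the expiry epoch time; the same shape with Enabled=False switches TTL back off, as the second call in this test checks. + # A minimal example payload (variable name hypothetical): + ttl_spec_example = {'Enabled': True, 'AttributeName': 'expire'}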
+ client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + + client.update_time_to_live( + TableName='test1', + TimeToLiveSpecification={ + 'Enabled': True, + 'AttributeName': 'expire' + } + ) + + resp = client.describe_time_to_live(TableName='test1') + resp['TimeToLiveDescription']['TimeToLiveStatus'].should.equal('ENABLED') + resp['TimeToLiveDescription']['AttributeName'].should.equal('expire') + + client.update_time_to_live( + TableName='test1', + TimeToLiveSpecification={ + 'Enabled': False, + 'AttributeName': 'expire' + } + ) + + resp = client.describe_time_to_live(TableName='test1') + resp['TimeToLiveDescription']['TimeToLiveStatus'].should.equal('DISABLED') + + +# https://github.com/spulec/moto/issues/1043 +@mock_dynamodb2 +def test_query_missing_expr_names(): + client = boto3.client('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + client.put_item(TableName='test1', Item={'client': {'S': 'test1'}, 'app': {'S': 'test1'}}) + client.put_item(TableName='test1', Item={'client': {'S': 'test2'}, 'app': {'S': 'test2'}}) + + resp = client.query(TableName='test1', KeyConditionExpression='client=:client', + ExpressionAttributeValues={':client': {'S': 'test1'}}) + + resp['Count'].should.equal(1) + resp['Items'][0]['client']['S'].should.equal('test1') + + resp = client.query(TableName='test1', KeyConditionExpression=':name=test2', + ExpressionAttributeNames={':name': 'client'}) + + resp['Count'].should.equal(1) + resp['Items'][0]['client']['S'].should.equal('test2') diff --git a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py index 0e1099559..5e635d5ef 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py @@ -54,7 +54,7 @@ def test_create_table(): } } conn = boto.dynamodb2.connect_to_region( - 'us-west-2', + 'us-east-1', aws_access_key_id="ak", aws_secret_access_key="sk" ) @@ -425,7 +425,7 @@ def test_get_special_item(): @mock_dynamodb2_deprecated def test_update_item_remove(): - conn = boto.dynamodb2.connect_to_region("us-west-2") + conn = boto.dynamodb2.connect_to_region("us-east-1") table = Table.create('messages', schema=[ HashKey('username') ]) @@ -452,7 +452,7 @@ def test_update_item_remove(): @mock_dynamodb2_deprecated def test_update_item_set(): - conn = boto.dynamodb2.connect_to_region("us-west-2") + conn = boto.dynamodb2.connect_to_region("us-east-1") table = Table.create('messages', schema=[ HashKey('username') ]) diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index cf9f73f0e..1029ba39e 100755 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -666,10 +666,6 @@ def test_ami_attribute_error_cases(): cm.exception.request_id.should_not.be.none -""" -Boto3 -""" - @mock_ec2 def test_ami_filter_wildcard(): ec2 = 
boto3.resource('ec2', region_name='us-west-1') @@ -678,3 +674,20 @@ def test_ami_filter_wildcard(): filter_result = list(ec2.images.filter(Owners=['111122223333'], Filters=[{'Name':'name', 'Values':['test*']}])) assert filter_result == [image] + +@mock_ec2 +def test_ami_filter_by_owner_id(): + client = boto3.client('ec2', region_name='us-east-1') + + ubuntu_id = '099720109477' + + ubuntu_images = client.describe_images(Owners=[ubuntu_id]) + all_images = client.describe_images() + + ubuntu_ids = [ami['OwnerId'] for ami in ubuntu_images['Images']] + all_ids = [ami['OwnerId'] for ami in all_images['Images']] + + # Assert all ubuntu_ids are the same and one equals ubuntu_id + assert all(ubuntu_ids) and ubuntu_ids[0] == ubuntu_id + # Check we actually have a subset of images + assert len(ubuntu_ids) < len(all_ids) diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 4427d4843..9c07f38d6 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -17,12 +17,14 @@ def test_create_and_delete_volume(): volume = conn.create_volume(80, "us-east-1a") all_volumes = conn.get_all_volumes() - all_volumes.should.have.length_of(1) - all_volumes[0].size.should.equal(80) - all_volumes[0].zone.should.equal("us-east-1a") - all_volumes[0].encrypted.should.be(False) - volume = all_volumes[0] + current_volume = [item for item in all_volumes if item.id == volume.id] + current_volume.should.have.length_of(1) + current_volume[0].size.should.equal(80) + current_volume[0].zone.should.equal("us-east-1a") + current_volume[0].encrypted.should.be(False) + + volume = current_volume[0] with assert_raises(EC2ResponseError) as ex: volume.delete(dry_run=True) @@ -33,7 +35,9 @@ def test_create_and_delete_volume(): volume.delete() - conn.get_all_volumes().should.have.length_of(0) + all_volumes = conn.get_all_volumes() + my_volume = [item for item in all_volumes if item.id == volume.id] + my_volume.should.have.length_of(0) # Deleting something that was already deleted should throw an error with assert_raises(EC2ResponseError) as cm: @@ -57,7 +61,7 @@ def test_create_encrypted_volume_dryrun(): @mock_ec2_deprecated def test_create_encrypted_volume(): conn = boto.connect_ec2('the_key', 'the_secret') - conn.create_volume(80, "us-east-1a", encrypted=True) + volume = conn.create_volume(80, "us-east-1a", encrypted=True) with assert_raises(EC2ResponseError) as ex: conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) @@ -66,7 +70,7 @@ def test_create_encrypted_volume(): ex.exception.message.should.equal( 'An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') - all_volumes = conn.get_all_volumes() + all_volumes = [vol for vol in conn.get_all_volumes() if vol.id == volume.id] all_volumes[0].encrypted.should.be(True) @@ -116,67 +120,69 @@ def test_volume_filters(): block_mapping = instance.block_device_mapping['/dev/sda1'] + volume_ids = (volume1.id, volume2.id, volume3.id, volume4.id, block_mapping.volume_id) + volumes_by_attach_time = conn.get_all_volumes( filters={'attachment.attach-time': block_mapping.attach_time}) set([vol.id for vol in volumes_by_attach_time] - ).should.equal(set([block_mapping.volume_id])) + ).should.equal({block_mapping.volume_id}) volumes_by_attach_device = conn.get_all_volumes( filters={'attachment.device': '/dev/sda1'}) set([vol.id for vol in volumes_by_attach_device] - ).should.equal(set([block_mapping.volume_id])) + 
).should.equal({block_mapping.volume_id}) volumes_by_attach_instance_id = conn.get_all_volumes( filters={'attachment.instance-id': instance.id}) set([vol.id for vol in volumes_by_attach_instance_id] - ).should.equal(set([block_mapping.volume_id])) + ).should.equal({block_mapping.volume_id}) volumes_by_attach_status = conn.get_all_volumes( filters={'attachment.status': 'attached'}) set([vol.id for vol in volumes_by_attach_status] - ).should.equal(set([block_mapping.volume_id])) + ).should.equal({block_mapping.volume_id}) volumes_by_create_time = conn.get_all_volumes( filters={'create-time': volume4.create_time}) set([vol.create_time for vol in volumes_by_create_time] - ).should.equal(set([volume4.create_time])) + ).should.equal({volume4.create_time}) volumes_by_size = conn.get_all_volumes(filters={'size': volume2.size}) - set([vol.id for vol in volumes_by_size]).should.equal(set([volume2.id])) + set([vol.id for vol in volumes_by_size]).should.equal({volume2.id}) volumes_by_snapshot_id = conn.get_all_volumes( filters={'snapshot-id': snapshot.id}) set([vol.id for vol in volumes_by_snapshot_id] - ).should.equal(set([volume4.id])) + ).should.equal({volume4.id}) volumes_by_status = conn.get_all_volumes(filters={'status': 'in-use'}) set([vol.id for vol in volumes_by_status]).should.equal( - set([block_mapping.volume_id])) + {block_mapping.volume_id}) volumes_by_id = conn.get_all_volumes(filters={'volume-id': volume1.id}) - set([vol.id for vol in volumes_by_id]).should.equal(set([volume1.id])) + set([vol.id for vol in volumes_by_id]).should.equal({volume1.id}) volumes_by_tag_key = conn.get_all_volumes(filters={'tag-key': 'testkey1'}) - set([vol.id for vol in volumes_by_tag_key]).should.equal(set([volume1.id])) + set([vol.id for vol in volumes_by_tag_key]).should.equal({volume1.id}) volumes_by_tag_value = conn.get_all_volumes( filters={'tag-value': 'testvalue1'}) set([vol.id for vol in volumes_by_tag_value] - ).should.equal(set([volume1.id])) + ).should.equal({volume1.id}) volumes_by_tag = conn.get_all_volumes( filters={'tag:testkey1': 'testvalue1'}) - set([vol.id for vol in volumes_by_tag]).should.equal(set([volume1.id])) + set([vol.id for vol in volumes_by_tag]).should.equal({volume1.id}) volumes_by_unencrypted = conn.get_all_volumes( filters={'encrypted': 'false'}) - set([vol.id for vol in volumes_by_unencrypted]).should.equal( - set([block_mapping.volume_id, volume2.id]) + set([vol.id for vol in volumes_by_unencrypted if vol.id in volume_ids]).should.equal( + {block_mapping.volume_id, volume2.id} ) volumes_by_encrypted = conn.get_all_volumes(filters={'encrypted': 'true'}) - set([vol.id for vol in volumes_by_encrypted]).should.equal( - set([volume1.id, volume3.id, volume4.id]) + set([vol.id for vol in volumes_by_encrypted if vol.id in volume_ids]).should.equal( + {volume1.id, volume3.id, volume4.id} ) @@ -252,18 +258,20 @@ def test_create_snapshot(): snapshot.update() snapshot.status.should.equal('completed') - snapshots = conn.get_all_snapshots() + snapshots = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] snapshots.should.have.length_of(1) snapshots[0].description.should.equal('a test snapshot') snapshots[0].start_time.should_not.be.none snapshots[0].encrypted.should.be(False) # Create snapshot without description + num_snapshots = len(conn.get_all_snapshots()) + snapshot = volume.create_snapshot() - conn.get_all_snapshots().should.have.length_of(2) + conn.get_all_snapshots().should.have.length_of(num_snapshots + 1) snapshot.delete() - 
conn.get_all_snapshots().should.have.length_of(1) + conn.get_all_snapshots().should.have.length_of(num_snapshots) # Deleting something that was already deleted should throw an error with assert_raises(EC2ResponseError) as cm: @@ -281,7 +289,7 @@ def test_create_encrypted_snapshot(): snapshot.update() snapshot.status.should.equal('completed') - snapshots = conn.get_all_snapshots() + snapshots = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] snapshots.should.have.length_of(1) snapshots[0].description.should.equal('a test snapshot') snapshots[0].start_time.should_not.be.none @@ -331,52 +339,52 @@ def test_snapshot_filters(): snapshots_by_description = conn.get_all_snapshots( filters={'description': 'testsnapshot1'}) set([snap.id for snap in snapshots_by_description] - ).should.equal(set([snapshot1.id])) + ).should.equal({snapshot1.id}) snapshots_by_id = conn.get_all_snapshots( filters={'snapshot-id': snapshot1.id}) set([snap.id for snap in snapshots_by_id] - ).should.equal(set([snapshot1.id])) + ).should.equal({snapshot1.id}) snapshots_by_start_time = conn.get_all_snapshots( filters={'start-time': snapshot1.start_time}) set([snap.start_time for snap in snapshots_by_start_time] - ).should.equal(set([snapshot1.start_time])) + ).should.equal({snapshot1.start_time}) snapshots_by_volume_id = conn.get_all_snapshots( filters={'volume-id': volume1.id}) set([snap.id for snap in snapshots_by_volume_id] - ).should.equal(set([snapshot1.id, snapshot2.id])) + ).should.equal({snapshot1.id, snapshot2.id}) snapshots_by_status = conn.get_all_snapshots( filters={'status': 'completed'}) - set([snap.id for snap in snapshots_by_status] - ).should.equal(set([snapshot1.id, snapshot2.id, snapshot3.id])) + ({snapshot1.id, snapshot2.id, snapshot3.id} - + {snap.id for snap in snapshots_by_status}).should.have.length_of(0) snapshots_by_volume_size = conn.get_all_snapshots( filters={'volume-size': volume1.size}) set([snap.id for snap in snapshots_by_volume_size] - ).should.equal(set([snapshot1.id, snapshot2.id])) + ).should.equal({snapshot1.id, snapshot2.id}) snapshots_by_tag_key = conn.get_all_snapshots( filters={'tag-key': 'testkey1'}) set([snap.id for snap in snapshots_by_tag_key] - ).should.equal(set([snapshot1.id])) + ).should.equal({snapshot1.id}) snapshots_by_tag_value = conn.get_all_snapshots( filters={'tag-value': 'testvalue1'}) set([snap.id for snap in snapshots_by_tag_value] - ).should.equal(set([snapshot1.id])) + ).should.equal({snapshot1.id}) snapshots_by_tag = conn.get_all_snapshots( filters={'tag:testkey1': 'testvalue1'}) set([snap.id for snap in snapshots_by_tag] - ).should.equal(set([snapshot1.id])) + ).should.equal({snapshot1.id}) snapshots_by_encrypted = conn.get_all_snapshots( filters={'encrypted': 'true'}) set([snap.id for snap in snapshots_by_encrypted] - ).should.equal(set([snapshot3.id])) + ).should.equal({snapshot3.id}) @mock_ec2_deprecated @@ -563,9 +571,11 @@ def test_volume_tag_escaping(): ex.exception.status.should.equal(400) ex.exception.message.should.equal( 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') - dict(conn.get_all_snapshots()[0].tags).should_not.be.equal( + snaps = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] + dict(snaps[0].tags).should_not.be.equal( {'key': ''}) snapshot.add_tags({'key': ''}) - dict(conn.get_all_snapshots()[0].tags).should.equal({'key': ''}) + snaps = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] + 
dict(snaps[0].tags).should.equal({'key': ''}) diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 46bb34d57..5cfe01618 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -5,7 +5,9 @@ from nose.tools import assert_raises import base64 import datetime +import ipaddress +import six import boto import boto3 from boto.ec2.instance import Reservation, InstanceAttribute @@ -215,7 +217,6 @@ def test_create_with_tags(): len(instances['Instances'][0]['Tags']).should.equal(3) - @mock_ec2_deprecated def test_get_instances_filtering_by_state(): conn = boto.connect_ec2() @@ -413,6 +414,7 @@ def test_get_instances_filtering_by_image_id(): 'Values': [image_id]}])['Reservations'] reservations[0]['Instances'].should.have.length_of(1) + @mock_ec2 def test_get_instances_filtering_by_private_dns(): image_id = 'ami-1234abcd' @@ -427,6 +429,7 @@ def test_get_instances_filtering_by_private_dns(): ])['Reservations'] reservations[0]['Instances'].should.have.length_of(1) + @mock_ec2 def test_get_instances_filtering_by_ni_private_dns(): image_id = 'ami-1234abcd' @@ -441,6 +444,7 @@ def test_get_instances_filtering_by_ni_private_dns(): ])['Reservations'] reservations[0]['Instances'].should.have.length_of(1) + @mock_ec2 def test_get_instances_filtering_by_instance_group_name(): image_id = 'ami-1234abcd' @@ -458,6 +462,7 @@ def test_get_instances_filtering_by_instance_group_name(): ])['Reservations'] reservations[0]['Instances'].should.have.length_of(1) + @mock_ec2 def test_get_instances_filtering_by_instance_group_id(): image_id = 'ami-1234abcd' @@ -476,6 +481,7 @@ def test_get_instances_filtering_by_instance_group_id(): ])['Reservations'] reservations[0]['Instances'].should.have.length_of(1) + @mock_ec2_deprecated def test_get_instances_filtering_by_tag(): conn = boto.connect_ec2() @@ -830,18 +836,113 @@ def test_run_instance_with_placement(): instance.placement.should.equal("us-east-1b") -@mock_ec2_deprecated -def test_run_instance_with_subnet(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - reservation = conn.run_instances('ami-1234abcd', subnet_id=subnet.id) - instance = reservation.instances[0] +@mock_ec2 +def test_run_instance_with_subnet_boto3(): + client = boto3.client('ec2', region_name='eu-central-1') - instance.subnet_id.should.equal(subnet.id) + ip_networks = [ + (ipaddress.ip_network('10.0.0.0/16'), ipaddress.ip_network('10.0.99.0/24')), + (ipaddress.ip_network('192.168.42.0/24'), ipaddress.ip_network('192.168.42.0/25')) + ] - all_enis = conn.get_all_network_interfaces() - all_enis.should.have.length_of(1) + # Tests instances are created with the correct IPs + for vpc_cidr, subnet_cidr in ip_networks: + resp = client.create_vpc( + CidrBlock=str(vpc_cidr), + AmazonProvidedIpv6CidrBlock=False, + DryRun=False, + InstanceTenancy='default' + ) + vpc_id = resp['Vpc']['VpcId'] + + resp = client.create_subnet( + CidrBlock=str(subnet_cidr), + VpcId=vpc_id + ) + subnet_id = resp['Subnet']['SubnetId'] + + resp = client.run_instances( + ImageId='ami-1234abcd', + MaxCount=1, + MinCount=1, + SubnetId=subnet_id + ) + instance = resp['Instances'][0] + instance['SubnetId'].should.equal(subnet_id) + + priv_ipv4 = ipaddress.ip_address(six.text_type(instance['PrivateIpAddress'])) + subnet_cidr.should.contain(priv_ipv4) + + +@mock_ec2 +def test_run_instance_with_specified_private_ipv4(): + client = boto3.client('ec2', region_name='eu-central-1') + + vpc_cidr 
= ipaddress.ip_network('192.168.42.0/24') + subnet_cidr = ipaddress.ip_network('192.168.42.0/25') + + resp = client.create_vpc( + CidrBlock=str(vpc_cidr), + AmazonProvidedIpv6CidrBlock=False, + DryRun=False, + InstanceTenancy='default' + ) + vpc_id = resp['Vpc']['VpcId'] + + resp = client.create_subnet( + CidrBlock=str(subnet_cidr), + VpcId=vpc_id + ) + subnet_id = resp['Subnet']['SubnetId'] + + resp = client.run_instances( + ImageId='ami-1234abcd', + MaxCount=1, + MinCount=1, + SubnetId=subnet_id, + PrivateIpAddress='192.168.42.5' + ) + instance = resp['Instances'][0] + instance['SubnetId'].should.equal(subnet_id) + instance['PrivateIpAddress'].should.equal('192.168.42.5') + + +@mock_ec2 +def test_run_instance_mapped_public_ipv4(): + client = boto3.client('ec2', region_name='eu-central-1') + + vpc_cidr = ipaddress.ip_network('192.168.42.0/24') + subnet_cidr = ipaddress.ip_network('192.168.42.0/25') + + resp = client.create_vpc( + CidrBlock=str(vpc_cidr), + AmazonProvidedIpv6CidrBlock=False, + DryRun=False, + InstanceTenancy='default' + ) + vpc_id = resp['Vpc']['VpcId'] + + resp = client.create_subnet( + CidrBlock=str(subnet_cidr), + VpcId=vpc_id + ) + subnet_id = resp['Subnet']['SubnetId'] + client.modify_subnet_attribute( + SubnetId=subnet_id, + MapPublicIpOnLaunch={'Value': True} + ) + + resp = client.run_instances( + ImageId='ami-1234abcd', + MaxCount=1, + MinCount=1, + SubnetId=subnet_id + ) + instance = resp['Instances'][0] + instance.should.contain('PublicDnsName') + instance.should.contain('PublicIpAddress') + len(instance['PublicDnsName']).should.be.greater_than(0) + len(instance['PublicIpAddress']).should.be.greater_than(0) @mock_ec2_deprecated @@ -853,7 +954,7 @@ def test_run_instance_with_nic_autocreated(): 'test security group #1', 'this is a test security group') security_group2 = conn.create_security_group( 'test security group #2', 'this is a test security group') - private_ip = "54.0.0.1" + private_ip = "10.0.0.1" reservation = conn.run_instances('ami-1234abcd', subnet_id=subnet.id, security_groups=[security_group1.name], @@ -880,6 +981,7 @@ def test_run_instance_with_nic_autocreated(): eni.private_ip_addresses.should.have.length_of(1) eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip) + @mock_ec2_deprecated def test_run_instance_with_nic_preexisting(): conn = boto.connect_vpc('the_key', 'the_secret') @@ -1012,6 +1114,7 @@ def test_ec2_classic_has_public_ip_address(): instance.private_ip_address.should_not.equal(None) instance.private_dns_name.should.contain(instance.private_ip_address.replace('.', '-')) + @mock_ec2_deprecated def test_run_instance_with_keypair(): conn = boto.connect_ec2('the_key', 'the_secret') diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index 6e6c62741..b27484468 100644 --- a/tests/test_ec2/test_route_tables.py +++ b/tests/test_ec2/test_route_tables.py @@ -126,9 +126,9 @@ def test_route_tables_filters_associations(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") - subnet1 = conn.create_subnet(vpc.id, "10.0.0.0/18") - subnet2 = conn.create_subnet(vpc.id, "10.0.1.0/18") - subnet3 = conn.create_subnet(vpc.id, "10.0.2.0/18") + subnet1 = conn.create_subnet(vpc.id, "10.0.0.0/24") + subnet2 = conn.create_subnet(vpc.id, "10.0.1.0/24") + subnet3 = conn.create_subnet(vpc.id, "10.0.2.0/24") route_table1 = conn.create_route_table(vpc.id) route_table2 = conn.create_route_table(vpc.id) diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py index 
bb3a8d36b..ccef5a288 100644 --- a/tests/test_ec2/test_tags.py +++ b/tests/test_ec2/test_tags.py @@ -356,7 +356,7 @@ def test_retrieved_snapshots_must_contain_their_tags(): # Fetch the snapshot again all_snapshots = conn.get_all_snapshots() - snapshot = all_snapshots[0] + snapshot = [item for item in all_snapshots if item.id == snapshot.id][0] retrieved_tags = snapshot.tags conn.delete_snapshot(snapshot.id) diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 9b6e99b57..9e5e4ff08 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -1611,6 +1611,152 @@ def test_update_service_through_cloudformation_should_trigger_replacement(): len(resp['serviceArns']).should.equal(1) + +@mock_ec2 +@mock_ecs +def test_attributes(): + # Combined put, list, and delete attributes into the same test due to the amount of setup + ecs_client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = ecs_client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + response['containerInstance'][ + 'ec2InstanceId'].should.equal(test_instance.id) + full_arn1 = response['containerInstance']['containerInstanceArn'] + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + response['containerInstance'][ + 'ec2InstanceId'].should.equal(test_instance.id) + full_arn2 = response['containerInstance']['containerInstanceArn'] + partial_arn2 = full_arn2.rsplit('/', 1)[-1] + + full_arn2.should_not.equal(full_arn1) # uuid1 isn't unique enough when the pc is fast ;-) + + # OK, set instance 1 with 1 attribute, instance 2 with another, and all of them with a 3rd.
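+ # Aside (editor's sketch, not part of the original patch): put_attributes takes a target either as a full container-instance ARN in targetId, or as the short instance ID plus an explicit targetType; in this mock an attribute with no targetId at all appears to apply to every registered instance, which is what makes the count of 4 below work. + # Hypothetical shapes of the three forms (variable names are illustrative only): + cluster_wide_attr = {'name': 'env', 'value': 'prod'} + by_full_arn_attr = {'name': 'attr1', 'value': 'instance1', 'targetId': full_arn1} + by_short_id_attr = {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, 'targetType': 'container-instance'}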
+ ecs_client.put_attributes( + cluster=test_cluster_name, + attributes=[ + {'name': 'env', 'value': 'prod'}, + {'name': 'attr1', 'value': 'instance1', 'targetId': full_arn1}, + {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, 'targetType': 'container-instance'} + ] + ) + + resp = ecs_client.list_attributes( + cluster=test_cluster_name, + targetType='container-instance' + ) + attrs = resp['attributes'] + len(attrs).should.equal(4) + + # Tests that the attrs have been set properly + len(list(filter(lambda item: item['name'] == 'env', attrs))).should.equal(2) + len(list(filter(lambda item: item['name'] == 'attr1' and item['value'] == 'instance1', attrs))).should.equal(1) + + ecs_client.delete_attributes( + cluster=test_cluster_name, + attributes=[ + {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, 'targetType': 'container-instance'} + ] + ) + + resp = ecs_client.list_attributes( + cluster=test_cluster_name, + targetType='container-instance' + ) + attrs = resp['attributes'] + len(attrs).should.equal(3) + + +@mock_ecs +def test_poll_endpoint(): + ecs_client = boto3.client('ecs', region_name='us-east-1') + + # Just a placeholder until someone actually wants useless data; just testing it doesn't raise an exception + resp = ecs_client.discover_poll_endpoint(cluster='blah', containerInstance='blah') + resp.should.contain('endpoint') + resp.should.contain('telemetryEndpoint') + + +@mock_ecs +def test_list_task_definition_families(): + client = boto3.client('ecs', region_name='us-east-1') + client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + client.register_task_definition( + family='alt_test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + + resp1 = client.list_task_definition_families() + resp2 = client.list_task_definition_families(familyPrefix='alt') + + len(resp1['families']).should.equal(2) + len(resp2['families']).should.equal(1) + + def _fetch_container_instance_resources(container_instance_description): remaining_resources = {} registered_resources = {} diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index 98634c677..4f0b1a9cd 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -1,11 +1,13 @@ from __future__ import unicode_literals +import os import boto3 import botocore from botocore.exceptions import ClientError from nose.tools import assert_raises import sure # noqa -from moto import mock_elbv2, mock_ec2 +from moto import mock_elbv2, mock_ec2, mock_acm +from moto.elbv2 import elbv2_backends @mock_elbv2 @@ -283,6 +285,21 @@ def test_create_target_group_and_listeners(): load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') + # Can't create a target group with an invalid protocol + with assert_raises(ClientError): + conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='/HTTP', +
HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) response = conn.create_target_group( Name='a-target', Protocol='HTTP', @@ -723,6 +740,21 @@ def test_handle_listener_rules(): load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') + # Can't create a target group with an invalid protocol + with assert_raises(ClientError): + conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='/HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) response = conn.create_target_group( Name='a-target', Protocol='HTTP', @@ -1030,3 +1062,373 @@ def test_describe_invalid_target_group(): # Check error raises correctly with assert_raises(ClientError): conn.describe_target_groups(Names=['invalid']) + + +@mock_elbv2 +def test_describe_account_limits(): + client = boto3.client('elbv2', region_name='eu-central-1') + + resp = client.describe_account_limits() + resp['Limits'][0].should.contain('Name') + resp['Limits'][0].should.contain('Max') + + +@mock_elbv2 +def test_describe_ssl_policies(): + client = boto3.client('elbv2', region_name='eu-central-1') + + resp = client.describe_ssl_policies() + len(resp['SslPolicies']).should.equal(5) + + resp = client.describe_ssl_policies(Names=['ELBSecurityPolicy-TLS-1-2-2017-01', 'ELBSecurityPolicy-2016-08']) + len(resp['SslPolicies']).should.equal(2) + + +@mock_elbv2 +@mock_ec2 +def test_set_ip_address_type(): + client = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = client.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + arn = response['LoadBalancers'][0]['LoadBalancerArn'] + + # Internal LBs can't be dualstack yet + with assert_raises(ClientError): + client.set_ip_address_type( + LoadBalancerArn=arn, + IpAddressType='dualstack' + ) + + # Create internet facing one + response = client.create_load_balancer( + Name='my-lb2', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internet-facing', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + arn = response['LoadBalancers'][0]['LoadBalancerArn'] + + client.set_ip_address_type( + LoadBalancerArn=arn, + IpAddressType='dualstack' + ) + + +@mock_elbv2 +@mock_ec2 +def test_set_security_groups(): + client = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + security_group2 = ec2.create_security_group( + GroupName='b-security-group', Description='Second One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( +
VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = client.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + arn = response['LoadBalancers'][0]['LoadBalancerArn'] + + client.set_security_groups( + LoadBalancerArn=arn, + SecurityGroups=[security_group.id, security_group2.id] + ) + + resp = client.describe_load_balancers(LoadBalancerArns=[arn]) + len(resp['LoadBalancers'][0]['SecurityGroups']).should.equal(2) + + with assert_raises(ClientError): + client.set_security_groups( + LoadBalancerArn=arn, + SecurityGroups=['non_existent'] + ) + + +@mock_elbv2 +@mock_ec2 +def test_set_subnets(): + client = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + subnet3 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1c') + + response = client.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + arn = response['LoadBalancers'][0]['LoadBalancerArn'] + + client.set_subnets( + LoadBalancerArn=arn, + Subnets=[subnet1.id, subnet2.id, subnet3.id] + ) + + resp = client.describe_load_balancers(LoadBalancerArns=[arn]) + len(resp['LoadBalancers'][0]['AvailabilityZones']).should.equal(3) + + # Only 1 AZ + with assert_raises(ClientError): + client.set_subnets( + LoadBalancerArn=arn, + Subnets=[subnet1.id] + ) + + # Multiple subnets in same AZ + with assert_raises(ClientError): + client.set_subnets( + LoadBalancerArn=arn, + Subnets=[subnet1.id, subnet2.id, subnet2.id] + ) + + +@mock_elbv2 +@mock_ec2 +def test_modify_load_balancer_attributes_idle_timeout(): + client = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = client.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + arn = response['LoadBalancers'][0]['LoadBalancerArn'] + + client.modify_load_balancer_attributes( + LoadBalancerArn=arn, + Attributes=[{'Key': 'idle_timeout.timeout_seconds', 'Value': '600'}] + ) + + # Check it's 600, not 60 + response = client.describe_load_balancer_attributes( + LoadBalancerArn=arn + ) + idle_timeout = list(filter(lambda item: item['Key'] == 'idle_timeout.timeout_seconds', response['Attributes']))[0] + idle_timeout['Value'].should.equal('600') + + +@mock_elbv2
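+ # Aside (editor's note, not part of the original patch): ELBv2 load balancer attributes are exchanged as flat string key/value pairs, so numeric settings such as idle_timeout.timeout_seconds round-trip as strings, which is what the idle-timeout check above relies on. A hedged sketch of reading one back: + # attrs = client.describe_load_balancer_attributes(LoadBalancerArn=arn)['Attributes'] + # timeout = int(next(a['Value'] for a in attrs if a['Key'] == 'idle_timeout.timeout_seconds'))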
+@mock_ec2 +def test_modify_target_group(): + client = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + + response = client.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + arn = response.get('TargetGroups')[0]['TargetGroupArn'] + + client.modify_target_group( + TargetGroupArn=arn, + HealthCheckProtocol='HTTPS', + HealthCheckPort='8081', + HealthCheckPath='/status', + HealthCheckIntervalSeconds=10, + HealthCheckTimeoutSeconds=10, + HealthyThresholdCount=10, + UnhealthyThresholdCount=4, + Matcher={'HttpCode': '200-399'} + ) + + response = client.describe_target_groups( + TargetGroupArns=[arn] + ) + response['TargetGroups'][0]['Matcher']['HttpCode'].should.equal('200-399') + response['TargetGroups'][0]['HealthCheckIntervalSeconds'].should.equal(10) + response['TargetGroups'][0]['HealthCheckPath'].should.equal('/status') + response['TargetGroups'][0]['HealthCheckPort'].should.equal('8081') + response['TargetGroups'][0]['HealthCheckProtocol'].should.equal('HTTPS') + response['TargetGroups'][0]['HealthCheckTimeoutSeconds'].should.equal(10) + response['TargetGroups'][0]['HealthyThresholdCount'].should.equal(10) + response['TargetGroups'][0]['UnhealthyThresholdCount'].should.equal(4) + + +@mock_elbv2 +@mock_ec2 +@mock_acm +def test_modify_listener_http_to_https(): + client = boto3.client('elbv2', region_name='eu-central-1') + acm = boto3.client('acm', region_name='eu-central-1') + ec2 = boto3.resource('ec2', region_name='eu-central-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='eu-central-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='eu-central-1b') + + response = client.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') + + response = client.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group = response.get('TargetGroups')[0] + target_group_arn = target_group['TargetGroupArn'] + + # Plain HTTP listener + response = client.create_listener( + LoadBalancerArn=load_balancer_arn, + Protocol='HTTP', + Port=80, + DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group_arn}] + ) + listener_arn = response['Listeners'][0]['ListenerArn'] + + response = acm.request_certificate( + DomainName='google.com', + SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], + ) + google_arn = response['CertificateArn'] + response = acm.request_certificate( + DomainName='yahoo.com', + SubjectAlternativeNames=['yahoo.com', 
'www.yahoo.com', 'mail.yahoo.com'], + ) + yahoo_arn = response['CertificateArn'] + + response = client.modify_listener( + ListenerArn=listener_arn, + Port=443, + Protocol='HTTPS', + SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01', + Certificates=[ + {'CertificateArn': google_arn, 'IsDefault': False}, + {'CertificateArn': yahoo_arn, 'IsDefault': True} + ], + DefaultActions=[ + {'Type': 'forward', 'TargetGroupArn': target_group_arn} + ] + ) + response['Listeners'][0]['Port'].should.equal(443) + response['Listeners'][0]['Protocol'].should.equal('HTTPS') + response['Listeners'][0]['SslPolicy'].should.equal('ELBSecurityPolicy-TLS-1-2-2017-01') + len(response['Listeners'][0]['Certificates']).should.equal(2) + + # Check default cert, can't do this in server mode + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false': + listener = elbv2_backends['eu-central-1'].load_balancers[load_balancer_arn].listeners[listener_arn] + listener.certificate.should.equal(yahoo_arn) + + # No default cert + with assert_raises(ClientError): + client.modify_listener( + ListenerArn=listener_arn, + Port=443, + Protocol='HTTPS', + SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01', + Certificates=[ + {'CertificateArn': google_arn, 'IsDefault': False} + ], + DefaultActions=[ + {'Type': 'forward', 'TargetGroupArn': target_group_arn} + ] + ) + + # Bad cert + with assert_raises(ClientError): + client.modify_listener( + ListenerArn=listener_arn, + Port=443, + Protocol='HTTPS', + SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01', + Certificates=[ + {'CertificateArn': 'lalala', 'IsDefault': True} + ], + DefaultActions=[ + {'Type': 'forward', 'TargetGroupArn': target_group_arn} + ] + ) diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index da8238f72..e839bde5b 100644 --- a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -3,6 +3,8 @@ import random import boto3 from moto.events import mock_events +from botocore.exceptions import ClientError +from nose.tools import assert_raises RULES = [ @@ -171,11 +173,36 @@ def test_remove_targets(): assert(targets_before - 1 == targets_after) -if __name__ == '__main__': - test_list_rules() - test_describe_rule() - test_enable_disable_rule() - test_list_rule_names_by_target() - test_list_rules() - test_list_targets_by_rule() - test_remove_targets() +@mock_events +def test_permissions(): + client = boto3.client('events', 'eu-central-1') + + client.put_permission(Action='PutEvents', Principal='111111111111', StatementId='Account1') + client.put_permission(Action='PutEvents', Principal='222222222222', StatementId='Account2') + + resp = client.describe_event_bus() + assert len(resp['Policy']['Statement']) == 2 + + client.remove_permission(StatementId='Account2') + + resp = client.describe_event_bus() + assert len(resp['Policy']['Statement']) == 1 + assert resp['Policy']['Statement'][0]['Sid'] == 'Account1' + + +@mock_events +def test_put_events(): + client = boto3.client('events', 'eu-central-1') + + event = { + "Source": "com.mycompany.myapp", + "Detail": '{"key1": "value3", "key2": "value4"}', + "Resources": ["resource1", "resource2"], + "DetailType": "myDetailType" + } + + client.put_events(Entries=[event]) + # Boto3 would error if it didn't return 200 OK + + with assert_raises(ClientError): + client.put_events(Entries=[event]*20) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py new file mode 100644 index 000000000..31631e459 --- /dev/null +++ b/tests/test_iot/test_iot.py @@ -0,0 +1,179 @@ +from __future__ import 
unicode_literals + +import boto3 +import sure # noqa +from moto import mock_iot + + +@mock_iot +def test_things(): + client = boto3.client('iot', region_name='ap-northeast-1') + name = 'my-thing' + type_name = 'my-type-name' + + # thing type + thing_type = client.create_thing_type(thingTypeName=type_name) + thing_type.should.have.key('thingTypeName').which.should.equal(type_name) + thing_type.should.have.key('thingTypeArn') + + res = client.list_thing_types() + res.should.have.key('thingTypes').which.should.have.length_of(1) + for thing_type in res['thingTypes']: + thing_type.should.have.key('thingTypeName').which.should_not.be.none + + thing_type = client.describe_thing_type(thingTypeName=type_name) + thing_type.should.have.key('thingTypeName').which.should.equal(type_name) + thing_type.should.have.key('thingTypeProperties') + thing_type.should.have.key('thingTypeMetadata') + + # thing + thing = client.create_thing(thingName=name, thingTypeName=type_name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + res = client.list_things() + res.should.have.key('things').which.should.have.length_of(1) + for thing in res['things']: + thing.should.have.key('thingName').which.should_not.be.none + + thing = client.update_thing(thingName=name, attributePayload={'attributes': {'k1': 'v1'}}) + res = client.list_things() + res.should.have.key('things').which.should.have.length_of(1) + for thing in res['things']: + thing.should.have.key('thingName').which.should_not.be.none + res['things'][0]['attributes'].should.have.key('k1').which.should.equal('v1') + + thing = client.describe_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('defaultClientId') + thing.should.have.key('thingTypeName') + thing.should.have.key('attributes') + thing.should.have.key('version') + + # delete thing + client.delete_thing(thingName=name) + res = client.list_things() + res.should.have.key('things').which.should.have.length_of(0) + + # delete thing type + client.delete_thing_type(thingTypeName=type_name) + res = client.list_thing_types() + res.should.have.key('thingTypes').which.should.have.length_of(0) + + +@mock_iot +def test_certs(): + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=True) + cert.should.have.key('certificateArn').which.should_not.be.none + cert.should.have.key('certificateId').which.should_not.be.none + cert.should.have.key('certificatePem').which.should_not.be.none + cert.should.have.key('keyPair') + cert['keyPair'].should.have.key('PublicKey').which.should_not.be.none + cert['keyPair'].should.have.key('PrivateKey').which.should_not.be.none + cert_id = cert['certificateId'] + + cert = client.describe_certificate(certificateId=cert_id) + cert.should.have.key('certificateDescription') + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('certificateArn').which.should_not.be.none + cert_desc.should.have.key('certificateId').which.should_not.be.none + cert_desc.should.have.key('certificatePem').which.should_not.be.none + cert_desc.should.have.key('status').which.should.equal('ACTIVE') + + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + for cert in res['certificates']: + cert.should.have.key('certificateArn').which.should_not.be.none + cert.should.have.key('certificateId').which.should_not.be.none + cert.should.have.key('status').which.should_not.be.none + 
cert.should.have.key('creationDate').which.should_not.be.none + + client.update_certificate(certificateId=cert_id, newStatus='REVOKED') + cert = client.describe_certificate(certificateId=cert_id) + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('status').which.should.equal('REVOKED') + + client.delete_certificate(certificateId=cert_id) + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(0) + + +@mock_iot +def test_policy(): + client = boto3.client('iot', region_name='ap-northeast-1') + name = 'my-policy' + doc = '{}' + policy = client.create_policy(policyName=name, policyDocument=doc) + policy.should.have.key('policyName').which.should.equal(name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(doc) + policy.should.have.key('policyVersionId').which.should.equal('1') + + policy = client.get_policy(policyName=name) + policy.should.have.key('policyName').which.should.equal(name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(doc) + policy.should.have.key('defaultVersionId').which.should.equal('1') + + res = client.list_policies() + res.should.have.key('policies').which.should.have.length_of(1) + for policy in res['policies']: + policy.should.have.key('policyName').which.should_not.be.none + policy.should.have.key('policyArn').which.should_not.be.none + + client.delete_policy(policyName=name) + res = client.list_policies() + res.should.have.key('policies').which.should.have.length_of(0) + + +@mock_iot +def test_principal_policy(): + client = boto3.client('iot', region_name='ap-northeast-1') + policy_name = 'my-policy' + doc = '{}' + policy = client.create_policy(policyName=policy_name, policyDocument=doc) + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + + client.attach_principal_policy(policyName=policy_name, principal=cert_arn) + + res = client.list_principal_policies(principal=cert_arn) + res.should.have.key('policies').which.should.have.length_of(1) + for policy in res['policies']: + policy.should.have.key('policyName').which.should_not.be.none + policy.should.have.key('policyArn').which.should_not.be.none + + res = client.list_policy_principals(policyName=policy_name) + res.should.have.key('principals').which.should.have.length_of(1) + for principal in res['principals']: + principal.should_not.be.none + + client.detach_principal_policy(policyName=policy_name, principal=cert_arn) + res = client.list_principal_policies(principal=cert_arn) + res.should.have.key('policies').which.should.have.length_of(0) + res = client.list_policy_principals(policyName=policy_name) + res.should.have.key('principals').which.should.have.length_of(0) + + +@mock_iot +def test_principal_thing(): + client = boto3.client('iot', region_name='ap-northeast-1') + thing_name = 'my-thing' + thing = client.create_thing(thingName=thing_name) + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + + client.attach_thing_principal(thingName=thing_name, principal=cert_arn) + + res = client.list_principal_things(principal=cert_arn) + res.should.have.key('things').which.should.have.length_of(1) + for thing in res['things']: + thing.should_not.be.none + res = client.list_thing_principals(thingName=thing_name) + res.should.have.key('principals').which.should.have.length_of(1) + for principal in res['principals']: + principal.should_not.be.none + +
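+ # Aside (editor's sketch, not part of the original patch): the thing<->principal link is symmetric, so before detaching it can be read back from either side: + assert cert_arn in client.list_thing_principals(thingName=thing_name)['principals'] + +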
client.detach_thing_principal(thingName=thing_name, principal=cert_arn) + res = client.list_principal_things(principal=cert_arn) + res.should.have.key('things').which.should.have.length_of(0) + res = client.list_thing_principals(thingName=thing_name) + res.should.have.key('principals').which.should.have.length_of(0) diff --git a/tests/test_iot/test_server.py b/tests/test_iot/test_server.py new file mode 100644 index 000000000..47091531a --- /dev/null +++ b/tests/test_iot/test_server.py @@ -0,0 +1,19 @@ +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server +from moto import mock_iot + +''' +Test the different server responses +''' + +@mock_iot +def test_iot_list(): + backend = server.create_backend_app("iot") + test_client = backend.test_client() + + # just making sure that server is up + res = test_client.get('/things') + res.status_code.should.equal(404) diff --git a/tests/test_iotdata/test_iotdata.py b/tests/test_iotdata/test_iotdata.py new file mode 100644 index 000000000..5768d31c7 --- /dev/null +++ b/tests/test_iotdata/test_iotdata.py @@ -0,0 +1,87 @@ +from __future__ import unicode_literals + +import json +import boto3 +import sure # noqa +from nose.tools import assert_raises +from botocore.exceptions import ClientError +from moto import mock_iotdata, mock_iot + + +@mock_iot +@mock_iotdata +def test_basic(): + iot_client = boto3.client('iot', region_name='ap-northeast-1') + client = boto3.client('iot-data', region_name='ap-northeast-1') + name = 'my-thing' + raw_payload = b'{"state": {"desired": {"led": "on"}}}' + iot_client.create_thing(thingName=name) + + with assert_raises(ClientError): + client.get_thing_shadow(thingName=name) + + res = client.update_thing_shadow(thingName=name, payload=raw_payload) + + payload = json.loads(res['payload'].read()) + expected_state = '{"desired": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(1) + payload.should.have.key('timestamp') + + res = client.get_thing_shadow(thingName=name) + payload = json.loads(res['payload'].read()) + expected_state = b'{"desired": {"led": "on"}, "delta": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(1) + payload.should.have.key('timestamp') + + client.delete_thing_shadow(thingName=name) + with assert_raises(ClientError): + client.get_thing_shadow(thingName=name) + + +@mock_iot +@mock_iotdata +def test_update(): + iot_client = boto3.client('iot', region_name='ap-northeast-1') + client = boto3.client('iot-data', region_name='ap-northeast-1') + name = 'my-thing' + raw_payload = b'{"state": {"desired": {"led": "on"}}}' + iot_client.create_thing(thingName=name) + + # first update + res = client.update_thing_shadow(thingName=name, payload=raw_payload) + payload = json.loads(res['payload'].read()) + expected_state = '{"desired": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(1) + payload.should.have.key('timestamp') + + res = client.get_thing_shadow(thingName=name) + payload 
= json.loads(res['payload'].read()) + expected_state = b'{"desired": {"led": "on"}, "delta": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(1) + payload.should.have.key('timestamp') + + # reporting new state + new_payload = b'{"state": {"reported": {"led": "on"}}}' + res = client.update_thing_shadow(thingName=name, payload=new_payload) + payload = json.loads(res['payload'].read()) + expected_state = '{"reported": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('reported').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(2) + payload.should.have.key('timestamp') + + res = client.get_thing_shadow(thingName=name) + payload = json.loads(res['payload'].read()) + expected_state = b'{"desired": {"led": "on"}, "reported": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(2) + payload.should.have.key('timestamp') diff --git a/tests/test_iotdata/test_server.py b/tests/test_iotdata/test_server.py new file mode 100644 index 000000000..42a5c5f22 --- /dev/null +++ b/tests/test_iotdata/test_server.py @@ -0,0 +1,20 @@ +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server +from moto import mock_iotdata + +''' +Test the different server responses +''' + +@mock_iotdata +def test_iotdata_list(): + backend = server.create_backend_app("iot-data") + test_client = backend.test_client() + + # just making sure that server is up + thing_name = 'nothing' + res = test_client.get('/things/{}/shadow'.format(thing_name)) + res.status_code.should.equal(404) diff --git a/tests/test_redshift/test_server.py b/tests/test_redshift/test_server.py index 4e950fc74..c37e9cab7 100644 --- a/tests/test_redshift/test_server.py +++ b/tests/test_redshift/test_server.py @@ -19,4 +19,4 @@ def test_describe_clusters(): res = test_client.get('/?Action=DescribeClusters') result = res.data.decode("utf-8") - result.should.contain("") diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py index ac8d6e7ad..76217b9d9 100644 --- a/tests/test_route53/test_route53.py +++ b/tests/test_route53/test_route53.py @@ -119,8 +119,10 @@ def test_rrset(): rrsets = conn.get_all_rrsets( zoneid, name="bar.foo.testdns.aws.com", type="A") - rrsets.should.have.length_of(1) - rrsets[0].resource_records[0].should.equal('5.6.7.8') + rrsets.should.have.length_of(2) + resource_records = [rr for rr_set in rrsets for rr in rr_set.resource_records] + resource_records.should.contain('1.2.3.4') + resource_records.should.contain('5.6.7.8') rrsets = conn.get_all_rrsets( zoneid, name="foo.foo.testdns.aws.com", type="A") @@ -160,7 +162,10 @@ def test_alias_rrset(): changes.commit() rrsets = conn.get_all_rrsets(zoneid, type="A") - rrsets.should.have.length_of(1) + rrset_records = [(rr_set.name, rr) for rr_set in rrsets for rr in rr_set.resource_records] + rrset_records.should.have.length_of(2) + rrset_records.should.contain(('foo.alias.testdns.aws.com', 'foo.testdns.aws.com')) + rrset_records.should.contain(('bar.alias.testdns.aws.com', 'bar.testdns.aws.com')) 
rrsets[0].resource_records[0].should.equal('foo.testdns.aws.com') rrsets = conn.get_all_rrsets(zoneid, type="CNAME") rrsets.should.have.length_of(1) @@ -647,3 +652,60 @@ def test_change_resource_record_invalid(): response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) len(response['ResourceRecordSets']).should.equal(0) + + +@mock_route53 +def test_list_resource_record_sets_name_type_filters(): + conn = boto3.client('route53', region_name='us-east-1') + create_hosted_zone_response = conn.create_hosted_zone( + Name="db.", + CallerReference=str(hash('foo')), + HostedZoneConfig=dict( + PrivateZone=True, + Comment="db", + ) + ) + hosted_zone_id = create_hosted_zone_response['HostedZone']['Id'] + + def create_resource_record_set(rec_type, rec_name): + payload = { + 'Comment': 'create {} record {}'.format(rec_type, rec_name), + 'Changes': [ + { + 'Action': 'CREATE', + 'ResourceRecordSet': { + 'Name': rec_name, + 'Type': rec_type, + 'TTL': 10, + 'ResourceRecords': [{ + 'Value': '127.0.0.1' + }] + } + } + ] + } + conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=payload) + + # record_type, record_name + all_records = [ + ('A', 'a.a.db'), + ('A', 'a.b.db'), + ('A', 'b.b.db'), + ('CNAME', 'b.b.db'), + ('CNAME', 'b.c.db'), + ('CNAME', 'c.c.db') + ] + for record_type, record_name in all_records: + create_resource_record_set(record_type, record_name) + + start_with = 2 + response = conn.list_resource_record_sets( + HostedZoneId=hosted_zone_id, + StartRecordType=all_records[start_with][0], + StartRecordName=all_records[start_with][1] + ) + + returned_records = [(record['Type'], record['Name']) for record in response['ResourceRecordSets']] + len(returned_records).should.equal(len(all_records) - start_with) + for desired_record in all_records[start_with:]: + returned_records.should.contain(desired_record) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 87668d8b7..829941d79 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - from __future__ import unicode_literals import datetime @@ -1775,6 +1774,30 @@ def test_boto3_put_object_tagging(): resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) +@mock_s3 +def test_boto3_put_object_tagging_with_single_tag(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-tags' + s3.create_bucket(Bucket=bucket_name) + + s3.put_object( + Bucket=bucket_name, + Key=key, + Body='test' + ) + + resp = s3.put_object_tagging( + Bucket=bucket_name, + Key=key, + Tagging={'TagSet': [ + {'Key': 'item1', 'Value': 'foo'} + ]} + ) + + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + @mock_s3 def test_boto3_get_object_tagging(): s3 = boto3.client('s3', region_name='us-east-1') @@ -1841,7 +1864,7 @@ def test_boto3_list_object_versions(): def test_boto3_delete_markers(): s3 = boto3.client('s3', region_name='us-east-1') bucket_name = 'mybucket' - key = 'key-with-versions' + key = u'key-with-versions-and-unicode-ó' s3.create_bucket(Bucket=bucket_name) s3.put_bucket_versioning( Bucket=bucket_name, @@ -1856,10 +1879,9 @@ def test_boto3_delete_markers(): Key=key, Body=body ) - s3.delete_object( - Bucket=bucket_name, - Key=key - ) + + s3.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': key}]}) + with assert_raises(ClientError) as e: s3.get_object( Bucket=bucket_name, @@ -1881,12 +1903,18 @@ def test_boto3_delete_markers(): Bucket=bucket_name ) response['Versions'].should.have.length_of(2) - 
response['Versions'][-1]['IsLatest'].should.be.true - response['Versions'][0]['IsLatest'].should.be.false - [(key_metadata['Key'], key_metadata['VersionId']) - for key_metadata in response['Versions']].should.equal( - [('key-with-versions', '0'), ('key-with-versions', '1')] - ) + + # We've asserted there are only 2 records, so one is newest and one is oldest + latest = list(filter(lambda item: item['IsLatest'], response['Versions']))[0] + oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0] + + # Double check ordering of version IDs + latest['VersionId'].should.equal('1') + oldest['VersionId'].should.equal('0') + + # Double check the name is still unicode + latest['Key'].should.equal('key-with-versions-and-unicode-ó') + oldest['Key'].should.equal('key-with-versions-and-unicode-ó') @mock_s3 diff --git a/tests/test_sns/test_server.py b/tests/test_sns/test_server.py index ce505278f..465dfa2c2 100644 --- a/tests/test_sns/test_server.py +++ b/tests/test_sns/test_server.py @@ -13,12 +13,12 @@ def test_sns_server_get(): backend = server.create_backend_app("sns") test_client = backend.test_client() - topic_data = test_client.action_data("CreateTopic", Name="test topic") + topic_data = test_client.action_data("CreateTopic", Name="testtopic") topic_data.should.contain("CreateTopicResult") topic_data.should.contain( - "arn:aws:sns:us-east-1:123456789012:test topic") + "arn:aws:sns:us-east-1:123456789012:testtopic") topics_data = test_client.action_data("ListTopics") topics_data.should.contain("ListTopicsResult") topic_data.should.contain( - "arn:aws:sns:us-east-1:123456789012:test topic") + "arn:aws:sns:us-east-1:123456789012:testtopic") diff --git a/tests/test_sns/test_topics_boto3.py b/tests/test_sns/test_topics_boto3.py index a9c2a2904..95dd41f89 100644 --- a/tests/test_sns/test_topics_boto3.py +++ b/tests/test_sns/test_topics_boto3.py @@ -31,6 +31,26 @@ def test_create_and_delete_topic(): topics = topics_json["Topics"] topics.should.have.length_of(0) +@mock_sns +def test_create_topic_should_be_idempotent(): + conn = boto3.client("sns", region_name="us-east-1") + topic_arn = conn.create_topic(Name="some-topic")['TopicArn'] + conn.set_topic_attributes( + TopicArn=topic_arn, + AttributeName="DisplayName", + AttributeValue="should_be_set" + ) + topic_display_name = conn.get_topic_attributes( + TopicArn=topic_arn + )['Attributes']['DisplayName'] + topic_display_name.should.be.equal("should_be_set") + + # recreate the topic to prove idempotency + topic_arn = conn.create_topic(Name="some-topic")['TopicArn'] + topic_display_name = conn.get_topic_attributes( + TopicArn=topic_arn + )['Attributes']['DisplayName'] + topic_display_name.should.be.equal("should_be_set") @mock_sns def test_get_missing_topic(): @@ -38,6 +58,27 @@ def test_get_missing_topic(): conn.get_topic_attributes.when.called_with( TopicArn="a-fake-arn").should.throw(ClientError) +@mock_sns +def test_create_topic_must_meet_constraints(): + conn = boto3.client("sns", region_name="us-east-1") + common_random_chars = [':', ';', '!', '@', '|', '^', '%'] + for char in common_random_chars: + conn.create_topic.when.called_with( + Name="no%s_invalidchar" % char).should.throw(ClientError) + conn.create_topic.when.called_with( + Name="no spaces allowed").should.throw(ClientError) + + +@mock_sns +def test_create_topic_should_be_of_certain_length(): + conn = boto3.client("sns", region_name="us-east-1") + too_short = "" + conn.create_topic.when.called_with( + Name=too_short).should.throw(ClientError) + too_long = "x" * 257 + 
conn.create_topic.when.called_with( + Name=too_long).should.throw(ClientError) + @mock_sns def test_create_topic_in_multiple_regions(): diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 536261504..c761ec8d9 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- from __future__ import unicode_literals +import os import boto import boto3 @@ -8,14 +9,18 @@ from botocore.exceptions import ClientError from boto.exception import SQSError from boto.sqs.message import RawMessage, Message +from freezegun import freeze_time import base64 +import json import sure # noqa import time +import uuid from moto import settings, mock_sqs, mock_sqs_deprecated from tests.helpers import requires_boto_gte import tests.backport_assert_raises # noqa from nose.tools import assert_raises +from nose import SkipTest @mock_sqs @@ -93,8 +98,6 @@ def test_message_send_without_attributes(): msg.get('MD5OfMessageBody').should.equal( '58fd9edd83341c29f1aebba81c31e257') msg.shouldnt.have.key('MD5OfMessageAttributes') - msg.get('ResponseMetadata', {}).get('RequestId').should.equal( - '27daac76-34dd-47df-bd01-1f6e873584a0') msg.get('MessageId').should_not.contain(' \n') messages = queue.receive_messages() @@ -118,8 +121,6 @@ def test_message_send_with_attributes(): '58fd9edd83341c29f1aebba81c31e257') msg.get('MD5OfMessageAttributes').should.equal( '235c5c510d26fb653d073faed50ae77c') - msg.get('ResponseMetadata', {}).get('RequestId').should.equal( - '27daac76-34dd-47df-bd01-1f6e873584a0') msg.get('MessageId').should_not.contain(' \n') messages = queue.receive_messages() @@ -143,8 +144,6 @@ def test_message_with_complex_attributes(): '58fd9edd83341c29f1aebba81c31e257') msg.get('MD5OfMessageAttributes').should.equal( '8ae21a7957029ef04146b42aeaa18a22') - msg.get('ResponseMetadata', {}).get('RequestId').should.equal( - '27daac76-34dd-47df-bd01-1f6e873584a0') msg.get('MessageId').should_not.contain(' \n') messages = queue.receive_messages() @@ -755,3 +754,181 @@ def test_delete_message_after_visibility_timeout(): m1_retrieved.delete() assert new_queue.count() == 0 + + +@mock_sqs +def test_batch_change_message_visibility(): + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true': + raise SkipTest("Can't manipulate time in server mode") + + with freeze_time("2015-01-01 12:00:00"): + sqs = boto3.client('sqs', region_name='us-east-1') + resp = sqs.create_queue( + QueueName='test-dlr-queue.fifo', + Attributes={'FifoQueue': 'true'} + ) + queue_url = resp['QueueUrl'] + + sqs.send_message(QueueUrl=queue_url, MessageBody='msg1') + sqs.send_message(QueueUrl=queue_url, MessageBody='msg2') + sqs.send_message(QueueUrl=queue_url, MessageBody='msg3') + + with freeze_time("2015-01-01 12:01:00"): + receive_resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=2) + len(receive_resp['Messages']).should.equal(2) + + handles = [item['ReceiptHandle'] for item in receive_resp['Messages']] + entries = [{'Id': str(uuid.uuid4()), 'ReceiptHandle': handle, 'VisibilityTimeout': 43200} for handle in handles] + + resp = sqs.change_message_visibility_batch(QueueUrl=queue_url, Entries=entries) + len(resp['Successful']).should.equal(2) + + with freeze_time("2015-01-01 14:00:00"): + resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3) + len(resp['Messages']).should.equal(1) + + with freeze_time("2015-01-01 16:00:00"): + resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3) + len(resp['Messages']).should.equal(1) + + with 
freeze_time("2015-01-02 12:00:00"): + resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3) + len(resp['Messages']).should.equal(3) + + +@mock_sqs +def test_permissions(): + client = boto3.client('sqs', region_name='us-east-1') + + resp = client.create_queue( + QueueName='test-dlr-queue.fifo', + Attributes={'FifoQueue': 'true'} + ) + queue_url = resp['QueueUrl'] + + client.add_permission(QueueUrl=queue_url, Label='account1', AWSAccountIds=['111111111111'], Actions=['*']) + client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SendMessage']) + + with assert_raises(ClientError): + client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SomeRubbish']) + + client.remove_permission(QueueUrl=queue_url, Label='account2') + + with assert_raises(ClientError): + client.remove_permission(QueueUrl=queue_url, Label='non_existent') + + +@mock_sqs +def test_tags(): + client = boto3.client('sqs', region_name='us-east-1') + + resp = client.create_queue( + QueueName='test-dlr-queue.fifo', + Attributes={'FifoQueue': 'true'} + ) + queue_url = resp['QueueUrl'] + + client.tag_queue( + QueueUrl=queue_url, + Tags={ + 'test1': 'value1', + 'test2': 'value2', + } + ) + + resp = client.list_queue_tags(QueueUrl=queue_url) + resp['Tags'].should.contain('test1') + resp['Tags'].should.contain('test2') + + client.untag_queue( + QueueUrl=queue_url, + TagKeys=['test2'] + ) + + resp = client.list_queue_tags(QueueUrl=queue_url) + resp['Tags'].should.contain('test1') + resp['Tags'].should_not.contain('test2') + + +@mock_sqs +def test_create_fifo_queue_with_dlq(): + sqs = boto3.client('sqs', region_name='us-east-1') + resp = sqs.create_queue( + QueueName='test-dlr-queue.fifo', + Attributes={'FifoQueue': 'true'} + ) + queue_url1 = resp['QueueUrl'] + queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn'] + + resp = sqs.create_queue( + QueueName='test-dlr-queue', + Attributes={'FifoQueue': 'false'} + ) + queue_url2 = resp['QueueUrl'] + queue_arn2 = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes']['QueueArn'] + + sqs.create_queue( + QueueName='test-queue.fifo', + Attributes={ + 'FifoQueue': 'true', + 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2}) + } + ) + + # Can't have a FIFO queue with a non-FIFO DLQ + with assert_raises(ClientError): + sqs.create_queue( + QueueName='test-queue2.fifo', + Attributes={ + 'FifoQueue': 'true', + 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn2, 'maxReceiveCount': 2}) + } + ) + + +@mock_sqs +def test_queue_with_dlq(): + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true': + raise SkipTest("Can't manipulate time in server mode") + + sqs = boto3.client('sqs', region_name='us-east-1') + + with freeze_time("2015-01-01 12:00:00"): + resp = sqs.create_queue( + QueueName='test-dlr-queue.fifo', + Attributes={'FifoQueue': 'true'} + ) + queue_url1 = resp['QueueUrl'] + queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn'] + + resp = sqs.create_queue( + QueueName='test-queue.fifo', + Attributes={ + 'FifoQueue': 'true', + 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2}) + } + ) + queue_url2 = resp['QueueUrl'] + + sqs.send_message(QueueUrl=queue_url2, MessageBody='msg1') + sqs.send_message(QueueUrl=queue_url2, MessageBody='msg2') + + with freeze_time("2015-01-01 13:00:00"): + resp = sqs.receive_message(QueueUrl=queue_url2, 
VisibilityTimeout=30, WaitTimeSeconds=0) + resp['Messages'][0]['Body'].should.equal('msg1') + + with freeze_time("2015-01-01 13:01:00"): + resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0) + resp['Messages'][0]['Body'].should.equal('msg1') + + with freeze_time("2015-01-01 13:02:00"): + resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0) + len(resp['Messages']).should.equal(1) + + resp = sqs.receive_message(QueueUrl=queue_url1, VisibilityTimeout=30, WaitTimeSeconds=0) + resp['Messages'][0]['Body'].should.equal('msg1') + + # Might as well test list source queues + + resp = sqs.list_dead_letter_source_queues(QueueUrl=queue_url1) + resp['queueUrls'][0].should.equal(queue_url2) diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 7f4aca533..781727c26 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -47,6 +47,51 @@ def test_delete_parameters(): len(response['Parameters']).should.equal(0) +@mock_ssm +def test_get_parameters_by_path(): + client = boto3.client('ssm', region_name='us-east-1') + + client.put_parameter( + Name='/foo/name1', + Description='A test parameter', + Value='value1', + Type='String') + + client.put_parameter( + Name='/foo/name2', + Description='A test parameter', + Value='value2', + Type='String') + + client.put_parameter( + Name='/bar/name3', + Description='A test parameter', + Value='value3', + Type='String') + + client.put_parameter( + Name='/bar/name3/name4', + Description='A test parameter', + Value='value4', + Type='String') + + response = client.get_parameters_by_path(Path='/foo') + len(response['Parameters']).should.equal(2) + {p['Value'] for p in response['Parameters']}.should.equal( + set(['value1', 'value2']) + ) + + response = client.get_parameters_by_path(Path='/bar', Recursive=False) + len(response['Parameters']).should.equal(1) + response['Parameters'][0]['Value'].should.equal('value3') + + response = client.get_parameters_by_path(Path='/bar', Recursive=True) + len(response['Parameters']).should.equal(2) + {p['Value'] for p in response['Parameters']}.should.equal( + set(['value3', 'value4']) + ) + + @mock_ssm def test_put_parameter(): client = boto3.client('ssm', region_name='us-east-1') diff --git a/tox.ini b/tox.ini index 3fe5d0141..0f3f1466a 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py26, py27, py33, py34 +envlist = py27, py36 [testenv] deps = diff --git a/wait_for.py b/wait_for.py index ea3639d16..d313ea5a9 100755 --- a/wait_for.py +++ b/wait_for.py @@ -24,7 +24,7 @@ while True: break except EXCEPTIONS: elapsed_s = time.time() - start_ts - if elapsed_s > 30: + if elapsed_s > 60: raise print('.')
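
A note on the thing-shadow behaviour the iotdata tests above encode: AWS IoT adds a "delta" section to a shadow whenever the desired state differs from the reported state, and drops it once the reported state catches up. The sketch below only illustrates that rule under the tests' data; shadow_delta is a hypothetical helper, not moto's implementation.

    def shadow_delta(desired, reported):
        # Every desired key whose value is absent from, or different in,
        # the reported state ends up in the delta section.
        return {k: v for k, v in desired.items() if reported.get(k) != v}

    # Mirrors test_basic/test_update: a delta exists until 'led' is reported,
    assert shadow_delta({'led': 'on'}, {}) == {'led': 'on'}
    # and disappears once reported matches desired.
    assert shadow_delta({'led': 'on'}, {'led': 'on'}) == {}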
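
Similarly, test_get_parameters_by_path hinges on the Recursive flag: a non-recursive lookup matches only direct children of the path, while a recursive one matches the whole subtree. A minimal sketch of that filtering rule, assuming a hypothetical matches_path helper (not part of moto or boto3):

    def matches_path(name, path, recursive):
        prefix = path.rstrip('/') + '/'
        if not name.startswith(prefix):
            return False
        # Direct children have no further '/' beyond the path prefix.
        return recursive or '/' not in name[len(prefix):]

    assert matches_path('/bar/name3', '/bar', recursive=False)
    assert not matches_path('/bar/name3/name4', '/bar', recursive=False)
    assert matches_path('/bar/name3/name4', '/bar', recursive=True)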
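
Finally, test_queue_with_dlq relies on the redrive rule that a message already delivered maxReceiveCount times is moved to the dead-letter queue instead of being served again. A toy predicate capturing that bookkeeping (illustrative only; should_redrive is not moto's internal API):

    def should_redrive(receive_count, max_receive_count):
        # Past the limit, the next receive redrives the message to the DLQ.
        return receive_count >= max_receive_count

    # msg1 above is delivered at 13:00 and 13:01 (maxReceiveCount=2), so the
    # 13:02 receive on the main queue serves msg2 and msg1 lands in the DLQ.
    assert not should_redrive(1, 2)
    assert should_redrive(2, 2)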