diff --git a/AUTHORS.md b/AUTHORS.md
index f4160146c..55ac102d5 100644
--- a/AUTHORS.md
+++ b/AUTHORS.md
@@ -47,3 +47,4 @@ Moto is written by Steve Pulec with contributions from:
 * [Adam Stauffer](https://github.com/adamstauffer)
 * [Guy Templeton](https://github.com/gjtempleton)
 * [Michael van Tellingen](https://github.com/mvantellingen)
+* [Jessie Nadler](https://github.com/nadlerjessie)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bbce6c343..740aac2cb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,43 @@ Moto Changelog
 Latest
 ------
+1.1.25
+-----
+
+ * Implemented IoT and IoT Data
+ * Implemented resource tagging API
+ * EC2 AMIs now have owners
+ * Improved codegen scaffolding
+ * Many small fixes to EC2 support
+ * CloudFormation ELBv2 support
+ * UTF-8 fixes for S3
+ * Implemented SSM get_parameters_by_path
+ * More advanced DynamoDB querying
+
+1.1.24
+-----
+
+ * Implemented Batch
+ * Fixed regression with moto_server dashboard
+ * Fixed and closed many outstanding bugs
+ * Fixed serious performance problem with EC2 reservation listing
+ * Fixed Route53 list_resource_record_sets
+
+1.1.23
+-----
+
+ * Implemented X-Ray
+ * Implemented Autoscaling EC2 attachment
+ * Implemented Autoscaling Load Balancer methods
+ * Improved DynamoDB filter expressions
+
+1.1.22
+-----
+
+ * Lambda policies
+ * DynamoDB filter expressions
+ * EC2 Spot Fleet improvements
+
 1.1.21
 -----
diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md
new file mode 100644
index 000000000..6be7375db
--- /dev/null
+++ b/IMPLEMENTATION_COVERAGE.md
@@ -0,0 +1,3677 @@
+
+## acm - 50% implemented
+- [X] add_tags_to_certificate
+- [X] delete_certificate
+- [ ] describe_certificate
+- [X] get_certificate
+- [ ] import_certificate
+- [ ] list_certificates
+- [ ] list_tags_for_certificate
+- [X] remove_tags_from_certificate
+- [X] request_certificate
+- [ ] resend_validation_email
+
+## apigateway - 18% implemented
+- [ ] create_api_key
+- [ ] create_authorizer
+- [ ] create_base_path_mapping
+- [X] create_deployment
+- [ ] create_documentation_part
+- [ ] create_documentation_version
+- [ ] create_domain_name
+- [ ] create_model
+- [ ] create_request_validator
+- [X] create_resource
+- [X] create_rest_api
+- [X] create_stage
+- [ ] create_usage_plan
+- [ ] create_usage_plan_key
+- [ ] delete_api_key
+- [ ] delete_authorizer
+- [ ] delete_base_path_mapping
+- [ ] delete_client_certificate
+- [X] delete_deployment
+- [ ] delete_documentation_part
+- [ ] delete_documentation_version
+- [ ] delete_domain_name
+- [ ] delete_gateway_response
+- [X] delete_integration
+- [X] delete_integration_response
+- [ ] delete_method
+- [X] delete_method_response
+- [ ] delete_model
+- [ ] delete_request_validator
+- [X] delete_resource
+- [X] delete_rest_api
+- [ ] delete_stage
+- [ ] delete_usage_plan
+- [ ] delete_usage_plan_key
+- [ ] flush_stage_authorizers_cache
+- [ ] flush_stage_cache
+- [ ] generate_client_certificate
+- [ ] get_account
+- [ ] get_api_key
+- [ ] get_api_keys
+- [ ] get_authorizer
+- [ ] get_authorizers
+- [ ] get_base_path_mapping
+- [ ] get_base_path_mappings
+- [ ] get_client_certificate
+- [ ] get_client_certificates
+- [X] get_deployment
+- [X] get_deployments
+- [ ] get_documentation_part
+- [ ] get_documentation_parts
+- [ ] get_documentation_version
+- [ ] get_documentation_versions
+- [ ] get_domain_name
+- [ ] get_domain_names
+- [ ] get_export
+- [ ] get_gateway_response
+- [ ] get_gateway_responses
+- [X] get_integration
+- [X] get_integration_response
+- [X] get_method
+- [X] 
get_method_response +- [ ] get_model +- [ ] get_model_template +- [ ] get_models +- [ ] get_request_validator +- [ ] get_request_validators +- [X] get_resource +- [ ] get_resources +- [X] get_rest_api +- [ ] get_rest_apis +- [ ] get_sdk +- [ ] get_sdk_type +- [ ] get_sdk_types +- [X] get_stage +- [X] get_stages +- [ ] get_usage +- [ ] get_usage_plan +- [ ] get_usage_plan_key +- [ ] get_usage_plan_keys +- [ ] get_usage_plans +- [ ] import_api_keys +- [ ] import_documentation_parts +- [ ] import_rest_api +- [ ] put_gateway_response +- [ ] put_integration +- [ ] put_integration_response +- [ ] put_method +- [ ] put_method_response +- [ ] put_rest_api +- [ ] test_invoke_authorizer +- [ ] test_invoke_method +- [ ] update_account +- [ ] update_api_key +- [ ] update_authorizer +- [ ] update_base_path_mapping +- [ ] update_client_certificate +- [ ] update_deployment +- [ ] update_documentation_part +- [ ] update_documentation_version +- [ ] update_domain_name +- [ ] update_gateway_response +- [ ] update_integration +- [ ] update_integration_response +- [ ] update_method +- [ ] update_method_response +- [ ] update_model +- [ ] update_request_validator +- [ ] update_resource +- [ ] update_rest_api +- [X] update_stage +- [ ] update_usage +- [ ] update_usage_plan + +## application-autoscaling - 0% implemented +- [ ] delete_scaling_policy +- [ ] delete_scheduled_action +- [ ] deregister_scalable_target +- [ ] describe_scalable_targets +- [ ] describe_scaling_activities +- [ ] describe_scaling_policies +- [ ] describe_scheduled_actions +- [ ] put_scaling_policy +- [ ] put_scheduled_action +- [ ] register_scalable_target + +## appstream - 0% implemented +- [ ] associate_fleet +- [ ] create_directory_config +- [ ] create_fleet +- [ ] create_image_builder +- [ ] create_image_builder_streaming_url +- [ ] create_stack +- [ ] create_streaming_url +- [ ] delete_directory_config +- [ ] delete_fleet +- [ ] delete_image +- [ ] delete_image_builder +- [ ] delete_stack +- [ ] describe_directory_configs +- [ ] describe_fleets +- [ ] describe_image_builders +- [ ] describe_images +- [ ] describe_sessions +- [ ] describe_stacks +- [ ] disassociate_fleet +- [ ] expire_session +- [ ] list_associated_fleets +- [ ] list_associated_stacks +- [ ] start_fleet +- [ ] start_image_builder +- [ ] stop_fleet +- [ ] stop_image_builder +- [ ] update_directory_config +- [ ] update_fleet +- [ ] update_stack + +## athena - 0% implemented +- [ ] batch_get_named_query +- [ ] batch_get_query_execution +- [ ] create_named_query +- [ ] delete_named_query +- [ ] get_named_query +- [ ] get_query_execution +- [ ] get_query_results +- [ ] list_named_queries +- [ ] list_query_executions +- [ ] start_query_execution +- [ ] stop_query_execution + +## autoscaling - 42% implemented +- [X] attach_instances +- [X] attach_load_balancer_target_groups +- [X] attach_load_balancers +- [ ] complete_lifecycle_action +- [X] create_auto_scaling_group +- [X] create_launch_configuration +- [X] create_or_update_tags +- [X] delete_auto_scaling_group +- [X] delete_launch_configuration +- [ ] delete_lifecycle_hook +- [ ] delete_notification_configuration +- [X] delete_policy +- [ ] delete_scheduled_action +- [ ] delete_tags +- [ ] describe_account_limits +- [ ] describe_adjustment_types +- [X] describe_auto_scaling_groups +- [X] describe_auto_scaling_instances +- [ ] describe_auto_scaling_notification_types +- [X] describe_launch_configurations +- [ ] describe_lifecycle_hook_types +- [ ] describe_lifecycle_hooks +- [X] describe_load_balancer_target_groups +- [X] 
describe_load_balancers +- [ ] describe_metric_collection_types +- [ ] describe_notification_configurations +- [X] describe_policies +- [ ] describe_scaling_activities +- [ ] describe_scaling_process_types +- [ ] describe_scheduled_actions +- [ ] describe_tags +- [ ] describe_termination_policy_types +- [X] detach_instances +- [X] detach_load_balancer_target_groups +- [X] detach_load_balancers +- [ ] disable_metrics_collection +- [ ] enable_metrics_collection +- [ ] enter_standby +- [X] execute_policy +- [ ] exit_standby +- [ ] put_lifecycle_hook +- [ ] put_notification_configuration +- [ ] put_scaling_policy +- [ ] put_scheduled_update_group_action +- [ ] record_lifecycle_action_heartbeat +- [ ] resume_processes +- [X] set_desired_capacity +- [X] set_instance_health +- [ ] set_instance_protection +- [ ] suspend_processes +- [ ] terminate_instance_in_auto_scaling_group +- [X] update_auto_scaling_group + +## batch - 93% implemented +- [ ] cancel_job +- [X] create_compute_environment +- [X] create_job_queue +- [X] delete_compute_environment +- [X] delete_job_queue +- [X] deregister_job_definition +- [X] describe_compute_environments +- [X] describe_job_definitions +- [X] describe_job_queues +- [X] describe_jobs +- [X] list_jobs +- [X] register_job_definition +- [X] submit_job +- [X] terminate_job +- [X] update_compute_environment +- [X] update_job_queue + +## budgets - 0% implemented +- [ ] create_budget +- [ ] create_notification +- [ ] create_subscriber +- [ ] delete_budget +- [ ] delete_notification +- [ ] delete_subscriber +- [ ] describe_budget +- [ ] describe_budgets +- [ ] describe_notifications_for_budget +- [ ] describe_subscribers_for_notification +- [ ] update_budget +- [ ] update_notification +- [ ] update_subscriber + +## clouddirectory - 0% implemented +- [ ] add_facet_to_object +- [ ] apply_schema +- [ ] attach_object +- [ ] attach_policy +- [ ] attach_to_index +- [ ] attach_typed_link +- [ ] batch_read +- [ ] batch_write +- [ ] create_directory +- [ ] create_facet +- [ ] create_index +- [ ] create_object +- [ ] create_schema +- [ ] create_typed_link_facet +- [ ] delete_directory +- [ ] delete_facet +- [ ] delete_object +- [ ] delete_schema +- [ ] delete_typed_link_facet +- [ ] detach_from_index +- [ ] detach_object +- [ ] detach_policy +- [ ] detach_typed_link +- [ ] disable_directory +- [ ] enable_directory +- [ ] get_directory +- [ ] get_facet +- [ ] get_object_information +- [ ] get_schema_as_json +- [ ] get_typed_link_facet_information +- [ ] list_applied_schema_arns +- [ ] list_attached_indices +- [ ] list_development_schema_arns +- [ ] list_directories +- [ ] list_facet_attributes +- [ ] list_facet_names +- [ ] list_incoming_typed_links +- [ ] list_index +- [ ] list_object_attributes +- [ ] list_object_children +- [ ] list_object_parent_paths +- [ ] list_object_parents +- [ ] list_object_policies +- [ ] list_outgoing_typed_links +- [ ] list_policy_attachments +- [ ] list_published_schema_arns +- [ ] list_tags_for_resource +- [ ] list_typed_link_facet_attributes +- [ ] list_typed_link_facet_names +- [ ] lookup_policy +- [ ] publish_schema +- [ ] put_schema_from_json +- [ ] remove_facet_from_object +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_facet +- [ ] update_object_attributes +- [ ] update_schema +- [ ] update_typed_link_facet + +## cloudformation - 17% implemented +- [ ] cancel_update_stack +- [ ] continue_update_rollback +- [ ] create_change_set +- [X] create_stack +- [ ] create_stack_instances +- [ ] create_stack_set +- [ ] delete_change_set +- [X] 
delete_stack +- [ ] delete_stack_instances +- [ ] delete_stack_set +- [ ] describe_account_limits +- [ ] describe_change_set +- [ ] describe_stack_events +- [ ] describe_stack_instance +- [ ] describe_stack_resource +- [ ] describe_stack_resources +- [ ] describe_stack_set +- [ ] describe_stack_set_operation +- [X] describe_stacks +- [ ] estimate_template_cost +- [ ] execute_change_set +- [ ] get_stack_policy +- [ ] get_template +- [ ] get_template_summary +- [ ] list_change_sets +- [X] list_exports +- [ ] list_imports +- [ ] list_stack_instances +- [X] list_stack_resources +- [ ] list_stack_set_operation_results +- [ ] list_stack_set_operations +- [ ] list_stack_sets +- [X] list_stacks +- [ ] set_stack_policy +- [ ] signal_resource +- [ ] stop_stack_set_operation +- [X] update_stack +- [ ] update_stack_set +- [ ] update_termination_protection +- [ ] validate_template + +## cloudfront - 0% implemented +- [ ] create_cloud_front_origin_access_identity +- [ ] create_distribution +- [ ] create_distribution_with_tags +- [ ] create_invalidation +- [ ] create_streaming_distribution +- [ ] create_streaming_distribution_with_tags +- [ ] delete_cloud_front_origin_access_identity +- [ ] delete_distribution +- [ ] delete_service_linked_role +- [ ] delete_streaming_distribution +- [ ] get_cloud_front_origin_access_identity +- [ ] get_cloud_front_origin_access_identity_config +- [ ] get_distribution +- [ ] get_distribution_config +- [ ] get_invalidation +- [ ] get_streaming_distribution +- [ ] get_streaming_distribution_config +- [ ] list_cloud_front_origin_access_identities +- [ ] list_distributions +- [ ] list_distributions_by_web_acl_id +- [ ] list_invalidations +- [ ] list_streaming_distributions +- [ ] list_tags_for_resource +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_cloud_front_origin_access_identity +- [ ] update_distribution +- [ ] update_streaming_distribution + +## cloudhsm - 0% implemented +- [ ] add_tags_to_resource +- [ ] create_hapg +- [ ] create_hsm +- [ ] create_luna_client +- [ ] delete_hapg +- [ ] delete_hsm +- [ ] delete_luna_client +- [ ] describe_hapg +- [ ] describe_hsm +- [ ] describe_luna_client +- [ ] get_config +- [ ] list_available_zones +- [ ] list_hapgs +- [ ] list_hsms +- [ ] list_luna_clients +- [ ] list_tags_for_resource +- [ ] modify_hapg +- [ ] modify_hsm +- [ ] modify_luna_client +- [ ] remove_tags_from_resource + +## cloudhsmv2 - 0% implemented +- [ ] create_cluster +- [ ] create_hsm +- [ ] delete_cluster +- [ ] delete_hsm +- [ ] describe_backups +- [ ] describe_clusters +- [ ] initialize_cluster +- [ ] list_tags +- [ ] tag_resource +- [ ] untag_resource + +## cloudsearch - 0% implemented +- [ ] build_suggesters +- [ ] create_domain +- [ ] define_analysis_scheme +- [ ] define_expression +- [ ] define_index_field +- [ ] define_suggester +- [ ] delete_analysis_scheme +- [ ] delete_domain +- [ ] delete_expression +- [ ] delete_index_field +- [ ] delete_suggester +- [ ] describe_analysis_schemes +- [ ] describe_availability_options +- [ ] describe_domains +- [ ] describe_expressions +- [ ] describe_index_fields +- [ ] describe_scaling_parameters +- [ ] describe_service_access_policies +- [ ] describe_suggesters +- [ ] index_documents +- [ ] list_domain_names +- [ ] update_availability_options +- [ ] update_scaling_parameters +- [ ] update_service_access_policies + +## cloudsearchdomain - 0% implemented +- [ ] search +- [ ] suggest +- [ ] upload_documents + +## cloudtrail - 0% implemented +- [ ] add_tags +- [ ] create_trail +- [ ] delete_trail +- [ ] 
describe_trails +- [ ] get_event_selectors +- [ ] get_trail_status +- [ ] list_public_keys +- [ ] list_tags +- [ ] lookup_events +- [ ] put_event_selectors +- [ ] remove_tags +- [ ] start_logging +- [ ] stop_logging +- [ ] update_trail + +## cloudwatch - 53% implemented +- [X] delete_alarms +- [X] delete_dashboards +- [ ] describe_alarm_history +- [ ] describe_alarms +- [ ] describe_alarms_for_metric +- [ ] disable_alarm_actions +- [ ] enable_alarm_actions +- [X] get_dashboard +- [ ] get_metric_statistics +- [X] list_dashboards +- [ ] list_metrics +- [X] put_dashboard +- [X] put_metric_alarm +- [X] put_metric_data +- [X] set_alarm_state + +## codebuild - 0% implemented +- [ ] batch_delete_builds +- [ ] batch_get_builds +- [ ] batch_get_projects +- [ ] create_project +- [ ] create_webhook +- [ ] delete_project +- [ ] delete_webhook +- [ ] list_builds +- [ ] list_builds_for_project +- [ ] list_curated_environment_images +- [ ] list_projects +- [ ] start_build +- [ ] stop_build +- [ ] update_project + +## codecommit - 0% implemented +- [ ] batch_get_repositories +- [ ] create_branch +- [ ] create_repository +- [ ] delete_branch +- [ ] delete_repository +- [ ] get_blob +- [ ] get_branch +- [ ] get_commit +- [ ] get_differences +- [ ] get_repository +- [ ] get_repository_triggers +- [ ] list_branches +- [ ] list_repositories +- [ ] put_repository_triggers +- [ ] test_repository_triggers +- [ ] update_default_branch +- [ ] update_repository_description +- [ ] update_repository_name + +## codedeploy - 0% implemented +- [ ] add_tags_to_on_premises_instances +- [ ] batch_get_application_revisions +- [ ] batch_get_applications +- [ ] batch_get_deployment_groups +- [ ] batch_get_deployment_instances +- [ ] batch_get_deployments +- [ ] batch_get_on_premises_instances +- [ ] continue_deployment +- [ ] create_application +- [ ] create_deployment +- [ ] create_deployment_config +- [ ] create_deployment_group +- [ ] delete_application +- [ ] delete_deployment_config +- [ ] delete_deployment_group +- [ ] deregister_on_premises_instance +- [ ] get_application +- [ ] get_application_revision +- [ ] get_deployment +- [ ] get_deployment_config +- [ ] get_deployment_group +- [ ] get_deployment_instance +- [ ] get_on_premises_instance +- [ ] list_application_revisions +- [ ] list_applications +- [ ] list_deployment_configs +- [ ] list_deployment_groups +- [ ] list_deployment_instances +- [ ] list_deployments +- [ ] list_git_hub_account_token_names +- [ ] list_on_premises_instances +- [ ] register_application_revision +- [ ] register_on_premises_instance +- [ ] remove_tags_from_on_premises_instances +- [ ] skip_wait_time_for_instance_termination +- [ ] stop_deployment +- [ ] update_application +- [ ] update_deployment_group + +## codepipeline - 0% implemented +- [ ] acknowledge_job +- [ ] acknowledge_third_party_job +- [ ] create_custom_action_type +- [ ] create_pipeline +- [ ] delete_custom_action_type +- [ ] delete_pipeline +- [ ] disable_stage_transition +- [ ] enable_stage_transition +- [ ] get_job_details +- [ ] get_pipeline +- [ ] get_pipeline_execution +- [ ] get_pipeline_state +- [ ] get_third_party_job_details +- [ ] list_action_types +- [ ] list_pipeline_executions +- [ ] list_pipelines +- [ ] poll_for_jobs +- [ ] poll_for_third_party_jobs +- [ ] put_action_revision +- [ ] put_approval_result +- [ ] put_job_failure_result +- [ ] put_job_success_result +- [ ] put_third_party_job_failure_result +- [ ] put_third_party_job_success_result +- [ ] retry_stage_execution +- [ ] start_pipeline_execution +- [ ] 
update_pipeline + +## codestar - 0% implemented +- [ ] associate_team_member +- [ ] create_project +- [ ] create_user_profile +- [ ] delete_project +- [ ] delete_user_profile +- [ ] describe_project +- [ ] describe_user_profile +- [ ] disassociate_team_member +- [ ] list_projects +- [ ] list_resources +- [ ] list_tags_for_project +- [ ] list_team_members +- [ ] list_user_profiles +- [ ] tag_project +- [ ] untag_project +- [ ] update_project +- [ ] update_team_member +- [ ] update_user_profile + +## cognito-identity - 0% implemented +- [ ] create_identity_pool +- [ ] delete_identities +- [ ] delete_identity_pool +- [ ] describe_identity +- [ ] describe_identity_pool +- [ ] get_credentials_for_identity +- [ ] get_id +- [ ] get_identity_pool_roles +- [ ] get_open_id_token +- [ ] get_open_id_token_for_developer_identity +- [ ] list_identities +- [ ] list_identity_pools +- [ ] lookup_developer_identity +- [ ] merge_developer_identities +- [ ] set_identity_pool_roles +- [ ] unlink_developer_identity +- [ ] unlink_identity +- [ ] update_identity_pool + +## cognito-idp - 0% implemented +- [ ] add_custom_attributes +- [ ] admin_add_user_to_group +- [ ] admin_confirm_sign_up +- [ ] admin_create_user +- [ ] admin_delete_user +- [ ] admin_delete_user_attributes +- [ ] admin_disable_provider_for_user +- [ ] admin_disable_user +- [ ] admin_enable_user +- [ ] admin_forget_device +- [ ] admin_get_device +- [ ] admin_get_user +- [ ] admin_initiate_auth +- [ ] admin_link_provider_for_user +- [ ] admin_list_devices +- [ ] admin_list_groups_for_user +- [ ] admin_remove_user_from_group +- [ ] admin_reset_user_password +- [ ] admin_respond_to_auth_challenge +- [ ] admin_set_user_settings +- [ ] admin_update_device_status +- [ ] admin_update_user_attributes +- [ ] admin_user_global_sign_out +- [ ] change_password +- [ ] confirm_device +- [ ] confirm_forgot_password +- [ ] confirm_sign_up +- [ ] create_group +- [ ] create_identity_provider +- [ ] create_resource_server +- [ ] create_user_import_job +- [ ] create_user_pool +- [ ] create_user_pool_client +- [ ] create_user_pool_domain +- [ ] delete_group +- [ ] delete_identity_provider +- [ ] delete_resource_server +- [ ] delete_user +- [ ] delete_user_attributes +- [ ] delete_user_pool +- [ ] delete_user_pool_client +- [ ] delete_user_pool_domain +- [ ] describe_identity_provider +- [ ] describe_resource_server +- [ ] describe_user_import_job +- [ ] describe_user_pool +- [ ] describe_user_pool_client +- [ ] describe_user_pool_domain +- [ ] forget_device +- [ ] forgot_password +- [ ] get_csv_header +- [ ] get_device +- [ ] get_group +- [ ] get_identity_provider_by_identifier +- [ ] get_ui_customization +- [ ] get_user +- [ ] get_user_attribute_verification_code +- [ ] global_sign_out +- [ ] initiate_auth +- [ ] list_devices +- [ ] list_groups +- [ ] list_identity_providers +- [ ] list_resource_servers +- [ ] list_user_import_jobs +- [ ] list_user_pool_clients +- [ ] list_user_pools +- [ ] list_users +- [ ] list_users_in_group +- [ ] resend_confirmation_code +- [ ] respond_to_auth_challenge +- [ ] set_ui_customization +- [ ] set_user_settings +- [ ] sign_up +- [ ] start_user_import_job +- [ ] stop_user_import_job +- [ ] update_device_status +- [ ] update_group +- [ ] update_identity_provider +- [ ] update_resource_server +- [ ] update_user_attributes +- [ ] update_user_pool +- [ ] update_user_pool_client +- [ ] verify_user_attribute + +## cognito-sync - 0% implemented +- [ ] bulk_publish +- [ ] delete_dataset +- [ ] describe_dataset +- [ ] 
describe_identity_pool_usage +- [ ] describe_identity_usage +- [ ] get_bulk_publish_details +- [ ] get_cognito_events +- [ ] get_identity_pool_configuration +- [ ] list_datasets +- [ ] list_identity_pool_usage +- [ ] list_records +- [ ] register_device +- [ ] set_cognito_events +- [ ] set_identity_pool_configuration +- [ ] subscribe_to_dataset +- [ ] unsubscribe_from_dataset +- [ ] update_records + +## config - 0% implemented +- [ ] delete_config_rule +- [ ] delete_configuration_recorder +- [ ] delete_delivery_channel +- [ ] delete_evaluation_results +- [ ] deliver_config_snapshot +- [ ] describe_compliance_by_config_rule +- [ ] describe_compliance_by_resource +- [ ] describe_config_rule_evaluation_status +- [ ] describe_config_rules +- [ ] describe_configuration_recorder_status +- [ ] describe_configuration_recorders +- [ ] describe_delivery_channel_status +- [ ] describe_delivery_channels +- [ ] get_compliance_details_by_config_rule +- [ ] get_compliance_details_by_resource +- [ ] get_compliance_summary_by_config_rule +- [ ] get_compliance_summary_by_resource_type +- [ ] get_discovered_resource_counts +- [ ] get_resource_config_history +- [ ] list_discovered_resources +- [ ] put_config_rule +- [ ] put_configuration_recorder +- [ ] put_delivery_channel +- [ ] put_evaluations +- [ ] start_config_rules_evaluation +- [ ] start_configuration_recorder +- [ ] stop_configuration_recorder + +## cur - 0% implemented +- [ ] delete_report_definition +- [ ] describe_report_definitions +- [ ] put_report_definition + +## datapipeline - 42% implemented +- [X] activate_pipeline +- [ ] add_tags +- [X] create_pipeline +- [ ] deactivate_pipeline +- [X] delete_pipeline +- [X] describe_objects +- [X] describe_pipelines +- [ ] evaluate_expression +- [X] get_pipeline_definition +- [X] list_pipelines +- [ ] poll_for_task +- [X] put_pipeline_definition +- [ ] query_objects +- [ ] remove_tags +- [ ] report_task_progress +- [ ] report_task_runner_heartbeat +- [ ] set_status +- [ ] set_task_status +- [ ] validate_pipeline_definition + +## dax - 0% implemented +- [ ] create_cluster +- [ ] create_parameter_group +- [ ] create_subnet_group +- [ ] decrease_replication_factor +- [ ] delete_cluster +- [ ] delete_parameter_group +- [ ] delete_subnet_group +- [ ] describe_clusters +- [ ] describe_default_parameters +- [ ] describe_events +- [ ] describe_parameter_groups +- [ ] describe_parameters +- [ ] describe_subnet_groups +- [ ] increase_replication_factor +- [ ] list_tags +- [ ] reboot_node +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_cluster +- [ ] update_parameter_group +- [ ] update_subnet_group + +## devicefarm - 0% implemented +- [ ] create_device_pool +- [ ] create_network_profile +- [ ] create_project +- [ ] create_remote_access_session +- [ ] create_upload +- [ ] delete_device_pool +- [ ] delete_network_profile +- [ ] delete_project +- [ ] delete_remote_access_session +- [ ] delete_run +- [ ] delete_upload +- [ ] get_account_settings +- [ ] get_device +- [ ] get_device_pool +- [ ] get_device_pool_compatibility +- [ ] get_job +- [ ] get_network_profile +- [ ] get_offering_status +- [ ] get_project +- [ ] get_remote_access_session +- [ ] get_run +- [ ] get_suite +- [ ] get_test +- [ ] get_upload +- [ ] install_to_remote_access_session +- [ ] list_artifacts +- [ ] list_device_pools +- [ ] list_devices +- [ ] list_jobs +- [ ] list_network_profiles +- [ ] list_offering_promotions +- [ ] list_offering_transactions +- [ ] list_offerings +- [ ] list_projects +- [ ] list_remote_access_sessions +- [ ] 
list_runs +- [ ] list_samples +- [ ] list_suites +- [ ] list_tests +- [ ] list_unique_problems +- [ ] list_uploads +- [ ] purchase_offering +- [ ] renew_offering +- [ ] schedule_run +- [ ] stop_remote_access_session +- [ ] stop_run +- [ ] update_device_pool +- [ ] update_network_profile +- [ ] update_project + +## directconnect - 0% implemented +- [ ] allocate_connection_on_interconnect +- [ ] allocate_hosted_connection +- [ ] allocate_private_virtual_interface +- [ ] allocate_public_virtual_interface +- [ ] associate_connection_with_lag +- [ ] associate_hosted_connection +- [ ] associate_virtual_interface +- [ ] confirm_connection +- [ ] confirm_private_virtual_interface +- [ ] confirm_public_virtual_interface +- [ ] create_bgp_peer +- [ ] create_connection +- [ ] create_direct_connect_gateway +- [ ] create_direct_connect_gateway_association +- [ ] create_interconnect +- [ ] create_lag +- [ ] create_private_virtual_interface +- [ ] create_public_virtual_interface +- [ ] delete_bgp_peer +- [ ] delete_connection +- [ ] delete_direct_connect_gateway +- [ ] delete_direct_connect_gateway_association +- [ ] delete_interconnect +- [ ] delete_lag +- [ ] delete_virtual_interface +- [ ] describe_connection_loa +- [ ] describe_connections +- [ ] describe_connections_on_interconnect +- [ ] describe_direct_connect_gateway_associations +- [ ] describe_direct_connect_gateway_attachments +- [ ] describe_direct_connect_gateways +- [ ] describe_hosted_connections +- [ ] describe_interconnect_loa +- [ ] describe_interconnects +- [ ] describe_lags +- [ ] describe_loa +- [ ] describe_locations +- [ ] describe_tags +- [ ] describe_virtual_gateways +- [ ] describe_virtual_interfaces +- [ ] disassociate_connection_from_lag +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_lag + +## discovery - 0% implemented +- [ ] associate_configuration_items_to_application +- [ ] create_application +- [ ] create_tags +- [ ] delete_applications +- [ ] delete_tags +- [ ] describe_agents +- [ ] describe_configurations +- [ ] describe_export_configurations +- [ ] describe_export_tasks +- [ ] describe_tags +- [ ] disassociate_configuration_items_from_application +- [ ] export_configurations +- [ ] get_discovery_summary +- [ ] list_configurations +- [ ] list_server_neighbors +- [ ] start_data_collection_by_agent_ids +- [ ] start_export_task +- [ ] stop_data_collection_by_agent_ids +- [ ] update_application + +## dms - 0% implemented +- [ ] add_tags_to_resource +- [ ] create_endpoint +- [ ] create_event_subscription +- [ ] create_replication_instance +- [ ] create_replication_subnet_group +- [ ] create_replication_task +- [ ] delete_certificate +- [ ] delete_endpoint +- [ ] delete_event_subscription +- [ ] delete_replication_instance +- [ ] delete_replication_subnet_group +- [ ] delete_replication_task +- [ ] describe_account_attributes +- [ ] describe_certificates +- [ ] describe_connections +- [ ] describe_endpoint_types +- [ ] describe_endpoints +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_orderable_replication_instances +- [ ] describe_refresh_schemas_status +- [ ] describe_replication_instances +- [ ] describe_replication_subnet_groups +- [ ] describe_replication_task_assessment_results +- [ ] describe_replication_tasks +- [ ] describe_schemas +- [ ] describe_table_statistics +- [ ] import_certificate +- [ ] list_tags_for_resource +- [ ] modify_endpoint +- [ ] modify_event_subscription +- [ ] modify_replication_instance +- [ ] modify_replication_subnet_group 
+- [ ] modify_replication_task +- [ ] refresh_schemas +- [ ] reload_tables +- [ ] remove_tags_from_resource +- [ ] start_replication_task +- [ ] start_replication_task_assessment +- [ ] stop_replication_task +- [ ] test_connection + +## ds - 0% implemented +- [ ] add_ip_routes +- [ ] add_tags_to_resource +- [ ] cancel_schema_extension +- [ ] connect_directory +- [ ] create_alias +- [ ] create_computer +- [ ] create_conditional_forwarder +- [ ] create_directory +- [ ] create_microsoft_ad +- [ ] create_snapshot +- [ ] create_trust +- [ ] delete_conditional_forwarder +- [ ] delete_directory +- [ ] delete_snapshot +- [ ] delete_trust +- [ ] deregister_event_topic +- [ ] describe_conditional_forwarders +- [ ] describe_directories +- [ ] describe_domain_controllers +- [ ] describe_event_topics +- [ ] describe_snapshots +- [ ] describe_trusts +- [ ] disable_radius +- [ ] disable_sso +- [ ] enable_radius +- [ ] enable_sso +- [ ] get_directory_limits +- [ ] get_snapshot_limits +- [ ] list_ip_routes +- [ ] list_schema_extensions +- [ ] list_tags_for_resource +- [ ] register_event_topic +- [ ] remove_ip_routes +- [ ] remove_tags_from_resource +- [ ] restore_from_snapshot +- [ ] start_schema_extension +- [ ] update_conditional_forwarder +- [ ] update_number_of_domain_controllers +- [ ] update_radius +- [ ] verify_trust + +## dynamodb - 36% implemented +- [ ] batch_get_item +- [ ] batch_write_item +- [X] create_table +- [X] delete_item +- [X] delete_table +- [ ] describe_limits +- [ ] describe_table +- [ ] describe_time_to_live +- [X] get_item +- [ ] list_tables +- [ ] list_tags_of_resource +- [X] put_item +- [X] query +- [X] scan +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_item +- [ ] update_table +- [ ] update_time_to_live + +## dynamodbstreams - 0% implemented +- [ ] describe_stream +- [ ] get_records +- [ ] get_shard_iterator +- [ ] list_streams + +## ec2 - 39% implemented +- [ ] accept_reserved_instances_exchange_quote +- [X] accept_vpc_peering_connection +- [X] allocate_address +- [ ] allocate_hosts +- [ ] assign_ipv6_addresses +- [ ] assign_private_ip_addresses +- [X] associate_address +- [X] associate_dhcp_options +- [ ] associate_iam_instance_profile +- [X] associate_route_table +- [ ] associate_subnet_cidr_block +- [ ] associate_vpc_cidr_block +- [ ] attach_classic_link_vpc +- [X] attach_internet_gateway +- [X] attach_network_interface +- [X] attach_volume +- [X] attach_vpn_gateway +- [X] authorize_security_group_egress +- [X] authorize_security_group_ingress +- [ ] bundle_instance +- [ ] cancel_bundle_task +- [ ] cancel_conversion_task +- [ ] cancel_export_task +- [ ] cancel_import_task +- [ ] cancel_reserved_instances_listing +- [X] cancel_spot_fleet_requests +- [X] cancel_spot_instance_requests +- [ ] confirm_product_instance +- [ ] copy_fpga_image +- [X] copy_image +- [ ] copy_snapshot +- [X] create_customer_gateway +- [ ] create_default_subnet +- [ ] create_default_vpc +- [X] create_dhcp_options +- [ ] create_egress_only_internet_gateway +- [ ] create_flow_logs +- [ ] create_fpga_image +- [X] create_image +- [ ] create_instance_export_task +- [X] create_internet_gateway +- [X] create_key_pair +- [X] create_nat_gateway +- [X] create_network_acl +- [X] create_network_acl_entry +- [X] create_network_interface +- [ ] create_network_interface_permission +- [ ] create_placement_group +- [ ] create_reserved_instances_listing +- [X] create_route +- [X] create_route_table +- [X] create_security_group +- [X] create_snapshot +- [ ] create_spot_datafeed_subscription +- [X] create_subnet 
+- [X] create_tags +- [X] create_volume +- [X] create_vpc +- [ ] create_vpc_endpoint +- [X] create_vpc_peering_connection +- [X] create_vpn_connection +- [ ] create_vpn_connection_route +- [X] create_vpn_gateway +- [X] delete_customer_gateway +- [ ] delete_dhcp_options +- [ ] delete_egress_only_internet_gateway +- [ ] delete_flow_logs +- [ ] delete_fpga_image +- [X] delete_internet_gateway +- [X] delete_key_pair +- [X] delete_nat_gateway +- [X] delete_network_acl +- [X] delete_network_acl_entry +- [X] delete_network_interface +- [ ] delete_network_interface_permission +- [ ] delete_placement_group +- [X] delete_route +- [X] delete_route_table +- [X] delete_security_group +- [X] delete_snapshot +- [ ] delete_spot_datafeed_subscription +- [X] delete_subnet +- [X] delete_tags +- [X] delete_volume +- [X] delete_vpc +- [ ] delete_vpc_endpoints +- [X] delete_vpc_peering_connection +- [X] delete_vpn_connection +- [ ] delete_vpn_connection_route +- [X] delete_vpn_gateway +- [X] deregister_image +- [ ] describe_account_attributes +- [X] describe_addresses +- [X] describe_availability_zones +- [ ] describe_bundle_tasks +- [ ] describe_classic_link_instances +- [ ] describe_conversion_tasks +- [ ] describe_customer_gateways +- [X] describe_dhcp_options +- [ ] describe_egress_only_internet_gateways +- [ ] describe_elastic_gpus +- [ ] describe_export_tasks +- [ ] describe_flow_logs +- [ ] describe_fpga_image_attribute +- [ ] describe_fpga_images +- [ ] describe_host_reservation_offerings +- [ ] describe_host_reservations +- [ ] describe_hosts +- [ ] describe_iam_instance_profile_associations +- [ ] describe_id_format +- [ ] describe_identity_id_format +- [ ] describe_image_attribute +- [X] describe_images +- [ ] describe_import_image_tasks +- [ ] describe_import_snapshot_tasks +- [X] describe_instance_attribute +- [ ] describe_instance_status +- [ ] describe_instances +- [X] describe_internet_gateways +- [X] describe_key_pairs +- [ ] describe_moving_addresses +- [ ] describe_nat_gateways +- [ ] describe_network_acls +- [ ] describe_network_interface_attribute +- [ ] describe_network_interface_permissions +- [X] describe_network_interfaces +- [ ] describe_placement_groups +- [ ] describe_prefix_lists +- [X] describe_regions +- [ ] describe_reserved_instances +- [ ] describe_reserved_instances_listings +- [ ] describe_reserved_instances_modifications +- [ ] describe_reserved_instances_offerings +- [ ] describe_route_tables +- [ ] describe_scheduled_instance_availability +- [ ] describe_scheduled_instances +- [ ] describe_security_group_references +- [X] describe_security_groups +- [ ] describe_snapshot_attribute +- [X] describe_snapshots +- [ ] describe_spot_datafeed_subscription +- [X] describe_spot_fleet_instances +- [ ] describe_spot_fleet_request_history +- [X] describe_spot_fleet_requests +- [X] describe_spot_instance_requests +- [ ] describe_spot_price_history +- [ ] describe_stale_security_groups +- [ ] describe_subnets +- [X] describe_tags +- [ ] describe_volume_attribute +- [ ] describe_volume_status +- [X] describe_volumes +- [ ] describe_volumes_modifications +- [X] describe_vpc_attribute +- [ ] describe_vpc_classic_link +- [ ] describe_vpc_classic_link_dns_support +- [ ] describe_vpc_endpoint_services +- [ ] describe_vpc_endpoints +- [ ] describe_vpc_peering_connections +- [ ] describe_vpcs +- [X] describe_vpn_connections +- [ ] describe_vpn_gateways +- [ ] detach_classic_link_vpc +- [X] detach_internet_gateway +- [X] detach_network_interface +- [X] detach_volume +- [X] detach_vpn_gateway +- 
[ ] disable_vgw_route_propagation +- [ ] disable_vpc_classic_link +- [ ] disable_vpc_classic_link_dns_support +- [X] disassociate_address +- [ ] disassociate_iam_instance_profile +- [X] disassociate_route_table +- [ ] disassociate_subnet_cidr_block +- [ ] disassociate_vpc_cidr_block +- [ ] enable_vgw_route_propagation +- [ ] enable_volume_io +- [ ] enable_vpc_classic_link +- [ ] enable_vpc_classic_link_dns_support +- [ ] get_console_output +- [ ] get_console_screenshot +- [ ] get_host_reservation_purchase_preview +- [ ] get_password_data +- [ ] get_reserved_instances_exchange_quote +- [ ] import_image +- [ ] import_instance +- [X] import_key_pair +- [ ] import_snapshot +- [ ] import_volume +- [ ] modify_fpga_image_attribute +- [ ] modify_hosts +- [ ] modify_id_format +- [ ] modify_identity_id_format +- [ ] modify_image_attribute +- [X] modify_instance_attribute +- [ ] modify_instance_placement +- [X] modify_network_interface_attribute +- [ ] modify_reserved_instances +- [ ] modify_snapshot_attribute +- [X] modify_spot_fleet_request +- [X] modify_subnet_attribute +- [ ] modify_volume +- [ ] modify_volume_attribute +- [X] modify_vpc_attribute +- [ ] modify_vpc_endpoint +- [ ] modify_vpc_peering_connection_options +- [ ] modify_vpc_tenancy +- [ ] monitor_instances +- [ ] move_address_to_vpc +- [ ] purchase_host_reservation +- [ ] purchase_reserved_instances_offering +- [ ] purchase_scheduled_instances +- [X] reboot_instances +- [ ] register_image +- [X] reject_vpc_peering_connection +- [X] release_address +- [ ] release_hosts +- [ ] replace_iam_instance_profile_association +- [X] replace_network_acl_association +- [X] replace_network_acl_entry +- [X] replace_route +- [X] replace_route_table_association +- [ ] report_instance_status +- [X] request_spot_fleet +- [X] request_spot_instances +- [ ] reset_fpga_image_attribute +- [ ] reset_image_attribute +- [ ] reset_instance_attribute +- [ ] reset_network_interface_attribute +- [ ] reset_snapshot_attribute +- [ ] restore_address_to_classic +- [X] revoke_security_group_egress +- [X] revoke_security_group_ingress +- [ ] run_instances +- [ ] run_scheduled_instances +- [X] start_instances +- [X] stop_instances +- [X] terminate_instances +- [ ] unassign_ipv6_addresses +- [ ] unassign_private_ip_addresses +- [ ] unmonitor_instances +- [ ] update_security_group_rule_descriptions_egress +- [ ] update_security_group_rule_descriptions_ingress + +## ecr - 27% implemented +- [ ] batch_check_layer_availability +- [ ] batch_delete_image +- [ ] batch_get_image +- [ ] complete_layer_upload +- [X] create_repository +- [ ] delete_lifecycle_policy +- [X] delete_repository +- [ ] delete_repository_policy +- [X] describe_images +- [X] describe_repositories +- [ ] get_authorization_token +- [ ] get_download_url_for_layer +- [ ] get_lifecycle_policy +- [ ] get_lifecycle_policy_preview +- [ ] get_repository_policy +- [ ] initiate_layer_upload +- [X] list_images +- [X] put_image +- [ ] put_lifecycle_policy +- [ ] set_repository_policy +- [ ] start_lifecycle_policy_preview +- [ ] upload_layer_part + +## ecs - 87% implemented +- [X] create_cluster +- [X] create_service +- [X] delete_attributes +- [X] delete_cluster +- [X] delete_service +- [X] deregister_container_instance +- [X] deregister_task_definition +- [X] describe_clusters +- [X] describe_container_instances +- [X] describe_services +- [X] describe_task_definition +- [X] describe_tasks +- [ ] discover_poll_endpoint +- [X] list_attributes +- [X] list_clusters +- [X] list_container_instances +- [X] list_services +- 
[X] list_task_definition_families +- [X] list_task_definitions +- [X] list_tasks +- [X] put_attributes +- [X] register_container_instance +- [X] register_task_definition +- [X] run_task +- [X] start_task +- [X] stop_task +- [ ] submit_container_state_change +- [ ] submit_task_state_change +- [ ] update_container_agent +- [X] update_container_instances_state +- [X] update_service + +## efs - 0% implemented +- [ ] create_file_system +- [ ] create_mount_target +- [ ] create_tags +- [ ] delete_file_system +- [ ] delete_mount_target +- [ ] delete_tags +- [ ] describe_file_systems +- [ ] describe_mount_target_security_groups +- [ ] describe_mount_targets +- [ ] describe_tags +- [ ] modify_mount_target_security_groups + +## elasticache - 0% implemented +- [ ] add_tags_to_resource +- [ ] authorize_cache_security_group_ingress +- [ ] copy_snapshot +- [ ] create_cache_cluster +- [ ] create_cache_parameter_group +- [ ] create_cache_security_group +- [ ] create_cache_subnet_group +- [ ] create_replication_group +- [ ] create_snapshot +- [ ] delete_cache_cluster +- [ ] delete_cache_parameter_group +- [ ] delete_cache_security_group +- [ ] delete_cache_subnet_group +- [ ] delete_replication_group +- [ ] delete_snapshot +- [ ] describe_cache_clusters +- [ ] describe_cache_engine_versions +- [ ] describe_cache_parameter_groups +- [ ] describe_cache_parameters +- [ ] describe_cache_security_groups +- [ ] describe_cache_subnet_groups +- [ ] describe_engine_default_parameters +- [ ] describe_events +- [ ] describe_replication_groups +- [ ] describe_reserved_cache_nodes +- [ ] describe_reserved_cache_nodes_offerings +- [ ] describe_snapshots +- [ ] list_allowed_node_type_modifications +- [ ] list_tags_for_resource +- [ ] modify_cache_cluster +- [ ] modify_cache_parameter_group +- [ ] modify_cache_subnet_group +- [ ] modify_replication_group +- [ ] modify_replication_group_shard_configuration +- [ ] purchase_reserved_cache_nodes_offering +- [ ] reboot_cache_cluster +- [ ] remove_tags_from_resource +- [ ] reset_cache_parameter_group +- [ ] revoke_cache_security_group_ingress +- [ ] test_failover + +## elasticbeanstalk - 0% implemented +- [ ] abort_environment_update +- [ ] apply_environment_managed_action +- [ ] check_dns_availability +- [ ] compose_environments +- [ ] create_application +- [ ] create_application_version +- [ ] create_configuration_template +- [ ] create_environment +- [ ] create_platform_version +- [ ] create_storage_location +- [ ] delete_application +- [ ] delete_application_version +- [ ] delete_configuration_template +- [ ] delete_environment_configuration +- [ ] delete_platform_version +- [ ] describe_application_versions +- [ ] describe_applications +- [ ] describe_configuration_options +- [ ] describe_configuration_settings +- [ ] describe_environment_health +- [ ] describe_environment_managed_action_history +- [ ] describe_environment_managed_actions +- [ ] describe_environment_resources +- [ ] describe_environments +- [ ] describe_events +- [ ] describe_instances_health +- [ ] describe_platform_version +- [ ] list_available_solution_stacks +- [ ] list_platform_versions +- [ ] list_tags_for_resource +- [ ] rebuild_environment +- [ ] request_environment_info +- [ ] restart_app_server +- [ ] retrieve_environment_info +- [ ] swap_environment_cnames +- [ ] terminate_environment +- [ ] update_application +- [ ] update_application_resource_lifecycle +- [ ] update_application_version +- [ ] update_configuration_template +- [ ] update_environment +- [ ] update_tags_for_resource +- [ ] 
validate_configuration_settings + +## elastictranscoder - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] create_pipeline +- [ ] create_preset +- [ ] delete_pipeline +- [ ] delete_preset +- [ ] list_jobs_by_pipeline +- [ ] list_jobs_by_status +- [ ] list_pipelines +- [ ] list_presets +- [ ] read_job +- [ ] read_pipeline +- [ ] read_preset +- [ ] test_role +- [ ] update_pipeline +- [ ] update_pipeline_notifications +- [ ] update_pipeline_status + +## elb - 34% implemented +- [ ] add_tags +- [X] apply_security_groups_to_load_balancer +- [ ] attach_load_balancer_to_subnets +- [X] configure_health_check +- [X] create_app_cookie_stickiness_policy +- [X] create_lb_cookie_stickiness_policy +- [X] create_load_balancer +- [X] create_load_balancer_listeners +- [ ] create_load_balancer_policy +- [X] delete_load_balancer +- [X] delete_load_balancer_listeners +- [ ] delete_load_balancer_policy +- [ ] deregister_instances_from_load_balancer +- [ ] describe_account_limits +- [ ] describe_instance_health +- [ ] describe_load_balancer_attributes +- [ ] describe_load_balancer_policies +- [ ] describe_load_balancer_policy_types +- [X] describe_load_balancers +- [ ] describe_tags +- [ ] detach_load_balancer_from_subnets +- [ ] disable_availability_zones_for_load_balancer +- [ ] enable_availability_zones_for_load_balancer +- [ ] modify_load_balancer_attributes +- [ ] register_instances_with_load_balancer +- [ ] remove_tags +- [ ] set_load_balancer_listener_ssl_certificate +- [ ] set_load_balancer_policies_for_backend_server +- [X] set_load_balancer_policies_of_listener + +## elbv2 - 70% implemented +- [ ] add_listener_certificates +- [ ] add_tags +- [X] create_listener +- [X] create_load_balancer +- [X] create_rule +- [X] create_target_group +- [X] delete_listener +- [X] delete_load_balancer +- [X] delete_rule +- [X] delete_target_group +- [X] deregister_targets +- [ ] describe_account_limits +- [ ] describe_listener_certificates +- [X] describe_listeners +- [X] describe_load_balancer_attributes +- [X] describe_load_balancers +- [X] describe_rules +- [ ] describe_ssl_policies +- [ ] describe_tags +- [ ] describe_target_group_attributes +- [X] describe_target_groups +- [X] describe_target_health +- [X] modify_listener +- [X] modify_load_balancer_attributes +- [X] modify_rule +- [X] modify_target_group +- [ ] modify_target_group_attributes +- [X] register_targets +- [ ] remove_listener_certificates +- [ ] remove_tags +- [X] set_ip_address_type +- [X] set_rule_priorities +- [X] set_security_groups +- [X] set_subnets + +## emr - 55% implemented +- [ ] add_instance_fleet +- [X] add_instance_groups +- [X] add_job_flow_steps +- [X] add_tags +- [ ] cancel_steps +- [ ] create_security_configuration +- [ ] delete_security_configuration +- [ ] describe_cluster +- [X] describe_job_flows +- [ ] describe_security_configuration +- [X] describe_step +- [X] list_bootstrap_actions +- [X] list_clusters +- [ ] list_instance_fleets +- [X] list_instance_groups +- [ ] list_instances +- [ ] list_security_configurations +- [X] list_steps +- [ ] modify_instance_fleet +- [X] modify_instance_groups +- [ ] put_auto_scaling_policy +- [ ] remove_auto_scaling_policy +- [X] remove_tags +- [X] run_job_flow +- [X] set_termination_protection +- [X] set_visible_to_all_users +- [X] terminate_job_flows + +## es - 0% implemented +- [ ] add_tags +- [ ] create_elasticsearch_domain +- [ ] delete_elasticsearch_domain +- [ ] delete_elasticsearch_service_role +- [ ] describe_elasticsearch_domain +- [ ] describe_elasticsearch_domain_config +- [ ] 
describe_elasticsearch_domains +- [ ] describe_elasticsearch_instance_type_limits +- [ ] list_domain_names +- [ ] list_elasticsearch_instance_types +- [ ] list_elasticsearch_versions +- [ ] list_tags +- [ ] remove_tags +- [ ] update_elasticsearch_domain_config + +## events - 100% implemented +- [X] delete_rule +- [X] describe_event_bus +- [X] describe_rule +- [X] disable_rule +- [X] enable_rule +- [X] list_rule_names_by_target +- [X] list_rules +- [X] list_targets_by_rule +- [X] put_events +- [X] put_permission +- [X] put_rule +- [X] put_targets +- [X] remove_permission +- [X] remove_targets +- [X] test_event_pattern + +## firehose - 0% implemented +- [ ] create_delivery_stream +- [ ] delete_delivery_stream +- [ ] describe_delivery_stream +- [ ] get_kinesis_stream +- [ ] list_delivery_streams +- [ ] put_record +- [ ] put_record_batch +- [ ] update_destination + +## gamelift - 0% implemented +- [ ] accept_match +- [ ] create_alias +- [ ] create_build +- [ ] create_fleet +- [ ] create_game_session +- [ ] create_game_session_queue +- [ ] create_matchmaking_configuration +- [ ] create_matchmaking_rule_set +- [ ] create_player_session +- [ ] create_player_sessions +- [ ] create_vpc_peering_authorization +- [ ] create_vpc_peering_connection +- [ ] delete_alias +- [ ] delete_build +- [ ] delete_fleet +- [ ] delete_game_session_queue +- [ ] delete_matchmaking_configuration +- [ ] delete_scaling_policy +- [ ] delete_vpc_peering_authorization +- [ ] delete_vpc_peering_connection +- [ ] describe_alias +- [ ] describe_build +- [ ] describe_ec2_instance_limits +- [ ] describe_fleet_attributes +- [ ] describe_fleet_capacity +- [ ] describe_fleet_events +- [ ] describe_fleet_port_settings +- [ ] describe_fleet_utilization +- [ ] describe_game_session_details +- [ ] describe_game_session_placement +- [ ] describe_game_session_queues +- [ ] describe_game_sessions +- [ ] describe_instances +- [ ] describe_matchmaking +- [ ] describe_matchmaking_configurations +- [ ] describe_matchmaking_rule_sets +- [ ] describe_player_sessions +- [ ] describe_runtime_configuration +- [ ] describe_scaling_policies +- [ ] describe_vpc_peering_authorizations +- [ ] describe_vpc_peering_connections +- [ ] get_game_session_log_url +- [ ] get_instance_access +- [ ] list_aliases +- [ ] list_builds +- [ ] list_fleets +- [ ] put_scaling_policy +- [ ] request_upload_credentials +- [ ] resolve_alias +- [ ] search_game_sessions +- [ ] start_game_session_placement +- [ ] start_matchmaking +- [ ] stop_game_session_placement +- [ ] stop_matchmaking +- [ ] update_alias +- [ ] update_build +- [ ] update_fleet_attributes +- [ ] update_fleet_capacity +- [ ] update_fleet_port_settings +- [ ] update_game_session +- [ ] update_game_session_queue +- [ ] update_matchmaking_configuration +- [ ] update_runtime_configuration +- [ ] validate_matchmaking_rule_set + +## glacier - 12% implemented +- [ ] abort_multipart_upload +- [ ] abort_vault_lock +- [ ] add_tags_to_vault +- [ ] complete_multipart_upload +- [ ] complete_vault_lock +- [X] create_vault +- [ ] delete_archive +- [X] delete_vault +- [ ] delete_vault_access_policy +- [ ] delete_vault_notifications +- [ ] describe_job +- [ ] describe_vault +- [ ] get_data_retrieval_policy +- [ ] get_job_output +- [ ] get_vault_access_policy +- [ ] get_vault_lock +- [ ] get_vault_notifications +- [X] initiate_job +- [ ] initiate_multipart_upload +- [ ] initiate_vault_lock +- [X] list_jobs +- [ ] list_multipart_uploads +- [ ] list_parts +- [ ] list_provisioned_capacity +- [ ] list_tags_for_vault +- [ ] 
list_vaults +- [ ] purchase_provisioned_capacity +- [ ] remove_tags_from_vault +- [ ] set_data_retrieval_policy +- [ ] set_vault_access_policy +- [ ] set_vault_notifications +- [ ] upload_archive +- [ ] upload_multipart_part + +## glue - 0% implemented +- [ ] batch_create_partition +- [ ] batch_delete_connection +- [ ] batch_delete_partition +- [ ] batch_delete_table +- [ ] batch_get_partition +- [ ] batch_stop_job_run +- [ ] create_classifier +- [ ] create_connection +- [ ] create_crawler +- [ ] create_database +- [ ] create_dev_endpoint +- [ ] create_job +- [ ] create_partition +- [ ] create_script +- [ ] create_table +- [ ] create_trigger +- [ ] create_user_defined_function +- [ ] delete_classifier +- [ ] delete_connection +- [ ] delete_crawler +- [ ] delete_database +- [ ] delete_dev_endpoint +- [ ] delete_job +- [ ] delete_partition +- [ ] delete_table +- [ ] delete_trigger +- [ ] delete_user_defined_function +- [ ] get_catalog_import_status +- [ ] get_classifier +- [ ] get_classifiers +- [ ] get_connection +- [ ] get_connections +- [ ] get_crawler +- [ ] get_crawler_metrics +- [ ] get_crawlers +- [ ] get_database +- [ ] get_databases +- [ ] get_dataflow_graph +- [ ] get_dev_endpoint +- [ ] get_dev_endpoints +- [ ] get_job +- [ ] get_job_run +- [ ] get_job_runs +- [ ] get_jobs +- [ ] get_mapping +- [ ] get_partition +- [ ] get_partitions +- [ ] get_plan +- [ ] get_table +- [ ] get_table_versions +- [ ] get_tables +- [ ] get_trigger +- [ ] get_triggers +- [ ] get_user_defined_function +- [ ] get_user_defined_functions +- [ ] import_catalog_to_glue +- [ ] reset_job_bookmark +- [ ] start_crawler +- [ ] start_crawler_schedule +- [ ] start_job_run +- [ ] start_trigger +- [ ] stop_crawler +- [ ] stop_crawler_schedule +- [ ] stop_trigger +- [ ] update_classifier +- [ ] update_connection +- [ ] update_crawler +- [ ] update_crawler_schedule +- [ ] update_database +- [ ] update_dev_endpoint +- [ ] update_job +- [ ] update_partition +- [ ] update_table +- [ ] update_trigger +- [ ] update_user_defined_function + +## greengrass - 0% implemented +- [ ] associate_role_to_group +- [ ] associate_service_role_to_account +- [ ] create_core_definition +- [ ] create_core_definition_version +- [ ] create_deployment +- [ ] create_device_definition +- [ ] create_device_definition_version +- [ ] create_function_definition +- [ ] create_function_definition_version +- [ ] create_group +- [ ] create_group_certificate_authority +- [ ] create_group_version +- [ ] create_logger_definition +- [ ] create_logger_definition_version +- [ ] create_subscription_definition +- [ ] create_subscription_definition_version +- [ ] delete_core_definition +- [ ] delete_device_definition +- [ ] delete_function_definition +- [ ] delete_group +- [ ] delete_logger_definition +- [ ] delete_subscription_definition +- [ ] disassociate_role_from_group +- [ ] disassociate_service_role_from_account +- [ ] get_associated_role +- [ ] get_connectivity_info +- [ ] get_core_definition +- [ ] get_core_definition_version +- [ ] get_deployment_status +- [ ] get_device_definition +- [ ] get_device_definition_version +- [ ] get_function_definition +- [ ] get_function_definition_version +- [ ] get_group +- [ ] get_group_certificate_authority +- [ ] get_group_certificate_configuration +- [ ] get_group_version +- [ ] get_logger_definition +- [ ] get_logger_definition_version +- [ ] get_service_role_for_account +- [ ] get_subscription_definition +- [ ] get_subscription_definition_version +- [ ] list_core_definition_versions +- [ ] list_core_definitions 
+- [ ] list_deployments +- [ ] list_device_definition_versions +- [ ] list_device_definitions +- [ ] list_function_definition_versions +- [ ] list_function_definitions +- [ ] list_group_certificate_authorities +- [ ] list_group_versions +- [ ] list_groups +- [ ] list_logger_definition_versions +- [ ] list_logger_definitions +- [ ] list_subscription_definition_versions +- [ ] list_subscription_definitions +- [ ] reset_deployments +- [ ] update_connectivity_info +- [ ] update_core_definition +- [ ] update_device_definition +- [ ] update_function_definition +- [ ] update_group +- [ ] update_group_certificate_configuration +- [ ] update_logger_definition +- [ ] update_subscription_definition + +## health - 0% implemented +- [ ] describe_affected_entities +- [ ] describe_entity_aggregates +- [ ] describe_event_aggregates +- [ ] describe_event_details +- [ ] describe_event_types +- [ ] describe_events + +## iam - 46% implemented +- [ ] add_client_id_to_open_id_connect_provider +- [X] add_role_to_instance_profile +- [X] add_user_to_group +- [X] attach_group_policy +- [X] attach_role_policy +- [X] attach_user_policy +- [ ] change_password +- [X] create_access_key +- [X] create_account_alias +- [X] create_group +- [X] create_instance_profile +- [X] create_login_profile +- [ ] create_open_id_connect_provider +- [X] create_policy +- [X] create_policy_version +- [X] create_role +- [ ] create_saml_provider +- [ ] create_service_linked_role +- [ ] create_service_specific_credential +- [X] create_user +- [ ] create_virtual_mfa_device +- [X] deactivate_mfa_device +- [X] delete_access_key +- [X] delete_account_alias +- [ ] delete_account_password_policy +- [ ] delete_group +- [ ] delete_group_policy +- [ ] delete_instance_profile +- [X] delete_login_profile +- [ ] delete_open_id_connect_provider +- [ ] delete_policy +- [X] delete_policy_version +- [X] delete_role +- [X] delete_role_policy +- [ ] delete_saml_provider +- [ ] delete_server_certificate +- [ ] delete_service_linked_role +- [ ] delete_service_specific_credential +- [ ] delete_signing_certificate +- [ ] delete_ssh_public_key +- [X] delete_user +- [X] delete_user_policy +- [ ] delete_virtual_mfa_device +- [X] detach_group_policy +- [X] detach_role_policy +- [X] detach_user_policy +- [X] enable_mfa_device +- [ ] generate_credential_report +- [ ] get_access_key_last_used +- [ ] get_account_authorization_details +- [ ] get_account_password_policy +- [ ] get_account_summary +- [ ] get_context_keys_for_custom_policy +- [ ] get_context_keys_for_principal_policy +- [X] get_credential_report +- [X] get_group +- [X] get_group_policy +- [X] get_instance_profile +- [X] get_login_profile +- [ ] get_open_id_connect_provider +- [X] get_policy +- [X] get_policy_version +- [X] get_role +- [X] get_role_policy +- [ ] get_saml_provider +- [X] get_server_certificate +- [ ] get_service_linked_role_deletion_status +- [ ] get_ssh_public_key +- [X] get_user +- [X] get_user_policy +- [ ] list_access_keys +- [X] list_account_aliases +- [X] list_attached_group_policies +- [X] list_attached_role_policies +- [X] list_attached_user_policies +- [ ] list_entities_for_policy +- [X] list_group_policies +- [X] list_groups +- [ ] list_groups_for_user +- [ ] list_instance_profiles +- [ ] list_instance_profiles_for_role +- [X] list_mfa_devices +- [ ] list_open_id_connect_providers +- [X] list_policies +- [X] list_policy_versions +- [X] list_role_policies +- [ ] list_roles +- [ ] list_saml_providers +- [ ] list_server_certificates +- [ ] list_service_specific_credentials +- [ ] 
list_signing_certificates +- [ ] list_ssh_public_keys +- [X] list_user_policies +- [X] list_users +- [ ] list_virtual_mfa_devices +- [X] put_group_policy +- [X] put_role_policy +- [X] put_user_policy +- [ ] remove_client_id_from_open_id_connect_provider +- [X] remove_role_from_instance_profile +- [X] remove_user_from_group +- [ ] reset_service_specific_credential +- [ ] resync_mfa_device +- [ ] set_default_policy_version +- [ ] simulate_custom_policy +- [ ] simulate_principal_policy +- [ ] update_access_key +- [ ] update_account_password_policy +- [ ] update_assume_role_policy +- [ ] update_group +- [X] update_login_profile +- [ ] update_open_id_connect_provider_thumbprint +- [ ] update_role_description +- [ ] update_saml_provider +- [ ] update_server_certificate +- [ ] update_service_specific_credential +- [ ] update_signing_certificate +- [ ] update_ssh_public_key +- [ ] update_user +- [ ] upload_server_certificate +- [ ] upload_signing_certificate +- [ ] upload_ssh_public_key + +## importexport - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] get_shipping_label +- [ ] get_status +- [ ] list_jobs +- [ ] update_job + +## inspector - 0% implemented +- [ ] add_attributes_to_findings +- [ ] create_assessment_target +- [ ] create_assessment_template +- [ ] create_resource_group +- [ ] delete_assessment_run +- [ ] delete_assessment_target +- [ ] delete_assessment_template +- [ ] describe_assessment_runs +- [ ] describe_assessment_targets +- [ ] describe_assessment_templates +- [ ] describe_cross_account_access_role +- [ ] describe_findings +- [ ] describe_resource_groups +- [ ] describe_rules_packages +- [ ] get_assessment_report +- [ ] get_telemetry_metadata +- [ ] list_assessment_run_agents +- [ ] list_assessment_runs +- [ ] list_assessment_targets +- [ ] list_assessment_templates +- [ ] list_event_subscriptions +- [ ] list_findings +- [ ] list_rules_packages +- [ ] list_tags_for_resource +- [ ] preview_agents +- [ ] register_cross_account_access_role +- [ ] remove_attributes_from_findings +- [ ] set_tags_for_resource +- [ ] start_assessment_run +- [ ] stop_assessment_run +- [ ] subscribe_to_event +- [ ] unsubscribe_from_event +- [ ] update_assessment_target + +## iot - 45% implemented +- [ ] accept_certificate_transfer +- [X] attach_principal_policy +- [X] attach_thing_principal +- [ ] cancel_certificate_transfer +- [ ] create_certificate_from_csr +- [X] create_keys_and_certificate +- [X] create_policy +- [ ] create_policy_version +- [X] create_thing +- [X] create_thing_type +- [ ] create_topic_rule +- [ ] delete_ca_certificate +- [X] delete_certificate +- [X] delete_policy +- [ ] delete_policy_version +- [ ] delete_registration_code +- [X] delete_thing +- [X] delete_thing_type +- [ ] delete_topic_rule +- [ ] deprecate_thing_type +- [ ] describe_ca_certificate +- [X] describe_certificate +- [ ] describe_endpoint +- [X] describe_thing +- [X] describe_thing_type +- [X] detach_principal_policy +- [X] detach_thing_principal +- [ ] disable_topic_rule +- [ ] enable_topic_rule +- [ ] get_logging_options +- [X] get_policy +- [ ] get_policy_version +- [ ] get_registration_code +- [ ] get_topic_rule +- [ ] list_ca_certificates +- [X] list_certificates +- [ ] list_certificates_by_ca +- [ ] list_outgoing_certificates +- [X] list_policies +- [X] list_policy_principals +- [ ] list_policy_versions +- [X] list_principal_policies +- [X] list_principal_things +- [X] list_thing_principals +- [X] list_thing_types +- [X] list_things +- [ ] list_topic_rules +- [ ] register_ca_certificate +- [ ] 
register_certificate +- [ ] reject_certificate_transfer +- [ ] replace_topic_rule +- [ ] set_default_policy_version +- [ ] set_logging_options +- [ ] transfer_certificate +- [ ] update_ca_certificate +- [X] update_certificate +- [X] update_thing + +## iot-data - 0% implemented +- [ ] delete_thing_shadow +- [ ] get_thing_shadow +- [ ] publish +- [ ] update_thing_shadow + +## kinesis - 61% implemented +- [X] add_tags_to_stream +- [X] create_stream +- [ ] decrease_stream_retention_period +- [X] delete_stream +- [ ] describe_limits +- [X] describe_stream +- [ ] disable_enhanced_monitoring +- [ ] enable_enhanced_monitoring +- [X] get_records +- [X] get_shard_iterator +- [ ] increase_stream_retention_period +- [X] list_streams +- [X] list_tags_for_stream +- [X] merge_shards +- [X] put_record +- [X] put_records +- [X] remove_tags_from_stream +- [X] split_shard +- [ ] start_stream_encryption +- [ ] stop_stream_encryption +- [ ] update_shard_count + +## kinesisanalytics - 0% implemented +- [ ] add_application_cloud_watch_logging_option +- [ ] add_application_input +- [ ] add_application_input_processing_configuration +- [ ] add_application_output +- [ ] add_application_reference_data_source +- [ ] create_application +- [ ] delete_application +- [ ] delete_application_cloud_watch_logging_option +- [ ] delete_application_input_processing_configuration +- [ ] delete_application_output +- [ ] delete_application_reference_data_source +- [ ] describe_application +- [ ] discover_input_schema +- [ ] list_applications +- [ ] start_application +- [ ] stop_application +- [ ] update_application + +## kms - 25% implemented +- [ ] cancel_key_deletion +- [ ] create_alias +- [ ] create_grant +- [X] create_key +- [ ] decrypt +- [X] delete_alias +- [ ] delete_imported_key_material +- [X] describe_key +- [ ] disable_key +- [X] disable_key_rotation +- [ ] enable_key +- [X] enable_key_rotation +- [ ] encrypt +- [ ] generate_data_key +- [ ] generate_data_key_without_plaintext +- [ ] generate_random +- [X] get_key_policy +- [X] get_key_rotation_status +- [ ] get_parameters_for_import +- [ ] import_key_material +- [ ] list_aliases +- [ ] list_grants +- [ ] list_key_policies +- [X] list_keys +- [ ] list_resource_tags +- [ ] list_retirable_grants +- [X] put_key_policy +- [ ] re_encrypt +- [ ] retire_grant +- [ ] revoke_grant +- [ ] schedule_key_deletion +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_alias +- [ ] update_key_description + +## lambda - 0% implemented +- [ ] add_permission +- [ ] create_alias +- [ ] create_event_source_mapping +- [ ] create_function +- [ ] delete_alias +- [ ] delete_event_source_mapping +- [ ] delete_function +- [ ] get_account_settings +- [ ] get_alias +- [ ] get_event_source_mapping +- [ ] get_function +- [ ] get_function_configuration +- [ ] get_policy +- [ ] invoke +- [ ] invoke_async +- [ ] list_aliases +- [ ] list_event_source_mappings +- [ ] list_functions +- [ ] list_tags +- [ ] list_versions_by_function +- [ ] publish_version +- [ ] remove_permission +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_alias +- [ ] update_event_source_mapping +- [ ] update_function_code +- [ ] update_function_configuration + +## lex-models - 0% implemented +- [ ] create_bot_version +- [ ] create_intent_version +- [ ] create_slot_type_version +- [ ] delete_bot +- [ ] delete_bot_alias +- [ ] delete_bot_channel_association +- [ ] delete_bot_version +- [ ] delete_intent +- [ ] delete_intent_version +- [ ] delete_slot_type +- [ ] delete_slot_type_version +- [ ] delete_utterances +- [ ] get_bot 
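
(Aside on the `iot` and `iot-data` coverage listed above: the sketch below is not part of this diff, just a minimal illustration of how the endpoints marked `[X]` could be exercised with boto3 and the `@mock_iot` decorator exported by this change. The thing and type names are made-up examples.)

```python
import boto3
from moto import mock_iot


@mock_iot
def exercise_thing_endpoints():
    # Hypothetical usage sketch: create a thing type and a thing, then read them back
    # through the mocked list_things / describe_thing endpoints.
    client = boto3.client("iot", region_name="us-east-1")
    client.create_thing_type(thingTypeName="sensor")
    client.create_thing(thingName="thermostat-1", thingTypeName="sensor")

    things = client.list_things()["things"]
    assert len(things) == 1
    assert client.describe_thing(thingName="thermostat-1")["thingName"] == "thermostat-1"


exercise_thing_endpoints()
```
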
+- [ ] get_bot_alias +- [ ] get_bot_aliases +- [ ] get_bot_channel_association +- [ ] get_bot_channel_associations +- [ ] get_bot_versions +- [ ] get_bots +- [ ] get_builtin_intent +- [ ] get_builtin_intents +- [ ] get_builtin_slot_types +- [ ] get_export +- [ ] get_intent +- [ ] get_intent_versions +- [ ] get_intents +- [ ] get_slot_type +- [ ] get_slot_type_versions +- [ ] get_slot_types +- [ ] get_utterances_view +- [ ] put_bot +- [ ] put_bot_alias +- [ ] put_intent +- [ ] put_slot_type + +## lex-runtime - 0% implemented +- [ ] post_content +- [ ] post_text + +## lightsail - 0% implemented +- [ ] allocate_static_ip +- [ ] attach_disk +- [ ] attach_static_ip +- [ ] close_instance_public_ports +- [ ] create_disk +- [ ] create_disk_from_snapshot +- [ ] create_disk_snapshot +- [ ] create_domain +- [ ] create_domain_entry +- [ ] create_instance_snapshot +- [ ] create_instances +- [ ] create_instances_from_snapshot +- [ ] create_key_pair +- [ ] delete_disk +- [ ] delete_disk_snapshot +- [ ] delete_domain +- [ ] delete_domain_entry +- [ ] delete_instance +- [ ] delete_instance_snapshot +- [ ] delete_key_pair +- [ ] detach_disk +- [ ] detach_static_ip +- [ ] download_default_key_pair +- [ ] get_active_names +- [ ] get_blueprints +- [ ] get_bundles +- [ ] get_disk +- [ ] get_disk_snapshot +- [ ] get_disk_snapshots +- [ ] get_disks +- [ ] get_domain +- [ ] get_domains +- [ ] get_instance +- [ ] get_instance_access_details +- [ ] get_instance_metric_data +- [ ] get_instance_port_states +- [ ] get_instance_snapshot +- [ ] get_instance_snapshots +- [ ] get_instance_state +- [ ] get_instances +- [ ] get_key_pair +- [ ] get_key_pairs +- [ ] get_operation +- [ ] get_operations +- [ ] get_operations_for_resource +- [ ] get_regions +- [ ] get_static_ip +- [ ] get_static_ips +- [ ] import_key_pair +- [ ] is_vpc_peered +- [ ] open_instance_public_ports +- [ ] peer_vpc +- [ ] put_instance_public_ports +- [ ] reboot_instance +- [ ] release_static_ip +- [ ] start_instance +- [ ] stop_instance +- [ ] unpeer_vpc +- [ ] update_domain_entry + +## logs - 24% implemented +- [ ] associate_kms_key +- [ ] cancel_export_task +- [ ] create_export_task +- [X] create_log_group +- [X] create_log_stream +- [ ] delete_destination +- [X] delete_log_group +- [X] delete_log_stream +- [ ] delete_metric_filter +- [ ] delete_resource_policy +- [ ] delete_retention_policy +- [ ] delete_subscription_filter +- [ ] describe_destinations +- [ ] describe_export_tasks +- [ ] describe_log_groups +- [X] describe_log_streams +- [ ] describe_metric_filters +- [ ] describe_resource_policies +- [ ] describe_subscription_filters +- [ ] disassociate_kms_key +- [X] filter_log_events +- [X] get_log_events +- [ ] list_tags_log_group +- [ ] put_destination +- [ ] put_destination_policy +- [X] put_log_events +- [ ] put_metric_filter +- [ ] put_resource_policy +- [ ] put_retention_policy +- [ ] put_subscription_filter +- [ ] tag_log_group +- [ ] test_metric_filter +- [ ] untag_log_group + +## machinelearning - 0% implemented +- [ ] add_tags +- [ ] create_batch_prediction +- [ ] create_data_source_from_rds +- [ ] create_data_source_from_redshift +- [ ] create_data_source_from_s3 +- [ ] create_evaluation +- [ ] create_ml_model +- [ ] create_realtime_endpoint +- [ ] delete_batch_prediction +- [ ] delete_data_source +- [ ] delete_evaluation +- [ ] delete_ml_model +- [ ] delete_realtime_endpoint +- [ ] delete_tags +- [ ] describe_batch_predictions +- [ ] describe_data_sources +- [ ] describe_evaluations +- [ ] describe_ml_models +- [ ] describe_tags +- [ ] 
get_batch_prediction +- [ ] get_data_source +- [ ] get_evaluation +- [ ] get_ml_model +- [ ] predict +- [ ] update_batch_prediction +- [ ] update_data_source +- [ ] update_evaluation +- [ ] update_ml_model + +## marketplace-entitlement - 0% implemented +- [ ] get_entitlements + +## marketplacecommerceanalytics - 0% implemented +- [ ] generate_data_set +- [ ] start_support_data_export + +## meteringmarketplace - 0% implemented +- [ ] batch_meter_usage +- [ ] meter_usage +- [ ] resolve_customer + +## mgh - 0% implemented +- [ ] associate_created_artifact +- [ ] associate_discovered_resource +- [ ] create_progress_update_stream +- [ ] delete_progress_update_stream +- [ ] describe_application_state +- [ ] describe_migration_task +- [ ] disassociate_created_artifact +- [ ] disassociate_discovered_resource +- [ ] import_migration_task +- [ ] list_created_artifacts +- [ ] list_discovered_resources +- [ ] list_migration_tasks +- [ ] list_progress_update_streams +- [ ] notify_application_state +- [ ] notify_migration_task_state +- [ ] put_resource_attributes + +## mobile - 0% implemented +- [ ] create_project +- [ ] delete_project +- [ ] describe_bundle +- [ ] describe_project +- [ ] export_bundle +- [ ] export_project +- [ ] list_bundles +- [ ] list_projects +- [ ] update_project + +## mturk - 0% implemented +- [ ] accept_qualification_request +- [ ] approve_assignment +- [ ] associate_qualification_with_worker +- [ ] create_additional_assignments_for_hit +- [ ] create_hit +- [ ] create_hit_type +- [ ] create_hit_with_hit_type +- [ ] create_qualification_type +- [ ] create_worker_block +- [ ] delete_hit +- [ ] delete_qualification_type +- [ ] delete_worker_block +- [ ] disassociate_qualification_from_worker +- [ ] get_account_balance +- [ ] get_assignment +- [ ] get_file_upload_url +- [ ] get_hit +- [ ] get_qualification_score +- [ ] get_qualification_type +- [ ] list_assignments_for_hit +- [ ] list_bonus_payments +- [ ] list_hits +- [ ] list_hits_for_qualification_type +- [ ] list_qualification_requests +- [ ] list_qualification_types +- [ ] list_review_policy_results_for_hit +- [ ] list_reviewable_hits +- [ ] list_worker_blocks +- [ ] list_workers_with_qualification_type +- [ ] notify_workers +- [ ] reject_assignment +- [ ] reject_qualification_request +- [ ] send_bonus +- [ ] send_test_event_notification +- [ ] update_expiration_for_hit +- [ ] update_hit_review_status +- [ ] update_hit_type_of_hit +- [ ] update_notification_settings +- [ ] update_qualification_type + +## opsworks - 9% implemented +- [ ] assign_instance +- [ ] assign_volume +- [ ] associate_elastic_ip +- [ ] attach_elastic_load_balancer +- [ ] clone_stack +- [ ] create_app +- [ ] create_deployment +- [X] create_instance +- [X] create_layer +- [X] create_stack +- [ ] create_user_profile +- [ ] delete_app +- [ ] delete_instance +- [ ] delete_layer +- [ ] delete_stack +- [ ] delete_user_profile +- [ ] deregister_ecs_cluster +- [ ] deregister_elastic_ip +- [ ] deregister_instance +- [ ] deregister_rds_db_instance +- [ ] deregister_volume +- [ ] describe_agent_versions +- [ ] describe_apps +- [ ] describe_commands +- [ ] describe_deployments +- [ ] describe_ecs_clusters +- [ ] describe_elastic_ips +- [ ] describe_elastic_load_balancers +- [X] describe_instances +- [X] describe_layers +- [ ] describe_load_based_auto_scaling +- [ ] describe_my_user_profile +- [ ] describe_permissions +- [ ] describe_raid_arrays +- [ ] describe_rds_db_instances +- [ ] describe_service_errors +- [ ] describe_stack_provisioning_parameters +- [ ] 
describe_stack_summary +- [X] describe_stacks +- [ ] describe_time_based_auto_scaling +- [ ] describe_user_profiles +- [ ] describe_volumes +- [ ] detach_elastic_load_balancer +- [ ] disassociate_elastic_ip +- [ ] get_hostname_suggestion +- [ ] grant_access +- [ ] list_tags +- [ ] reboot_instance +- [ ] register_ecs_cluster +- [ ] register_elastic_ip +- [ ] register_instance +- [ ] register_rds_db_instance +- [ ] register_volume +- [ ] set_load_based_auto_scaling +- [ ] set_permission +- [ ] set_time_based_auto_scaling +- [X] start_instance +- [ ] start_stack +- [ ] stop_instance +- [ ] stop_stack +- [ ] tag_resource +- [ ] unassign_instance +- [ ] unassign_volume +- [ ] untag_resource +- [ ] update_app +- [ ] update_elastic_ip +- [ ] update_instance +- [ ] update_layer +- [ ] update_my_user_profile +- [ ] update_rds_db_instance +- [ ] update_stack +- [ ] update_user_profile +- [ ] update_volume + +## opsworkscm - 0% implemented +- [ ] associate_node +- [ ] create_backup +- [ ] create_server +- [ ] delete_backup +- [ ] delete_server +- [ ] describe_account_attributes +- [ ] describe_backups +- [ ] describe_events +- [ ] describe_node_association_status +- [ ] describe_servers +- [ ] disassociate_node +- [ ] restore_server +- [ ] start_maintenance +- [ ] update_server +- [ ] update_server_engine_attributes + +## organizations - 0% implemented +- [ ] accept_handshake +- [ ] attach_policy +- [ ] cancel_handshake +- [ ] create_account +- [ ] create_organization +- [ ] create_organizational_unit +- [ ] create_policy +- [ ] decline_handshake +- [ ] delete_organization +- [ ] delete_organizational_unit +- [ ] delete_policy +- [ ] describe_account +- [ ] describe_create_account_status +- [ ] describe_handshake +- [ ] describe_organization +- [ ] describe_organizational_unit +- [ ] describe_policy +- [ ] detach_policy +- [ ] disable_aws_service_access +- [ ] disable_policy_type +- [ ] enable_all_features +- [ ] enable_aws_service_access +- [ ] enable_policy_type +- [ ] invite_account_to_organization +- [ ] leave_organization +- [ ] list_accounts +- [ ] list_accounts_for_parent +- [ ] list_aws_service_access_for_organization +- [ ] list_children +- [ ] list_create_account_status +- [ ] list_handshakes_for_account +- [ ] list_handshakes_for_organization +- [ ] list_organizational_units_for_parent +- [ ] list_parents +- [ ] list_policies +- [ ] list_policies_for_target +- [ ] list_roots +- [ ] list_targets_for_policy +- [ ] move_account +- [ ] remove_account_from_organization +- [ ] update_organizational_unit +- [ ] update_policy + +## pinpoint - 0% implemented +- [ ] create_app +- [ ] create_campaign +- [ ] create_import_job +- [ ] create_segment +- [ ] delete_adm_channel +- [ ] delete_apns_channel +- [ ] delete_apns_sandbox_channel +- [ ] delete_apns_voip_channel +- [ ] delete_apns_voip_sandbox_channel +- [ ] delete_app +- [ ] delete_baidu_channel +- [ ] delete_campaign +- [ ] delete_email_channel +- [ ] delete_event_stream +- [ ] delete_gcm_channel +- [ ] delete_segment +- [ ] delete_sms_channel +- [ ] get_adm_channel +- [ ] get_apns_channel +- [ ] get_apns_sandbox_channel +- [ ] get_apns_voip_channel +- [ ] get_apns_voip_sandbox_channel +- [ ] get_app +- [ ] get_application_settings +- [ ] get_apps +- [ ] get_baidu_channel +- [ ] get_campaign +- [ ] get_campaign_activities +- [ ] get_campaign_version +- [ ] get_campaign_versions +- [ ] get_campaigns +- [ ] get_email_channel +- [ ] get_endpoint +- [ ] get_event_stream +- [ ] get_gcm_channel +- [ ] get_import_job +- [ ] get_import_jobs +- [ ] 
get_segment +- [ ] get_segment_import_jobs +- [ ] get_segment_version +- [ ] get_segment_versions +- [ ] get_segments +- [ ] get_sms_channel +- [ ] put_event_stream +- [ ] send_messages +- [ ] send_users_messages +- [ ] update_adm_channel +- [ ] update_apns_channel +- [ ] update_apns_sandbox_channel +- [ ] update_apns_voip_channel +- [ ] update_apns_voip_sandbox_channel +- [ ] update_application_settings +- [ ] update_baidu_channel +- [ ] update_campaign +- [ ] update_email_channel +- [ ] update_endpoint +- [ ] update_endpoints_batch +- [ ] update_gcm_channel +- [ ] update_segment +- [ ] update_sms_channel + +## polly - 83% implemented +- [X] delete_lexicon +- [X] describe_voices +- [X] get_lexicon +- [X] list_lexicons +- [X] put_lexicon +- [ ] synthesize_speech + +## pricing - 0% implemented +- [ ] describe_services +- [ ] get_attribute_values +- [ ] get_products + +## rds - 0% implemented +- [ ] add_role_to_db_cluster +- [ ] add_source_identifier_to_subscription +- [ ] add_tags_to_resource +- [ ] apply_pending_maintenance_action +- [ ] authorize_db_security_group_ingress +- [ ] copy_db_cluster_parameter_group +- [ ] copy_db_cluster_snapshot +- [ ] copy_db_parameter_group +- [ ] copy_db_snapshot +- [ ] copy_option_group +- [ ] create_db_cluster +- [ ] create_db_cluster_parameter_group +- [ ] create_db_cluster_snapshot +- [ ] create_db_instance +- [ ] create_db_instance_read_replica +- [ ] create_db_parameter_group +- [ ] create_db_security_group +- [ ] create_db_snapshot +- [ ] create_db_subnet_group +- [ ] create_event_subscription +- [ ] create_option_group +- [ ] delete_db_cluster +- [ ] delete_db_cluster_parameter_group +- [ ] delete_db_cluster_snapshot +- [ ] delete_db_instance +- [ ] delete_db_parameter_group +- [ ] delete_db_security_group +- [ ] delete_db_snapshot +- [ ] delete_db_subnet_group +- [ ] delete_event_subscription +- [ ] delete_option_group +- [ ] describe_account_attributes +- [ ] describe_certificates +- [ ] describe_db_cluster_parameter_groups +- [ ] describe_db_cluster_parameters +- [ ] describe_db_cluster_snapshot_attributes +- [ ] describe_db_cluster_snapshots +- [ ] describe_db_clusters +- [ ] describe_db_engine_versions +- [ ] describe_db_instances +- [ ] describe_db_log_files +- [ ] describe_db_parameter_groups +- [ ] describe_db_parameters +- [ ] describe_db_security_groups +- [ ] describe_db_snapshot_attributes +- [ ] describe_db_snapshots +- [ ] describe_db_subnet_groups +- [ ] describe_engine_default_cluster_parameters +- [ ] describe_engine_default_parameters +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_option_group_options +- [ ] describe_option_groups +- [ ] describe_orderable_db_instance_options +- [ ] describe_pending_maintenance_actions +- [ ] describe_reserved_db_instances +- [ ] describe_reserved_db_instances_offerings +- [ ] describe_source_regions +- [ ] describe_valid_db_instance_modifications +- [ ] download_db_log_file_portion +- [ ] failover_db_cluster +- [ ] list_tags_for_resource +- [ ] modify_db_cluster +- [ ] modify_db_cluster_parameter_group +- [ ] modify_db_cluster_snapshot_attribute +- [ ] modify_db_instance +- [ ] modify_db_parameter_group +- [ ] modify_db_snapshot +- [ ] modify_db_snapshot_attribute +- [ ] modify_db_subnet_group +- [ ] modify_event_subscription +- [ ] modify_option_group +- [ ] promote_read_replica +- [ ] promote_read_replica_db_cluster +- [ ] purchase_reserved_db_instances_offering +- [ ] reboot_db_instance +- [ ] remove_role_from_db_cluster +- [ ] 
remove_source_identifier_from_subscription +- [ ] remove_tags_from_resource +- [ ] reset_db_cluster_parameter_group +- [ ] reset_db_parameter_group +- [ ] restore_db_cluster_from_s3 +- [ ] restore_db_cluster_from_snapshot +- [ ] restore_db_cluster_to_point_in_time +- [ ] restore_db_instance_from_db_snapshot +- [ ] restore_db_instance_from_s3 +- [ ] restore_db_instance_to_point_in_time +- [ ] revoke_db_security_group_ingress +- [ ] start_db_instance +- [ ] stop_db_instance + +## redshift - 31% implemented +- [ ] authorize_cluster_security_group_ingress +- [ ] authorize_snapshot_access +- [ ] copy_cluster_snapshot +- [X] create_cluster +- [X] create_cluster_parameter_group +- [X] create_cluster_security_group +- [X] create_cluster_snapshot +- [X] create_cluster_subnet_group +- [ ] create_event_subscription +- [ ] create_hsm_client_certificate +- [ ] create_hsm_configuration +- [ ] create_snapshot_copy_grant +- [X] create_tags +- [X] delete_cluster +- [X] delete_cluster_parameter_group +- [X] delete_cluster_security_group +- [X] delete_cluster_snapshot +- [X] delete_cluster_subnet_group +- [ ] delete_event_subscription +- [ ] delete_hsm_client_certificate +- [ ] delete_hsm_configuration +- [ ] delete_snapshot_copy_grant +- [X] delete_tags +- [X] describe_cluster_parameter_groups +- [ ] describe_cluster_parameters +- [X] describe_cluster_security_groups +- [X] describe_cluster_snapshots +- [X] describe_cluster_subnet_groups +- [ ] describe_cluster_versions +- [X] describe_clusters +- [ ] describe_default_cluster_parameters +- [ ] describe_event_categories +- [ ] describe_event_subscriptions +- [ ] describe_events +- [ ] describe_hsm_client_certificates +- [ ] describe_hsm_configurations +- [ ] describe_logging_status +- [ ] describe_orderable_cluster_options +- [ ] describe_reserved_node_offerings +- [ ] describe_reserved_nodes +- [ ] describe_resize +- [ ] describe_snapshot_copy_grants +- [ ] describe_table_restore_status +- [X] describe_tags +- [ ] disable_logging +- [ ] disable_snapshot_copy +- [ ] enable_logging +- [ ] enable_snapshot_copy +- [ ] get_cluster_credentials +- [X] modify_cluster +- [ ] modify_cluster_iam_roles +- [ ] modify_cluster_parameter_group +- [ ] modify_cluster_subnet_group +- [ ] modify_event_subscription +- [ ] modify_snapshot_copy_retention_period +- [ ] purchase_reserved_node_offering +- [ ] reboot_cluster +- [ ] reset_cluster_parameter_group +- [X] restore_from_cluster_snapshot +- [ ] restore_table_from_cluster_snapshot +- [ ] revoke_cluster_security_group_ingress +- [ ] revoke_snapshot_access +- [ ] rotate_encryption_key + +## rekognition - 0% implemented +- [ ] compare_faces +- [ ] create_collection +- [ ] delete_collection +- [ ] delete_faces +- [ ] detect_faces +- [ ] detect_labels +- [ ] detect_moderation_labels +- [ ] get_celebrity_info +- [ ] index_faces +- [ ] list_collections +- [ ] list_faces +- [ ] recognize_celebrities +- [ ] search_faces +- [ ] search_faces_by_image + +## resourcegroupstaggingapi - 60% implemented +- [X] get_resources +- [X] get_tag_keys +- [X] get_tag_values +- [ ] tag_resources +- [ ] untag_resources + +## route53 - 12% implemented +- [ ] associate_vpc_with_hosted_zone +- [ ] change_resource_record_sets +- [X] change_tags_for_resource +- [X] create_health_check +- [X] create_hosted_zone +- [ ] create_query_logging_config +- [ ] create_reusable_delegation_set +- [ ] create_traffic_policy +- [ ] create_traffic_policy_instance +- [ ] create_traffic_policy_version +- [ ] create_vpc_association_authorization +- [X] delete_health_check +- 
[X] delete_hosted_zone +- [ ] delete_query_logging_config +- [ ] delete_reusable_delegation_set +- [ ] delete_traffic_policy +- [ ] delete_traffic_policy_instance +- [ ] delete_vpc_association_authorization +- [ ] disassociate_vpc_from_hosted_zone +- [ ] get_account_limit +- [ ] get_change +- [ ] get_checker_ip_ranges +- [ ] get_geo_location +- [ ] get_health_check +- [ ] get_health_check_count +- [ ] get_health_check_last_failure_reason +- [ ] get_health_check_status +- [X] get_hosted_zone +- [ ] get_hosted_zone_count +- [ ] get_hosted_zone_limit +- [ ] get_query_logging_config +- [ ] get_reusable_delegation_set +- [ ] get_reusable_delegation_set_limit +- [ ] get_traffic_policy +- [ ] get_traffic_policy_instance +- [ ] get_traffic_policy_instance_count +- [ ] list_geo_locations +- [ ] list_health_checks +- [ ] list_hosted_zones +- [ ] list_hosted_zones_by_name +- [ ] list_query_logging_configs +- [ ] list_resource_record_sets +- [ ] list_reusable_delegation_sets +- [X] list_tags_for_resource +- [ ] list_tags_for_resources +- [ ] list_traffic_policies +- [ ] list_traffic_policy_instances +- [ ] list_traffic_policy_instances_by_hosted_zone +- [ ] list_traffic_policy_instances_by_policy +- [ ] list_traffic_policy_versions +- [ ] list_vpc_association_authorizations +- [ ] test_dns_answer +- [ ] update_health_check +- [ ] update_hosted_zone_comment +- [ ] update_traffic_policy_comment +- [ ] update_traffic_policy_instance + +## route53domains - 0% implemented +- [ ] check_domain_availability +- [ ] check_domain_transferability +- [ ] delete_tags_for_domain +- [ ] disable_domain_auto_renew +- [ ] disable_domain_transfer_lock +- [ ] enable_domain_auto_renew +- [ ] enable_domain_transfer_lock +- [ ] get_contact_reachability_status +- [ ] get_domain_detail +- [ ] get_domain_suggestions +- [ ] get_operation_detail +- [ ] list_domains +- [ ] list_operations +- [ ] list_tags_for_domain +- [ ] register_domain +- [ ] renew_domain +- [ ] resend_contact_reachability_email +- [ ] retrieve_domain_auth_code +- [ ] transfer_domain +- [ ] update_domain_contact +- [ ] update_domain_contact_privacy +- [ ] update_domain_nameservers +- [ ] update_tags_for_domain +- [ ] view_billing + +## s3 - 12% implemented +- [ ] abort_multipart_upload +- [ ] complete_multipart_upload +- [ ] copy_object +- [X] create_bucket +- [ ] create_multipart_upload +- [X] delete_bucket +- [ ] delete_bucket_analytics_configuration +- [X] delete_bucket_cors +- [ ] delete_bucket_encryption +- [ ] delete_bucket_inventory_configuration +- [ ] delete_bucket_lifecycle +- [ ] delete_bucket_metrics_configuration +- [X] delete_bucket_policy +- [ ] delete_bucket_replication +- [X] delete_bucket_tagging +- [ ] delete_bucket_website +- [ ] delete_object +- [ ] delete_object_tagging +- [ ] delete_objects +- [ ] get_bucket_accelerate_configuration +- [X] get_bucket_acl +- [ ] get_bucket_analytics_configuration +- [ ] get_bucket_cors +- [ ] get_bucket_encryption +- [ ] get_bucket_inventory_configuration +- [ ] get_bucket_lifecycle +- [ ] get_bucket_lifecycle_configuration +- [ ] get_bucket_location +- [ ] get_bucket_logging +- [ ] get_bucket_metrics_configuration +- [ ] get_bucket_notification +- [ ] get_bucket_notification_configuration +- [X] get_bucket_policy +- [ ] get_bucket_replication +- [ ] get_bucket_request_payment +- [ ] get_bucket_tagging +- [X] get_bucket_versioning +- [ ] get_bucket_website +- [ ] get_object +- [ ] get_object_acl +- [ ] get_object_tagging +- [ ] get_object_torrent +- [ ] head_bucket +- [ ] head_object +- [ ] 
list_bucket_analytics_configurations +- [ ] list_bucket_inventory_configurations +- [ ] list_bucket_metrics_configurations +- [ ] list_buckets +- [ ] list_multipart_uploads +- [ ] list_object_versions +- [ ] list_objects +- [ ] list_objects_v2 +- [ ] list_parts +- [ ] put_bucket_accelerate_configuration +- [ ] put_bucket_acl +- [ ] put_bucket_analytics_configuration +- [X] put_bucket_cors +- [ ] put_bucket_encryption +- [ ] put_bucket_inventory_configuration +- [ ] put_bucket_lifecycle +- [ ] put_bucket_lifecycle_configuration +- [ ] put_bucket_logging +- [ ] put_bucket_metrics_configuration +- [ ] put_bucket_notification +- [ ] put_bucket_notification_configuration +- [ ] put_bucket_policy +- [ ] put_bucket_replication +- [ ] put_bucket_request_payment +- [X] put_bucket_tagging +- [ ] put_bucket_versioning +- [ ] put_bucket_website +- [ ] put_object +- [ ] put_object_acl +- [ ] put_object_tagging +- [ ] restore_object +- [ ] upload_part +- [ ] upload_part_copy + +## sdb - 0% implemented +- [ ] batch_delete_attributes +- [ ] batch_put_attributes +- [ ] create_domain +- [ ] delete_attributes +- [ ] delete_domain +- [ ] domain_metadata +- [ ] get_attributes +- [ ] list_domains +- [ ] put_attributes +- [ ] select + +## servicecatalog - 0% implemented +- [ ] accept_portfolio_share +- [ ] associate_principal_with_portfolio +- [ ] associate_product_with_portfolio +- [ ] associate_tag_option_with_resource +- [ ] copy_product +- [ ] create_constraint +- [ ] create_portfolio +- [ ] create_portfolio_share +- [ ] create_product +- [ ] create_provisioning_artifact +- [ ] create_tag_option +- [ ] delete_constraint +- [ ] delete_portfolio +- [ ] delete_portfolio_share +- [ ] delete_product +- [ ] delete_provisioning_artifact +- [ ] describe_constraint +- [ ] describe_copy_product_status +- [ ] describe_portfolio +- [ ] describe_product +- [ ] describe_product_as_admin +- [ ] describe_product_view +- [ ] describe_provisioned_product +- [ ] describe_provisioning_artifact +- [ ] describe_provisioning_parameters +- [ ] describe_record +- [ ] describe_tag_option +- [ ] disassociate_principal_from_portfolio +- [ ] disassociate_product_from_portfolio +- [ ] disassociate_tag_option_from_resource +- [ ] list_accepted_portfolio_shares +- [ ] list_constraints_for_portfolio +- [ ] list_launch_paths +- [ ] list_portfolio_access +- [ ] list_portfolios +- [ ] list_portfolios_for_product +- [ ] list_principals_for_portfolio +- [ ] list_provisioning_artifacts +- [ ] list_record_history +- [ ] list_resources_for_tag_option +- [ ] list_tag_options +- [ ] provision_product +- [ ] reject_portfolio_share +- [ ] scan_provisioned_products +- [ ] search_products +- [ ] search_products_as_admin +- [ ] terminate_provisioned_product +- [ ] update_constraint +- [ ] update_portfolio +- [ ] update_product +- [ ] update_provisioned_product +- [ ] update_provisioning_artifact +- [ ] update_tag_option + +## ses - 12% implemented +- [ ] clone_receipt_rule_set +- [ ] create_configuration_set +- [ ] create_configuration_set_event_destination +- [ ] create_configuration_set_tracking_options +- [ ] create_receipt_filter +- [ ] create_receipt_rule +- [ ] create_receipt_rule_set +- [ ] create_template +- [ ] delete_configuration_set +- [ ] delete_configuration_set_event_destination +- [ ] delete_configuration_set_tracking_options +- [X] delete_identity +- [ ] delete_identity_policy +- [ ] delete_receipt_filter +- [ ] delete_receipt_rule +- [ ] delete_receipt_rule_set +- [ ] delete_template +- [ ] delete_verified_email_address +- [ ] 
describe_active_receipt_rule_set +- [ ] describe_configuration_set +- [ ] describe_receipt_rule +- [ ] describe_receipt_rule_set +- [ ] get_account_sending_enabled +- [ ] get_identity_dkim_attributes +- [ ] get_identity_mail_from_domain_attributes +- [ ] get_identity_notification_attributes +- [ ] get_identity_policies +- [ ] get_identity_verification_attributes +- [X] get_send_quota +- [ ] get_send_statistics +- [ ] get_template +- [ ] list_configuration_sets +- [X] list_identities +- [ ] list_identity_policies +- [ ] list_receipt_filters +- [ ] list_receipt_rule_sets +- [ ] list_templates +- [X] list_verified_email_addresses +- [ ] put_identity_policy +- [ ] reorder_receipt_rule_set +- [ ] send_bounce +- [ ] send_bulk_templated_email +- [X] send_email +- [X] send_raw_email +- [ ] send_templated_email +- [ ] set_active_receipt_rule_set +- [ ] set_identity_dkim_enabled +- [ ] set_identity_feedback_forwarding_enabled +- [ ] set_identity_headers_in_notifications_enabled +- [ ] set_identity_mail_from_domain +- [ ] set_identity_notification_topic +- [ ] set_receipt_rule_position +- [ ] test_render_template +- [ ] update_account_sending_enabled +- [ ] update_configuration_set_event_destination +- [ ] update_configuration_set_reputation_metrics_enabled +- [ ] update_configuration_set_sending_enabled +- [ ] update_configuration_set_tracking_options +- [ ] update_receipt_rule +- [ ] update_template +- [ ] verify_domain_dkim +- [ ] verify_domain_identity +- [X] verify_email_address +- [X] verify_email_identity + +## shield - 0% implemented +- [ ] create_protection +- [ ] create_subscription +- [ ] delete_protection +- [ ] delete_subscription +- [ ] describe_attack +- [ ] describe_protection +- [ ] describe_subscription +- [ ] list_attacks +- [ ] list_protections + +## sms - 0% implemented +- [ ] create_replication_job +- [ ] delete_replication_job +- [ ] delete_server_catalog +- [ ] disassociate_connector +- [ ] get_connectors +- [ ] get_replication_jobs +- [ ] get_replication_runs +- [ ] get_servers +- [ ] import_server_catalog +- [ ] start_on_demand_replication_run +- [ ] update_replication_job + +## snowball - 0% implemented +- [ ] cancel_cluster +- [ ] cancel_job +- [ ] create_address +- [ ] create_cluster +- [ ] create_job +- [ ] describe_address +- [ ] describe_addresses +- [ ] describe_cluster +- [ ] describe_job +- [ ] get_job_manifest +- [ ] get_job_unlock_code +- [ ] get_snowball_usage +- [ ] list_cluster_jobs +- [ ] list_clusters +- [ ] list_jobs +- [ ] update_cluster +- [ ] update_job + +## sns - 53% implemented +- [ ] add_permission +- [ ] check_if_phone_number_is_opted_out +- [ ] confirm_subscription +- [X] create_platform_application +- [X] create_platform_endpoint +- [X] create_topic +- [X] delete_endpoint +- [X] delete_platform_application +- [X] delete_topic +- [ ] get_endpoint_attributes +- [ ] get_platform_application_attributes +- [ ] get_sms_attributes +- [X] get_subscription_attributes +- [ ] get_topic_attributes +- [X] list_endpoints_by_platform_application +- [ ] list_phone_numbers_opted_out +- [X] list_platform_applications +- [X] list_subscriptions +- [ ] list_subscriptions_by_topic +- [X] list_topics +- [ ] opt_in_phone_number +- [X] publish +- [ ] remove_permission +- [X] set_endpoint_attributes +- [ ] set_platform_application_attributes +- [ ] set_sms_attributes +- [X] set_subscription_attributes +- [ ] set_topic_attributes +- [X] subscribe +- [X] unsubscribe + +## sqs - 60% implemented +- [X] add_permission +- [X] change_message_visibility +- [ ] 
change_message_visibility_batch +- [X] create_queue +- [X] delete_message +- [ ] delete_message_batch +- [X] delete_queue +- [ ] get_queue_attributes +- [ ] get_queue_url +- [X] list_dead_letter_source_queues +- [ ] list_queue_tags +- [X] list_queues +- [X] purge_queue +- [ ] receive_message +- [X] remove_permission +- [X] send_message +- [ ] send_message_batch +- [ ] set_queue_attributes +- [X] tag_queue +- [X] untag_queue + +## ssm - 9% implemented +- [X] add_tags_to_resource +- [ ] cancel_command +- [ ] create_activation +- [ ] create_association +- [ ] create_association_batch +- [ ] create_document +- [ ] create_maintenance_window +- [ ] create_patch_baseline +- [ ] create_resource_data_sync +- [ ] delete_activation +- [ ] delete_association +- [ ] delete_document +- [ ] delete_maintenance_window +- [X] delete_parameter +- [X] delete_parameters +- [ ] delete_patch_baseline +- [ ] delete_resource_data_sync +- [ ] deregister_managed_instance +- [ ] deregister_patch_baseline_for_patch_group +- [ ] deregister_target_from_maintenance_window +- [ ] deregister_task_from_maintenance_window +- [ ] describe_activations +- [ ] describe_association +- [ ] describe_automation_executions +- [ ] describe_available_patches +- [ ] describe_document +- [ ] describe_document_permission +- [ ] describe_effective_instance_associations +- [ ] describe_effective_patches_for_patch_baseline +- [ ] describe_instance_associations_status +- [ ] describe_instance_information +- [ ] describe_instance_patch_states +- [ ] describe_instance_patch_states_for_patch_group +- [ ] describe_instance_patches +- [ ] describe_maintenance_window_execution_task_invocations +- [ ] describe_maintenance_window_execution_tasks +- [ ] describe_maintenance_window_executions +- [ ] describe_maintenance_window_targets +- [ ] describe_maintenance_window_tasks +- [ ] describe_maintenance_windows +- [ ] describe_parameters +- [ ] describe_patch_baselines +- [ ] describe_patch_group_state +- [ ] describe_patch_groups +- [ ] get_automation_execution +- [ ] get_command_invocation +- [ ] get_default_patch_baseline +- [ ] get_deployable_patch_snapshot_for_instance +- [ ] get_document +- [ ] get_inventory +- [ ] get_inventory_schema +- [ ] get_maintenance_window +- [ ] get_maintenance_window_execution +- [ ] get_maintenance_window_execution_task +- [ ] get_maintenance_window_execution_task_invocation +- [ ] get_maintenance_window_task +- [X] get_parameter +- [ ] get_parameter_history +- [X] get_parameters +- [X] get_parameters_by_path +- [ ] get_patch_baseline +- [ ] get_patch_baseline_for_patch_group +- [ ] list_association_versions +- [ ] list_associations +- [ ] list_command_invocations +- [ ] list_commands +- [ ] list_compliance_items +- [ ] list_compliance_summaries +- [ ] list_document_versions +- [ ] list_documents +- [ ] list_inventory_entries +- [ ] list_resource_compliance_summaries +- [ ] list_resource_data_sync +- [X] list_tags_for_resource +- [ ] modify_document_permission +- [ ] put_compliance_items +- [ ] put_inventory +- [X] put_parameter +- [ ] register_default_patch_baseline +- [ ] register_patch_baseline_for_patch_group +- [ ] register_target_with_maintenance_window +- [ ] register_task_with_maintenance_window +- [X] remove_tags_from_resource +- [ ] send_automation_signal +- [ ] send_command +- [ ] start_automation_execution +- [ ] stop_automation_execution +- [ ] update_association +- [ ] update_association_status +- [ ] update_document +- [ ] update_document_default_version +- [ ] update_maintenance_window +- [ ] 
update_maintenance_window_target +- [ ] update_maintenance_window_task +- [ ] update_managed_instance_role +- [ ] update_patch_baseline + +## stepfunctions - 0% implemented +- [ ] create_activity +- [ ] create_state_machine +- [ ] delete_activity +- [ ] delete_state_machine +- [ ] describe_activity +- [ ] describe_execution +- [ ] describe_state_machine +- [ ] describe_state_machine_for_execution +- [ ] get_activity_task +- [ ] get_execution_history +- [ ] list_activities +- [ ] list_executions +- [ ] list_state_machines +- [ ] send_task_failure +- [ ] send_task_heartbeat +- [ ] send_task_success +- [ ] start_execution +- [ ] stop_execution +- [ ] update_state_machine + +## storagegateway - 0% implemented +- [ ] activate_gateway +- [ ] add_cache +- [ ] add_tags_to_resource +- [ ] add_upload_buffer +- [ ] add_working_storage +- [ ] cancel_archival +- [ ] cancel_retrieval +- [ ] create_cached_iscsi_volume +- [ ] create_nfs_file_share +- [ ] create_snapshot +- [ ] create_snapshot_from_volume_recovery_point +- [ ] create_stored_iscsi_volume +- [ ] create_tape_with_barcode +- [ ] create_tapes +- [ ] delete_bandwidth_rate_limit +- [ ] delete_chap_credentials +- [ ] delete_file_share +- [ ] delete_gateway +- [ ] delete_snapshot_schedule +- [ ] delete_tape +- [ ] delete_tape_archive +- [ ] delete_volume +- [ ] describe_bandwidth_rate_limit +- [ ] describe_cache +- [ ] describe_cached_iscsi_volumes +- [ ] describe_chap_credentials +- [ ] describe_gateway_information +- [ ] describe_maintenance_start_time +- [ ] describe_nfs_file_shares +- [ ] describe_snapshot_schedule +- [ ] describe_stored_iscsi_volumes +- [ ] describe_tape_archives +- [ ] describe_tape_recovery_points +- [ ] describe_tapes +- [ ] describe_upload_buffer +- [ ] describe_vtl_devices +- [ ] describe_working_storage +- [ ] disable_gateway +- [ ] list_file_shares +- [ ] list_gateways +- [ ] list_local_disks +- [ ] list_tags_for_resource +- [ ] list_tapes +- [ ] list_volume_initiators +- [ ] list_volume_recovery_points +- [ ] list_volumes +- [ ] refresh_cache +- [ ] remove_tags_from_resource +- [ ] reset_cache +- [ ] retrieve_tape_archive +- [ ] retrieve_tape_recovery_point +- [ ] set_local_console_password +- [ ] shutdown_gateway +- [ ] start_gateway +- [ ] update_bandwidth_rate_limit +- [ ] update_chap_credentials +- [ ] update_gateway_information +- [ ] update_gateway_software_now +- [ ] update_maintenance_start_time +- [ ] update_nfs_file_share +- [ ] update_snapshot_schedule +- [ ] update_vtl_device_type + +## sts - 42% implemented +- [X] assume_role +- [ ] assume_role_with_saml +- [ ] assume_role_with_web_identity +- [ ] decode_authorization_message +- [ ] get_caller_identity +- [X] get_federation_token +- [X] get_session_token + +## support - 0% implemented +- [ ] add_attachments_to_set +- [ ] add_communication_to_case +- [ ] create_case +- [ ] describe_attachment +- [ ] describe_cases +- [ ] describe_communications +- [ ] describe_services +- [ ] describe_severity_levels +- [ ] describe_trusted_advisor_check_refresh_statuses +- [ ] describe_trusted_advisor_check_result +- [ ] describe_trusted_advisor_check_summaries +- [ ] describe_trusted_advisor_checks +- [ ] refresh_trusted_advisor_check +- [ ] resolve_case + +## swf - 54% implemented +- [ ] count_closed_workflow_executions +- [ ] count_open_workflow_executions +- [X] count_pending_activity_tasks +- [X] count_pending_decision_tasks +- [ ] deprecate_activity_type +- [X] deprecate_domain +- [ ] deprecate_workflow_type +- [ ] describe_activity_type +- [X] describe_domain +- [X] 
describe_workflow_execution +- [ ] describe_workflow_type +- [ ] get_workflow_execution_history +- [ ] list_activity_types +- [X] list_closed_workflow_executions +- [X] list_domains +- [X] list_open_workflow_executions +- [ ] list_workflow_types +- [X] poll_for_activity_task +- [X] poll_for_decision_task +- [X] record_activity_task_heartbeat +- [ ] register_activity_type +- [X] register_domain +- [ ] register_workflow_type +- [ ] request_cancel_workflow_execution +- [ ] respond_activity_task_canceled +- [X] respond_activity_task_completed +- [X] respond_activity_task_failed +- [X] respond_decision_task_completed +- [ ] signal_workflow_execution +- [X] start_workflow_execution +- [X] terminate_workflow_execution + +## waf - 0% implemented +- [ ] create_byte_match_set +- [ ] create_geo_match_set +- [ ] create_ip_set +- [ ] create_rate_based_rule +- [ ] create_regex_match_set +- [ ] create_regex_pattern_set +- [ ] create_rule +- [ ] create_size_constraint_set +- [ ] create_sql_injection_match_set +- [ ] create_web_acl +- [ ] create_xss_match_set +- [ ] delete_byte_match_set +- [ ] delete_geo_match_set +- [ ] delete_ip_set +- [ ] delete_rate_based_rule +- [ ] delete_regex_match_set +- [ ] delete_regex_pattern_set +- [ ] delete_rule +- [ ] delete_size_constraint_set +- [ ] delete_sql_injection_match_set +- [ ] delete_web_acl +- [ ] delete_xss_match_set +- [ ] get_byte_match_set +- [ ] get_change_token +- [ ] get_change_token_status +- [ ] get_geo_match_set +- [ ] get_ip_set +- [ ] get_rate_based_rule +- [ ] get_rate_based_rule_managed_keys +- [ ] get_regex_match_set +- [ ] get_regex_pattern_set +- [ ] get_rule +- [ ] get_sampled_requests +- [ ] get_size_constraint_set +- [ ] get_sql_injection_match_set +- [ ] get_web_acl +- [ ] get_xss_match_set +- [ ] list_byte_match_sets +- [ ] list_geo_match_sets +- [ ] list_ip_sets +- [ ] list_rate_based_rules +- [ ] list_regex_match_sets +- [ ] list_regex_pattern_sets +- [ ] list_rules +- [ ] list_size_constraint_sets +- [ ] list_sql_injection_match_sets +- [ ] list_web_acls +- [ ] list_xss_match_sets +- [ ] update_byte_match_set +- [ ] update_geo_match_set +- [ ] update_ip_set +- [ ] update_rate_based_rule +- [ ] update_regex_match_set +- [ ] update_regex_pattern_set +- [ ] update_rule +- [ ] update_size_constraint_set +- [ ] update_sql_injection_match_set +- [ ] update_web_acl +- [ ] update_xss_match_set + +## waf-regional - 0% implemented +- [ ] associate_web_acl +- [ ] create_byte_match_set +- [ ] create_geo_match_set +- [ ] create_ip_set +- [ ] create_rate_based_rule +- [ ] create_regex_match_set +- [ ] create_regex_pattern_set +- [ ] create_rule +- [ ] create_size_constraint_set +- [ ] create_sql_injection_match_set +- [ ] create_web_acl +- [ ] create_xss_match_set +- [ ] delete_byte_match_set +- [ ] delete_geo_match_set +- [ ] delete_ip_set +- [ ] delete_rate_based_rule +- [ ] delete_regex_match_set +- [ ] delete_regex_pattern_set +- [ ] delete_rule +- [ ] delete_size_constraint_set +- [ ] delete_sql_injection_match_set +- [ ] delete_web_acl +- [ ] delete_xss_match_set +- [ ] disassociate_web_acl +- [ ] get_byte_match_set +- [ ] get_change_token +- [ ] get_change_token_status +- [ ] get_geo_match_set +- [ ] get_ip_set +- [ ] get_rate_based_rule +- [ ] get_rate_based_rule_managed_keys +- [ ] get_regex_match_set +- [ ] get_regex_pattern_set +- [ ] get_rule +- [ ] get_sampled_requests +- [ ] get_size_constraint_set +- [ ] get_sql_injection_match_set +- [ ] get_web_acl +- [ ] get_web_acl_for_resource +- [ ] get_xss_match_set +- [ ] list_byte_match_sets 
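
(Another aside, referring back to the `ssm` coverage a few sections above, where `get_parameters_by_path` is marked `[X]`: the hedged sketch below shows one way that endpoint might be exercised through boto3 and `@mock_ssm`. The parameter names and expected result are illustrative assumptions, not taken from this diff.)

```python
import boto3
from moto import mock_ssm


@mock_ssm
def exercise_parameters_by_path():
    # Hypothetical usage sketch: store two parameters under a common path,
    # then fetch them back with get_parameters_by_path.
    client = boto3.client("ssm", region_name="us-east-1")
    client.put_parameter(Name="/app/db/host", Value="db.internal", Type="String")
    client.put_parameter(Name="/app/db/port", Value="5432", Type="String")

    result = client.get_parameters_by_path(Path="/app/db", Recursive=True)
    names = sorted(p["Name"] for p in result["Parameters"])
    assert names == ["/app/db/host", "/app/db/port"]


exercise_parameters_by_path()
```
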
+- [ ] list_geo_match_sets +- [ ] list_ip_sets +- [ ] list_rate_based_rules +- [ ] list_regex_match_sets +- [ ] list_regex_pattern_sets +- [ ] list_resources_for_web_acl +- [ ] list_rules +- [ ] list_size_constraint_sets +- [ ] list_sql_injection_match_sets +- [ ] list_web_acls +- [ ] list_xss_match_sets +- [ ] update_byte_match_set +- [ ] update_geo_match_set +- [ ] update_ip_set +- [ ] update_rate_based_rule +- [ ] update_regex_match_set +- [ ] update_regex_pattern_set +- [ ] update_rule +- [ ] update_size_constraint_set +- [ ] update_sql_injection_match_set +- [ ] update_web_acl +- [ ] update_xss_match_set + +## workdocs - 0% implemented +- [ ] abort_document_version_upload +- [ ] activate_user +- [ ] add_resource_permissions +- [ ] create_comment +- [ ] create_custom_metadata +- [ ] create_folder +- [ ] create_labels +- [ ] create_notification_subscription +- [ ] create_user +- [ ] deactivate_user +- [ ] delete_comment +- [ ] delete_custom_metadata +- [ ] delete_document +- [ ] delete_folder +- [ ] delete_folder_contents +- [ ] delete_labels +- [ ] delete_notification_subscription +- [ ] delete_user +- [ ] describe_activities +- [ ] describe_comments +- [ ] describe_document_versions +- [ ] describe_folder_contents +- [ ] describe_notification_subscriptions +- [ ] describe_resource_permissions +- [ ] describe_root_folders +- [ ] describe_users +- [ ] get_current_user +- [ ] get_document +- [ ] get_document_path +- [ ] get_document_version +- [ ] get_folder +- [ ] get_folder_path +- [ ] initiate_document_version_upload +- [ ] remove_all_resource_permissions +- [ ] remove_resource_permission +- [ ] update_document +- [ ] update_document_version +- [ ] update_folder +- [ ] update_user + +## workspaces - 0% implemented +- [ ] create_tags +- [ ] create_workspaces +- [ ] delete_tags +- [ ] describe_tags +- [ ] describe_workspace_bundles +- [ ] describe_workspace_directories +- [ ] describe_workspaces +- [ ] describe_workspaces_connection_status +- [ ] modify_workspace_properties +- [ ] reboot_workspaces +- [ ] rebuild_workspaces +- [ ] start_workspaces +- [ ] stop_workspaces +- [ ] terminate_workspaces + +## xray - 0% implemented +- [ ] batch_get_traces +- [ ] get_service_graph +- [ ] get_trace_graph +- [ ] get_trace_summaries +- [ ] put_telemetry_records +- [ ] put_trace_segments diff --git a/MANIFEST.in b/MANIFEST.in index 7e219f463..43e8120e4 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,6 @@ include README.md LICENSE AUTHORS.md include requirements.txt requirements-dev.txt tox.ini include moto/ec2/resources/instance_types.json +include moto/ec2/resources/amis.json +recursive-include moto/templates * recursive-include tests * diff --git a/Makefile b/Makefile index a963c8293..99b7f2620 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,13 @@ SHELL := /bin/bash +ifeq ($(TEST_SERVER_MODE), true) + # exclude test_iot and test_iotdata for now + # because authentication of iot is very complicated + TEST_EXCLUDE := --exclude='test_iot.*' +else + TEST_EXCLUDE := +endif + init: @python setup.py develop @pip install -r requirements.txt @@ -10,8 +18,7 @@ lint: test: lint rm -f .coverage rm -rf cover - @nosetests -sv --with-coverage --cover-html ./tests/ - + @nosetests -sv --with-coverage --cover-html ./tests/ $(TEST_EXCLUDE) test_server: @TEST_SERVER_MODE=true nosetests -sv --with-coverage --cover-html ./tests/ @@ -29,7 +36,14 @@ tag_github_release: git tag `python setup.py --version` git push origin `python setup.py --version` -publish: upload_pypi_artifact push_dockerhub_image 
tag_github_release +publish: implementation_coverage \ + upload_pypi_artifact \ + tag_github_release \ + push_dockerhub_image + +implementation_coverage: + ./scripts/implementation_coverage.py > IMPLEMENTATION_COVERAGE.md + git commit IMPLEMENTATION_COVERAGE.md -m "Updating implementation coverage" scaffold: @pip install -r requirements-dev.txt > /dev/null diff --git a/README.md b/README.md index 7ced7b895..59dc67432 100644 --- a/README.md +++ b/README.md @@ -68,10 +68,12 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L |------------------------------------------------------------------------------| | Cloudwatch | @mock_cloudwatch | basic endpoints done | |------------------------------------------------------------------------------| +| CloudwatchEvents | @mock_events | all endpoints done | +|------------------------------------------------------------------------------| | Data Pipeline | @mock_datapipeline| basic endpoints done | |------------------------------------------------------------------------------| | DynamoDB | @mock_dynamodb | core endpoints done | -| DynamoDB2 | @mock_dynamodb2 | core endpoints + partial indexes | +| DynamoDB2 | @mock_dynamodb2 | all endpoints + partial indexes | |------------------------------------------------------------------------------| | EC2 | @mock_ec2 | core endpoints done | | - AMI | | core endpoints done | @@ -86,7 +88,7 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L |------------------------------------------------------------------------------| | ELB | @mock_elb | core endpoints done | |------------------------------------------------------------------------------| -| ELBv2 | @mock_elbv2 | core endpoints done | +| ELBv2 | @mock_elbv2 | all endpoints done | |------------------------------------------------------------------------------| | EMR | @mock_emr | core endpoints done | |------------------------------------------------------------------------------| @@ -94,6 +96,9 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L |------------------------------------------------------------------------------| | IAM | @mock_iam | core endpoints done | |------------------------------------------------------------------------------| +| IoT | @mock_iot | core endpoints done | +| | @mock_iotdata | core endpoints done | +|------------------------------------------------------------------------------| | Lambda | @mock_lambda | basic endpoints done, requires | | | | docker | |------------------------------------------------------------------------------| @@ -115,7 +120,7 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. L |------------------------------------------------------------------------------| | S3 | @mock_s3 | core endpoints done | |------------------------------------------------------------------------------| -| SES | @mock_ses | core endpoints done | +| SES | @mock_ses | all endpoints done | |------------------------------------------------------------------------------| | SNS | @mock_sns | all endpoints done | |------------------------------------------------------------------------------| @@ -127,7 +132,7 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. 
L |------------------------------------------------------------------------------| | SWF | @mock_swf | basic endpoints done | |------------------------------------------------------------------------------| -| X-Ray | @mock_xray | core endpoints done | +| X-Ray | @mock_xray | all endpoints done | |------------------------------------------------------------------------------| ``` @@ -297,6 +302,7 @@ boto3.resource( ## Install + ```console $ pip install moto ``` diff --git a/moto/__init__.py b/moto/__init__.py index 64baa52ac..3508dfeda 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -38,8 +38,12 @@ from .sts import mock_sts, mock_sts_deprecated # flake8: noqa from .ssm import mock_ssm # flake8: noqa from .route53 import mock_route53, mock_route53_deprecated # flake8: noqa from .swf import mock_swf, mock_swf_deprecated # flake8: noqa -from .xray import mock_xray # flake8: noqa +from .xray import mock_xray, mock_xray_client, XRaySegment # flake8: noqa from .logs import mock_logs, mock_logs_deprecated # flake8: noqa +from .batch import mock_batch # flake8: noqa +from .resourcegroupstaggingapi import mock_resourcegroupstaggingapi # flake8: noqa +from .iot import mock_iot # flake8: noqa +from .iotdata import mock_iotdata # flake8: noqa try: diff --git a/moto/acm/models.py b/moto/acm/models.py index de26529a4..39be8945d 100644 --- a/moto/acm/models.py +++ b/moto/acm/models.py @@ -170,7 +170,7 @@ class CertBundle(BaseModel): try: self._cert = cryptography.x509.load_pem_x509_certificate(self.cert, default_backend()) - now = datetime.datetime.now() + now = datetime.datetime.utcnow() if self._cert.not_valid_after < now: raise AWSValidationException('The certificate has expired, is not valid.') diff --git a/moto/acm/responses.py b/moto/acm/responses.py index 7bf12bbb8..431a8cf60 100644 --- a/moto/acm/responses.py +++ b/moto/acm/responses.py @@ -185,7 +185,7 @@ class AWSCertificateManagerResponse(BaseResponse): idempotency_token = self._get_param('IdempotencyToken') subject_alt_names = self._get_param('SubjectAlternativeNames') - if len(subject_alt_names) > 10: + if subject_alt_names is not None and len(subject_alt_names) > 10: # There is initial AWS limit of 10 msg = 'An ACM limit has been exceeded. 
Need to request SAN limit to be raised' return json.dumps({'__type': 'LimitExceededException', 'message': msg}), dict(status=400) diff --git a/moto/autoscaling/exceptions.py b/moto/autoscaling/exceptions.py new file mode 100644 index 000000000..15b2e4f4a --- /dev/null +++ b/moto/autoscaling/exceptions.py @@ -0,0 +1,14 @@ +from __future__ import unicode_literals +from moto.core.exceptions import RESTError + + +class AutoscalingClientError(RESTError): + code = 500 + + +class ResourceContentionError(AutoscalingClientError): + + def __init__(self): + super(ResourceContentionError, self).__init__( + "ResourceContentionError", + "You already have a pending update to an Auto Scaling resource (for example, a group, instance, or load balancer).") diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index 9df9fea12..ab99e4119 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -4,7 +4,11 @@ from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.ec2 import ec2_backends from moto.elb import elb_backends +from moto.elbv2 import elbv2_backends from moto.elb.exceptions import LoadBalancerNotFoundError +from .exceptions import ( + ResourceContentionError, +) # http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AS_Concepts.html#Cooldown DEFAULT_COOLDOWN = 300 @@ -13,9 +17,10 @@ ASG_NAME_TAG = "aws:autoscaling:groupName" class InstanceState(object): - def __init__(self, instance, lifecycle_state="InService"): + def __init__(self, instance, lifecycle_state="InService", health_status="Healthy"): self.instance = instance self.lifecycle_state = lifecycle_state + self.health_status = health_status class FakeScalingPolicy(BaseModel): @@ -146,7 +151,7 @@ class FakeAutoScalingGroup(BaseModel): def __init__(self, name, availability_zones, desired_capacity, max_size, min_size, launch_config_name, vpc_zone_identifier, default_cooldown, health_check_period, health_check_type, - load_balancers, placement_group, termination_policies, + load_balancers, target_group_arns, placement_group, termination_policies, autoscaling_backend, tags): self.autoscaling_backend = autoscaling_backend self.name = name @@ -163,6 +168,7 @@ class FakeAutoScalingGroup(BaseModel): self.health_check_period = health_check_period self.health_check_type = health_check_type if health_check_type else "EC2" self.load_balancers = load_balancers + self.target_group_arns = target_group_arns self.placement_group = placement_group self.termination_policies = termination_policies @@ -176,9 +182,10 @@ class FakeAutoScalingGroup(BaseModel): launch_config_name = properties.get("LaunchConfigurationName") load_balancer_names = properties.get("LoadBalancerNames", []) + target_group_arns = properties.get("TargetGroupARNs", []) backend = autoscaling_backends[region_name] - group = backend.create_autoscaling_group( + group = backend.create_auto_scaling_group( name=resource_name, availability_zones=properties.get("AvailabilityZones", []), desired_capacity=properties.get("DesiredCapacity"), @@ -191,6 +198,7 @@ class FakeAutoScalingGroup(BaseModel): health_check_period=properties.get("HealthCheckGracePeriod"), health_check_type=properties.get("HealthCheckType"), load_balancers=load_balancer_names, + target_group_arns=target_group_arns, placement_group=None, termination_policies=properties.get("TerminationPolicies", []), tags=properties.get("Tags", []), @@ -207,13 +215,13 @@ class FakeAutoScalingGroup(BaseModel): def delete_from_cloudformation_json(cls, resource_name, 
cloudformation_json, region_name): backend = autoscaling_backends[region_name] try: - backend.delete_autoscaling_group(resource_name) + backend.delete_auto_scaling_group(resource_name) except KeyError: pass def delete(self, region_name): backend = autoscaling_backends[region_name] - backend.delete_autoscaling_group(self.name) + backend.delete_auto_scaling_group(self.name) @property def physical_resource_id(self): @@ -221,7 +229,7 @@ class FakeAutoScalingGroup(BaseModel): def update(self, availability_zones, desired_capacity, max_size, min_size, launch_config_name, vpc_zone_identifier, default_cooldown, - health_check_period, health_check_type, load_balancers, + health_check_period, health_check_type, placement_group, termination_policies): if availability_zones: self.availability_zones = availability_zones @@ -259,27 +267,8 @@ class FakeAutoScalingGroup(BaseModel): # Need more instances count_needed = int(self.desired_capacity) - int(curr_instance_count) - propagated_tags = {} - for tag in self.tags: - # boto uses 'propagate_at_launch - # boto3 and cloudformation use PropagateAtLaunch - if 'propagate_at_launch' in tag and tag['propagate_at_launch'] == 'true': - propagated_tags[tag['key']] = tag['value'] - if 'PropagateAtLaunch' in tag and tag['PropagateAtLaunch']: - propagated_tags[tag['Key']] = tag['Value'] - - propagated_tags[ASG_NAME_TAG] = self.name - reservation = self.autoscaling_backend.ec2_backend.add_instances( - self.launch_config.image_id, - count_needed, - self.launch_config.user_data, - self.launch_config.security_groups, - instance_type=self.launch_config.instance_type, - tags={'instance': propagated_tags} - ) - for instance in reservation.instances: - instance.autoscaling_group = self - self.instance_states.append(InstanceState(instance)) + propagated_tags = self.get_propagated_tags() + self.replace_autoscaling_group_instances(count_needed, propagated_tags) else: # Need to remove some instances count_to_remove = curr_instance_count - self.desired_capacity @@ -290,20 +279,51 @@ class FakeAutoScalingGroup(BaseModel): instance_ids_to_remove) self.instance_states = self.instance_states[count_to_remove:] + def get_propagated_tags(self): + propagated_tags = {} + for tag in self.tags: + # boto uses 'propagate_at_launch + # boto3 and cloudformation use PropagateAtLaunch + if 'propagate_at_launch' in tag and tag['propagate_at_launch'] == 'true': + propagated_tags[tag['key']] = tag['value'] + if 'PropagateAtLaunch' in tag and tag['PropagateAtLaunch']: + propagated_tags[tag['Key']] = tag['Value'] + return propagated_tags + + def replace_autoscaling_group_instances(self, count_needed, propagated_tags): + propagated_tags[ASG_NAME_TAG] = self.name + reservation = self.autoscaling_backend.ec2_backend.add_instances( + self.launch_config.image_id, + count_needed, + self.launch_config.user_data, + self.launch_config.security_groups, + instance_type=self.launch_config.instance_type, + tags={'instance': propagated_tags} + ) + for instance in reservation.instances: + instance.autoscaling_group = self + self.instance_states.append(InstanceState(instance)) + + def append_target_groups(self, target_group_arns): + append = [x for x in target_group_arns if x not in self.target_group_arns] + self.target_group_arns.extend(append) + class AutoScalingBackend(BaseBackend): - def __init__(self, ec2_backend, elb_backend): + def __init__(self, ec2_backend, elb_backend, elbv2_backend): self.autoscaling_groups = OrderedDict() self.launch_configurations = OrderedDict() self.policies = {} self.ec2_backend = 
ec2_backend self.elb_backend = elb_backend + self.elbv2_backend = elbv2_backend def reset(self): ec2_backend = self.ec2_backend elb_backend = self.elb_backend + elbv2_backend = self.elbv2_backend self.__dict__ = {} - self.__init__(ec2_backend, elb_backend) + self.__init__(ec2_backend, elb_backend, elbv2_backend) def create_launch_configuration(self, name, image_id, key_name, kernel_id, ramdisk_id, security_groups, user_data, instance_type, @@ -338,12 +358,13 @@ class AutoScalingBackend(BaseBackend): def delete_launch_configuration(self, launch_configuration_name): self.launch_configurations.pop(launch_configuration_name, None) - def create_autoscaling_group(self, name, availability_zones, + def create_auto_scaling_group(self, name, availability_zones, desired_capacity, max_size, min_size, launch_config_name, vpc_zone_identifier, default_cooldown, health_check_period, health_check_type, load_balancers, - placement_group, termination_policies, tags): + target_group_arns, placement_group, + termination_policies, tags): def make_int(value): return int(value) if value is not None else value @@ -369,6 +390,7 @@ class AutoScalingBackend(BaseBackend): health_check_period=health_check_period, health_check_type=health_check_type, load_balancers=load_balancers, + target_group_arns=target_group_arns, placement_group=placement_group, termination_policies=termination_policies, autoscaling_backend=self, @@ -377,38 +399,79 @@ class AutoScalingBackend(BaseBackend): self.autoscaling_groups[name] = group self.update_attached_elbs(group.name) + self.update_attached_target_groups(group.name) return group - def update_autoscaling_group(self, name, availability_zones, + def update_auto_scaling_group(self, name, availability_zones, desired_capacity, max_size, min_size, launch_config_name, vpc_zone_identifier, default_cooldown, health_check_period, - health_check_type, load_balancers, - placement_group, termination_policies): + health_check_type, placement_group, + termination_policies): group = self.autoscaling_groups[name] group.update(availability_zones, desired_capacity, max_size, min_size, launch_config_name, vpc_zone_identifier, default_cooldown, health_check_period, health_check_type, - load_balancers, placement_group, termination_policies) + placement_group, termination_policies) return group - def describe_autoscaling_groups(self, names): + def describe_auto_scaling_groups(self, names): groups = self.autoscaling_groups.values() if names: return [group for group in groups if group.name in names] else: return list(groups) - def delete_autoscaling_group(self, group_name): + def delete_auto_scaling_group(self, group_name): self.set_desired_capacity(group_name, 0) self.autoscaling_groups.pop(group_name, None) - def describe_autoscaling_instances(self): + def describe_auto_scaling_instances(self): instance_states = [] for group in self.autoscaling_groups.values(): instance_states.extend(group.instance_states) return instance_states + def attach_instances(self, group_name, instance_ids): + group = self.autoscaling_groups[group_name] + original_size = len(group.instance_states) + + if (original_size + len(instance_ids)) > group.max_size: + raise ResourceContentionError + else: + group.desired_capacity = original_size + len(instance_ids) + new_instances = [InstanceState(self.ec2_backend.get_instance(x)) for x in instance_ids] + for instance in new_instances: + self.ec2_backend.create_tags([instance.instance.id], {ASG_NAME_TAG: group.name}) + group.instance_states.extend(new_instances) + 
self.update_attached_elbs(group.name) + + def set_instance_health(self, instance_id, health_status, should_respect_grace_period): + instance = self.ec2_backend.get_instance(instance_id) + instance_state = next(instance_state for group in self.autoscaling_groups.values() + for instance_state in group.instance_states if instance_state.instance.id == instance.id) + instance_state.health_status = health_status + + def detach_instances(self, group_name, instance_ids, should_decrement): + group = self.autoscaling_groups[group_name] + original_size = len(group.instance_states) + + detached_instances = [x for x in group.instance_states if x.instance.id in instance_ids] + for instance in detached_instances: + self.ec2_backend.delete_tags([instance.instance.id], {ASG_NAME_TAG: group.name}) + + new_instance_state = [x for x in group.instance_states if x.instance.id not in instance_ids] + group.instance_states = new_instance_state + + if should_decrement: + group.desired_capacity = original_size - len(instance_ids) + else: + count_needed = len(instance_ids) + group.replace_autoscaling_group_instances(count_needed, group.get_propagated_tags()) + + self.update_attached_elbs(group_name) + return detached_instances + def set_desired_capacity(self, group_name, desired_capacity): group = self.autoscaling_groups[group_name] group.set_desired_capacity(desired_capacity) @@ -461,6 +524,10 @@ class AutoScalingBackend(BaseBackend): group_instance_ids = set( state.instance.id for state in group.instance_states) + # skip this if group.load_balancers is empty + # otherwise elb_backend.describe_load_balancers returns all available load balancers + if not group.load_balancers: + return try: elbs = self.elb_backend.describe_load_balancers( names=group.load_balancers) @@ -475,8 +542,25 @@ class AutoScalingBackend(BaseBackend): self.elb_backend.deregister_instances( elb.name, elb_instace_ids - group_instance_ids) - def create_or_update_tags(self, tags): + def update_attached_target_groups(self, group_name): + group = self.autoscaling_groups[group_name] + group_instance_ids = set( + state.instance.id for state in group.instance_states) + # no action necessary if target_group_arns is empty + if not group.target_group_arns: + return + + target_groups = self.elbv2_backend.describe_target_groups( + target_group_arns=group.target_group_arns, + load_balancer_arn=None, + names=None) + + for target_group in target_groups: + asg_targets = [{'id': x, 'port': target_group.port} for x in group_instance_ids] + self.elbv2_backend.register_targets(target_group.arn, (asg_targets)) + + def create_or_update_tags(self, tags): for tag in tags: group_name = tag["resource_id"] group = self.autoscaling_groups[group_name] @@ -496,8 +580,42 @@ class AutoScalingBackend(BaseBackend): group.tags = new_tags + def attach_load_balancers(self, group_name, load_balancer_names): + group = self.autoscaling_groups[group_name] + group.load_balancers.extend( + [x for x in load_balancer_names if x not in group.load_balancers]) + self.update_attached_elbs(group_name) + + def describe_load_balancers(self, group_name): + return self.autoscaling_groups[group_name].load_balancers + + def detach_load_balancers(self, group_name, load_balancer_names): + group = self.autoscaling_groups[group_name] + group_instance_ids = set( + state.instance.id for state in group.instance_states) + elbs = self.elb_backend.describe_load_balancers(names=group.load_balancers) + for elb in elbs: + self.elb_backend.deregister_instances( + elb.name, group_instance_ids) + group.load_balancers 
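
A minimal usage sketch of the new instance attach/detach and health endpoints, assuming they are exercised from boto3 under the moto decorators; the group, launch configuration and AMI names here are illustrative only:

```python
import boto3
from moto import mock_autoscaling, mock_ec2

@mock_ec2
@mock_autoscaling
def exercise_instance_attach_detach():
    client = boto3.client("autoscaling", region_name="us-east-1")
    ec2 = boto3.client("ec2", region_name="us-east-1")

    client.create_launch_configuration(
        LaunchConfigurationName="lc", ImageId="ami-12345678", InstanceType="t2.micro")
    client.create_auto_scaling_group(
        AutoScalingGroupName="asg", LaunchConfigurationName="lc",
        MinSize=0, MaxSize=4, DesiredCapacity=1,
        AvailabilityZones=["us-east-1a"])

    # Attach a separately launched instance; the backend bumps desired capacity
    # and tags the instance with aws:autoscaling:groupName.
    instance_id = ec2.run_instances(
        ImageId="ami-12345678", MinCount=1, MaxCount=1)["Instances"][0]["InstanceId"]
    client.attach_instances(AutoScalingGroupName="asg", InstanceIds=[instance_id])

    # Health status now round-trips through DescribeAutoScalingInstances.
    client.set_instance_health(InstanceId=instance_id, HealthStatus="Unhealthy")

    # Detaching without decrementing capacity triggers replacement instances.
    client.detach_instances(
        AutoScalingGroupName="asg",
        InstanceIds=[instance_id],
        ShouldDecrementDesiredCapacity=False)
```
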
= [x for x in group.load_balancers if x not in load_balancer_names] + + def attach_load_balancer_target_groups(self, group_name, target_group_arns): + group = self.autoscaling_groups[group_name] + group.append_target_groups(target_group_arns) + self.update_attached_target_groups(group_name) + + def describe_load_balancer_target_groups(self, group_name): + return self.autoscaling_groups[group_name].target_group_arns + + def detach_load_balancer_target_groups(self, group_name, target_group_arns): + group = self.autoscaling_groups[group_name] + group.target_group_arns = [x for x in group.target_group_arns if x not in target_group_arns] + for target_group in target_group_arns: + asg_targets = [{'id': x.instance.id} for x in group.instance_states] + self.elbv2_backend.deregister_targets(target_group, (asg_targets)) + autoscaling_backends = {} for region, ec2_backend in ec2_backends.items(): autoscaling_backends[region] = AutoScalingBackend( - ec2_backend, elb_backends[region]) + ec2_backend, elb_backends[region], elbv2_backends[region]) diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index 2c3bddd79..d3f9ca483 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse +from moto.core.utils import amz_crc32, amzn_request_id from .models import autoscaling_backends @@ -66,7 +67,7 @@ class AutoScalingResponse(BaseResponse): return template.render() def create_auto_scaling_group(self): - self.autoscaling_backend.create_autoscaling_group( + self.autoscaling_backend.create_auto_scaling_group( name=self._get_param('AutoScalingGroupName'), availability_zones=self._get_multi_param( 'AvailabilityZones.member'), @@ -79,6 +80,7 @@ class AutoScalingResponse(BaseResponse): health_check_period=self._get_int_param('HealthCheckGracePeriod'), health_check_type=self._get_param('HealthCheckType'), load_balancers=self._get_multi_param('LoadBalancerNames.member'), + target_group_arns=self._get_multi_param('TargetGroupARNs.member'), placement_group=self._get_param('PlacementGroup'), termination_policies=self._get_multi_param( 'TerminationPolicies.member'), @@ -87,10 +89,78 @@ class AutoScalingResponse(BaseResponse): template = self.response_template(CREATE_AUTOSCALING_GROUP_TEMPLATE) return template.render() + @amz_crc32 + @amzn_request_id + def attach_instances(self): + group_name = self._get_param('AutoScalingGroupName') + instance_ids = self._get_multi_param('InstanceIds.member') + self.autoscaling_backend.attach_instances( + group_name, instance_ids) + template = self.response_template(ATTACH_INSTANCES_TEMPLATE) + return template.render() + + @amz_crc32 + @amzn_request_id + def set_instance_health(self): + instance_id = self._get_param('InstanceId') + health_status = self._get_param("HealthStatus") + if health_status not in ['Healthy', 'Unhealthy']: + raise ValueError('Valid instance health states are: [Healthy, Unhealthy]') + should_respect_grace_period = self._get_param("ShouldRespectGracePeriod") + self.autoscaling_backend.set_instance_health(instance_id, health_status, should_respect_grace_period) + template = self.response_template(SET_INSTANCE_HEALTH_TEMPLATE) + return template.render() + + @amz_crc32 + @amzn_request_id + def detach_instances(self): + group_name = self._get_param('AutoScalingGroupName') + instance_ids = self._get_multi_param('InstanceIds.member') + should_decrement_string = self._get_param('ShouldDecrementDesiredCapacity') + if 
should_decrement_string == 'true': + should_decrement = True + else: + should_decrement = False + detached_instances = self.autoscaling_backend.detach_instances( + group_name, instance_ids, should_decrement) + template = self.response_template(DETACH_INSTANCES_TEMPLATE) + return template.render(detached_instances=detached_instances) + + @amz_crc32 + @amzn_request_id + def attach_load_balancer_target_groups(self): + group_name = self._get_param('AutoScalingGroupName') + target_group_arns = self._get_multi_param('TargetGroupARNs.member') + + self.autoscaling_backend.attach_load_balancer_target_groups( + group_name, target_group_arns) + template = self.response_template(ATTACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE) + return template.render() + + @amz_crc32 + @amzn_request_id + def describe_load_balancer_target_groups(self): + group_name = self._get_param('AutoScalingGroupName') + target_group_arns = self.autoscaling_backend.describe_load_balancer_target_groups( + group_name) + template = self.response_template(DESCRIBE_LOAD_BALANCER_TARGET_GROUPS) + return template.render(target_group_arns=target_group_arns) + + @amz_crc32 + @amzn_request_id + def detach_load_balancer_target_groups(self): + group_name = self._get_param('AutoScalingGroupName') + target_group_arns = self._get_multi_param('TargetGroupARNs.member') + + self.autoscaling_backend.detach_load_balancer_target_groups( + group_name, target_group_arns) + template = self.response_template(DETACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE) + return template.render() + def describe_auto_scaling_groups(self): names = self._get_multi_param("AutoScalingGroupNames.member") token = self._get_param("NextToken") - all_groups = self.autoscaling_backend.describe_autoscaling_groups(names) + all_groups = self.autoscaling_backend.describe_auto_scaling_groups(names) all_names = [group.name for group in all_groups] if token: start = all_names.index(token) + 1 @@ -107,7 +177,7 @@ class AutoScalingResponse(BaseResponse): return template.render(groups=groups, next_token=next_token) def update_auto_scaling_group(self): - self.autoscaling_backend.update_autoscaling_group( + self.autoscaling_backend.update_auto_scaling_group( name=self._get_param('AutoScalingGroupName'), availability_zones=self._get_multi_param( 'AvailabilityZones.member'), @@ -119,7 +189,6 @@ class AutoScalingResponse(BaseResponse): default_cooldown=self._get_int_param('DefaultCooldown'), health_check_period=self._get_int_param('HealthCheckGracePeriod'), health_check_type=self._get_param('HealthCheckType'), - load_balancers=self._get_multi_param('LoadBalancerNames.member'), placement_group=self._get_param('PlacementGroup'), termination_policies=self._get_multi_param( 'TerminationPolicies.member'), @@ -129,7 +198,7 @@ class AutoScalingResponse(BaseResponse): def delete_auto_scaling_group(self): group_name = self._get_param('AutoScalingGroupName') - self.autoscaling_backend.delete_autoscaling_group(group_name) + self.autoscaling_backend.delete_auto_scaling_group(group_name) template = self.response_template(DELETE_AUTOSCALING_GROUP_TEMPLATE) return template.render() @@ -149,7 +218,7 @@ class AutoScalingResponse(BaseResponse): return template.render() def describe_auto_scaling_instances(self): - instance_states = self.autoscaling_backend.describe_autoscaling_instances() + instance_states = self.autoscaling_backend.describe_auto_scaling_instances() template = self.response_template( DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE) return template.render(instance_states=instance_states) @@ -186,6 +255,34 @@ 
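
Similarly, a rough sketch of the new load balancer and target group attachment calls, which keep ELB and ELBv2 registrations in sync with the group's instances; it assumes an ASG named "asg" already exists as in the previous sketch, and all resource names are placeholders:

```python
import boto3
from moto import mock_autoscaling, mock_ec2, mock_elb, mock_elbv2

@mock_ec2
@mock_elb
@mock_elbv2
@mock_autoscaling
def exercise_load_balancer_attachment():
    asg = boto3.client("autoscaling", region_name="us-east-1")
    elb = boto3.client("elb", region_name="us-east-1")
    elbv2 = boto3.client("elbv2", region_name="us-east-1")
    ec2 = boto3.client("ec2", region_name="us-east-1")

    elb.create_load_balancer(
        LoadBalancerName="my-lb",
        Listeners=[{"Protocol": "HTTP", "LoadBalancerPort": 80, "InstancePort": 80}],
        AvailabilityZones=["us-east-1a"])

    # Classic ELBs: attach registers the group's instances, detach deregisters them.
    asg.attach_load_balancers(AutoScalingGroupName="asg", LoadBalancerNames=["my-lb"])
    asg.describe_load_balancers(AutoScalingGroupName="asg")
    asg.detach_load_balancers(AutoScalingGroupName="asg", LoadBalancerNames=["my-lb"])

    # ELBv2 target groups are referenced by ARN instead of by name.
    vpc_id = ec2.create_vpc(CidrBlock="10.0.0.0/16")["Vpc"]["VpcId"]
    tg_arn = elbv2.create_target_group(
        Name="my-tg", Protocol="HTTP", Port=80, VpcId=vpc_id
    )["TargetGroups"][0]["TargetGroupArn"]

    asg.attach_load_balancer_target_groups(AutoScalingGroupName="asg", TargetGroupARNs=[tg_arn])
    asg.describe_load_balancer_target_groups(AutoScalingGroupName="asg")
    asg.detach_load_balancer_target_groups(AutoScalingGroupName="asg", TargetGroupARNs=[tg_arn])
```
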
class AutoScalingResponse(BaseResponse): template = self.response_template(EXECUTE_POLICY_TEMPLATE) return template.render() + @amz_crc32 + @amzn_request_id + def attach_load_balancers(self): + group_name = self._get_param('AutoScalingGroupName') + load_balancer_names = self._get_multi_param("LoadBalancerNames.member") + self.autoscaling_backend.attach_load_balancers( + group_name, load_balancer_names) + template = self.response_template(ATTACH_LOAD_BALANCERS_TEMPLATE) + return template.render() + + @amz_crc32 + @amzn_request_id + def describe_load_balancers(self): + group_name = self._get_param('AutoScalingGroupName') + load_balancers = self.autoscaling_backend.describe_load_balancers(group_name) + template = self.response_template(DESCRIBE_LOAD_BALANCERS_TEMPLATE) + return template.render(load_balancers=load_balancers) + + @amz_crc32 + @amzn_request_id + def detach_load_balancers(self): + group_name = self._get_param('AutoScalingGroupName') + load_balancer_names = self._get_multi_param("LoadBalancerNames.member") + self.autoscaling_backend.detach_load_balancers( + group_name, load_balancer_names) + template = self.response_template(DETACH_LOAD_BALANCERS_TEMPLATE) + return template.render() + CREATE_LAUNCH_CONFIGURATION_TEMPLATE = """ @@ -217,7 +314,7 @@ DESCRIBE_LAUNCH_CONFIGURATIONS_TEMPLATE = """{{ launch_configuration.instance_type }} arn:aws:autoscaling:us-east-1:803981987763:launchConfiguration: - 9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/my-test-lc + 9dbbbf87-6141-428a-a409-0752edbe6cad:launchConfigurationName/{{ launch_configuration.name }} {% if launch_configuration.block_device_mappings %} {% for mount_point, mapping in launch_configuration.block_device_mappings.items() %} @@ -284,6 +381,72 @@ CREATE_AUTOSCALING_GROUP_TEMPLATE = """ + + + +{{ requestid }} + +""" + +ATTACH_INSTANCES_TEMPLATE = """ + + + +{{ requestid }} + +""" + +DESCRIBE_LOAD_BALANCER_TARGET_GROUPS = """ + + + {% for arn in target_group_arns %} + + {{ arn }} + Added + + {% endfor %} + + + +{{ requestid }} + +""" + +DETACH_INSTANCES_TEMPLATE = """ + + + {% for instance in detached_instances %} + + 5091cb52-547a-47ce-a236-c9ccbc2cb2c9EXAMPLE + {{ group_name }} + + At 2017-10-15T15:55:21Z instance {{ instance.instance.id }} was detached in response to a user request. + + Detaching EC2 instance: {{ instance.instance.id }} + 2017-10-15T15:55:21Z + 2017-10-15T15:55:21Z + InProgress + InProgress + 50 +
+      <Details>details</Details>
+    </member>
+    {% endfor %}
+  </Activities>
+</DetachInstancesResult>
+<ResponseMetadata>
+  <RequestId>{{ requestid }}</RequestId>
+</ResponseMetadata>
+</DetachInstancesResponse>
""" + +DETACH_LOAD_BALANCER_TARGET_GROUPS_TEMPLATE = """ + + + +{{ requestid }} + +""" + DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """ @@ -309,7 +472,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """ {% for instance_state in group.instance_states %} - HEALTHY + {{ instance_state.health_status }} us-east-1e {{ instance_state.instance.id }} {{ group.launch_config_name }} @@ -341,7 +504,7 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """{{ group.health_check_period }} {{ group.default_cooldown }} arn:aws:autoscaling:us-east-1:803981987763:autoScalingGroup:ca861182-c8f9-4ca7-b1eb-cd35505f5ebb - :autoScalingGroupName/my-test-asg-lbs + :autoScalingGroupName/{{ group.name }} {% if group.termination_policies %} {% for policy in group.termination_policies %} @@ -384,7 +547,7 @@ DESCRIBE_AUTOSCALING_INSTANCES_TEMPLATE = """ {% for instance_state in instance_states %} - HEALTHY + {{ instance_state.health_status }} {{ instance_state.instance.autoscaling_group.name }} us-east-1e {{ instance_state.instance.id }} @@ -450,3 +613,40 @@ DELETE_POLICY_TEMPLATE = """ + + +{{ requestid }} + +""" + +DESCRIBE_LOAD_BALANCERS_TEMPLATE = """ + + + {% for load_balancer in load_balancers %} + + {{ load_balancer }} + Added + + {% endfor %} + + + +{{ requestid }} + +""" + +DETACH_LOAD_BALANCERS_TEMPLATE = """ + + +{{ requestid }} + +""" + +SET_INSTANCE_HEALTH_TEMPLATE = """ + + +{{ requestid }} + +""" diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 935abbcd6..6f31a2611 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -2,6 +2,7 @@ from __future__ import unicode_literals import base64 from collections import defaultdict +import copy import datetime import docker.errors import hashlib @@ -17,18 +18,23 @@ import tarfile import calendar import threading import traceback +import weakref import requests.adapters import boto.awslambda from moto.core import BaseBackend, BaseModel +from moto.core.exceptions import RESTError from moto.core.utils import unix_time_millis from moto.s3.models import s3_backend from moto.logs.models import logs_backends from moto.s3.exceptions import MissingBucket, MissingKey from moto import settings +from .utils import make_function_arn logger = logging.getLogger(__name__) +ACCOUNT_ID = '123456789012' + try: from tempfile import TemporaryDirectory @@ -121,7 +127,7 @@ class _DockerDataVolumeContext: class LambdaFunction(BaseModel): - def __init__(self, spec, region, validate_s3=True): + def __init__(self, spec, region, validate_s3=True, version=1): # required self.region = region self.code = spec['Code'] @@ -161,7 +167,7 @@ class LambdaFunction(BaseModel): 'VpcConfig', {'SubnetIds': [], 'SecurityGroupIds': []}) # auto-generated - self.version = '$LATEST' + self.version = version self.last_modified = datetime.datetime.utcnow().strftime( '%Y-%m-%d %H:%M:%S') @@ -203,11 +209,15 @@ class LambdaFunction(BaseModel): self.code_size = key.size self.code_sha_256 = hashlib.sha256(key.value).hexdigest() - self.function_arn = 'arn:aws:lambda:{}:123456789012:function:{}'.format( - self.region, self.function_name) + self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name, version) self.tags = dict() + def set_version(self, version): + self.function_arn = make_function_arn(self.region, ACCOUNT_ID, self.function_name, version) + self.version = version + self.last_modified = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S') + @property def vpc_config(self): config = self._vpc_config.copy() @@ -231,7 +241,7 @@ class LambdaFunction(BaseModel): 
"Role": self.role, "Runtime": self.run_time, "Timeout": self.timeout, - "Version": self.version, + "Version": str(self.version), "VpcConfig": self.vpc_config, } @@ -298,7 +308,12 @@ class LambdaFunction(BaseModel): volumes=["{}:/var/task".format(data_vol.name)], environment=env_vars, detach=True, **run_kwargs) finally: if container: - exit_code = container.wait() + try: + exit_code = container.wait(timeout=300) + except requests.exceptions.ReadTimeout: + exit_code = -1 + container.stop() + container.kill() output = container.logs(stdout=False, stderr=True) output += container.logs(stdout=True, stderr=False) container.remove() @@ -384,8 +399,7 @@ class LambdaFunction(BaseModel): from moto.cloudformation.exceptions import \ UnformattedGetAttTemplateException if attribute_name == 'Arn': - return 'arn:aws:lambda:{0}:123456789012:function:{1}'.format( - self.region, self.function_name) + return make_function_arn(self.region, ACCOUNT_ID, self.function_name) raise UnformattedGetAttTemplateException() @staticmethod @@ -441,9 +455,121 @@ class LambdaVersion(BaseModel): return LambdaVersion(spec) +class LambdaStorage(object): + def __init__(self): + # Format 'func_name' {'alias': {}, 'versions': []} + self._functions = {} + self._arns = weakref.WeakValueDictionary() + + def _get_latest(self, name): + return self._functions[name]['latest'] + + def _get_version(self, name, version): + index = version - 1 + + try: + return self._functions[name]['versions'][index] + except IndexError: + return None + + def _get_alias(self, name, alias): + return self._functions[name]['alias'].get(alias, None) + + def get_function(self, name, qualifier=None): + if name not in self._functions: + return None + + if qualifier is None: + return self._get_latest(name) + + try: + return self._get_version(name, int(qualifier)) + except ValueError: + return self._functions[name]['latest'] + + def get_arn(self, arn): + return self._arns.get(arn, None) + + def put_function(self, fn): + """ + :param fn: Function + :type fn: LambdaFunction + """ + if fn.function_name in self._functions: + self._functions[fn.function_name]['latest'] = fn + else: + self._functions[fn.function_name] = { + 'latest': fn, + 'versions': [], + 'alias': weakref.WeakValueDictionary() + } + + self._arns[fn.function_arn] = fn + + def publish_function(self, name): + if name not in self._functions: + return None + if not self._functions[name]['latest']: + return None + + new_version = len(self._functions[name]['versions']) + 1 + fn = copy.copy(self._functions[name]['latest']) + fn.set_version(new_version) + + self._functions[name]['versions'].append(fn) + return fn + + def del_function(self, name, qualifier=None): + if name in self._functions: + if not qualifier: + # Something is still reffing this so delete all arns + latest = self._functions[name]['latest'].function_arn + del self._arns[latest] + + for fn in self._functions[name]['versions']: + del self._arns[fn.function_arn] + + del self._functions[name] + + return True + + elif qualifier == '$LATEST': + self._functions[name]['latest'] = None + + # If theres no functions left + if not self._functions[name]['versions'] and not self._functions[name]['latest']: + del self._functions[name] + + return True + + else: + fn = self.get_function(name, qualifier) + if fn: + self._functions[name]['versions'].remove(fn) + + # If theres no functions left + if not self._functions[name]['versions'] and not self._functions[name]['latest']: + del self._functions[name] + + return True + + return False + + def all(self): + result 
= [] + + for function_group in self._functions.values(): + if function_group['latest'] is not None: + result.append(function_group['latest']) + + result.extend(function_group['versions']) + + return result + + class LambdaBackend(BaseBackend): def __init__(self, region_name): - self._functions = {} + self._lambdas = LambdaStorage() self.region_name = region_name def reset(self): @@ -451,31 +577,31 @@ class LambdaBackend(BaseBackend): self.__dict__ = {} self.__init__(region_name) - def has_function(self, function_name): - return function_name in self._functions - - def has_function_arn(self, function_arn): - return self.get_function_by_arn(function_arn) is not None - def create_function(self, spec): - fn = LambdaFunction(spec, self.region_name) - self._functions[fn.function_name] = fn + function_name = spec.get('FunctionName', None) + if function_name is None: + raise RESTError('InvalidParameterValueException', 'Missing FunctionName') + + fn = LambdaFunction(spec, self.region_name, version='$LATEST') + + self._lambdas.put_function(fn) + return fn - def get_function(self, function_name): - return self._functions[function_name] + def publish_function(self, function_name): + return self._lambdas.publish_function(function_name) + + def get_function(self, function_name, qualifier=None): + return self._lambdas.get_function(function_name, qualifier) def get_function_by_arn(self, function_arn): - for function in self._functions.values(): - if function.function_arn == function_arn: - return function - return None + return self._lambdas.get_arn(function_arn) - def delete_function(self, function_name): - del self._functions[function_name] + def delete_function(self, function_name, qualifier=None): + return self._lambdas.del_function(function_name, qualifier) def list_functions(self): - return self._functions.values() + return self._lambdas.all() def send_message(self, function_name, message): event = { @@ -510,23 +636,31 @@ class LambdaBackend(BaseBackend): ] } - self._functions[function_name].invoke(json.dumps(event), {}, {}) + self._functions[function_name][-1].invoke(json.dumps(event), {}, {}) pass def list_tags(self, resource): return self.get_function_by_arn(resource).tags def tag_resource(self, resource, tags): - self.get_function_by_arn(resource).tags.update(tags) + fn = self.get_function_by_arn(resource) + if not fn: + return False + + fn.tags.update(tags) + return True def untag_resource(self, resource, tagKeys): - function = self.get_function_by_arn(resource) - for key in tagKeys: - try: - del function.tags[key] - except KeyError: - pass - # Don't care + fn = self.get_function_by_arn(resource) + if fn: + for key in tagKeys: + try: + del fn.tags[key] + except KeyError: + pass + # Don't care + return True + return False def add_policy(self, function_name, policy): self.get_function(function_name).policy = policy diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 5215f63c5..4e8759b2f 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -5,14 +5,31 @@ import re try: from urllib import unquote - from urlparse import urlparse, parse_qs except: - from urllib.parse import unquote, urlparse, parse_qs + from urllib.parse import unquote +from moto.core.utils import amz_crc32, amzn_request_id from moto.core.responses import BaseResponse +from .models import lambda_backends class LambdaResponse(BaseResponse): + @property + def json_body(self): + """ + :return: JSON + :rtype: dict + """ + return json.loads(self.body) + + @property + def 
lambda_backend(self): + """ + Get backend + :return: Lambda Backend + :rtype: moto.awslambda.models.LambdaBackend + """ + return lambda_backends[self.region] def root(self, request, full_url, headers): self.setup_class(request, full_url, headers) @@ -32,6 +49,18 @@ class LambdaResponse(BaseResponse): else: raise ValueError("Cannot handle request") + def versions(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + if request.method == 'GET': + # This is ListVersionByFunction + raise ValueError("Cannot handle request") + elif request.method == 'POST': + return self._publish_function(request, full_url, headers) + else: + raise ValueError("Cannot handle request") + + @amz_crc32 + @amzn_request_id def invoke(self, request, full_url, headers): self.setup_class(request, full_url, headers) if request.method == 'POST': @@ -39,6 +68,8 @@ class LambdaResponse(BaseResponse): else: raise ValueError("Cannot handle request") + @amz_crc32 + @amzn_request_id def invoke_async(self, request, full_url, headers): self.setup_class(request, full_url, headers) if request.method == 'POST': @@ -88,13 +119,12 @@ class LambdaResponse(BaseResponse): def _invoke(self, request, full_url): response_headers = {} - lambda_backend = self.get_lambda_backend(full_url) - path = request.path if hasattr(request, 'path') else request.path_url - function_name = path.split('/')[-2] + function_name = self.path.rsplit('/', 2)[-2] + qualifier = self._get_param('qualifier') - if lambda_backend.has_function(function_name): - fn = lambda_backend.get_function(function_name) + fn = self.lambda_backend.get_function(function_name, qualifier) + if fn: payload = fn.invoke(self.body, self.headers, response_headers) response_headers['Content-Length'] = str(len(payload)) return 202, response_headers, payload @@ -103,66 +133,70 @@ class LambdaResponse(BaseResponse): def _invoke_async(self, request, full_url): response_headers = {} - lambda_backend = self.get_lambda_backend(full_url) - path = request.path if hasattr(request, 'path') else request.path_url - function_name = path.split('/')[-3] - if lambda_backend.has_function(function_name): - fn = lambda_backend.get_function(function_name) - fn.invoke(self.body, self.headers, response_headers) - response_headers['Content-Length'] = str(0) - return 202, response_headers, "" + function_name = self.path.rsplit('/', 3)[-3] + + fn = self.lambda_backend.get_function(function_name, None) + if fn: + payload = fn.invoke(self.body, self.headers, response_headers) + response_headers['Content-Length'] = str(len(payload)) + return 202, response_headers, payload else: return 404, response_headers, "{}" def _list_functions(self, request, full_url, headers): - lambda_backend = self.get_lambda_backend(full_url) - return 200, {}, json.dumps({ - "Functions": [fn.get_configuration() for fn in lambda_backend.list_functions()], - # "NextMarker": str(uuid.uuid4()), - }) + result = { + 'Functions': [] + } + + for fn in self.lambda_backend.list_functions(): + json_data = fn.get_configuration() + + result['Functions'].append(json_data) + + return 200, {}, json.dumps(result) def _create_function(self, request, full_url, headers): - lambda_backend = self.get_lambda_backend(full_url) - spec = json.loads(self.body) try: - fn = lambda_backend.create_function(spec) + fn = self.lambda_backend.create_function(self.json_body) except ValueError as e: return 400, {}, json.dumps({"Error": {"Code": e.args[0], "Message": e.args[1]}}) else: config = fn.get_configuration() return 201, {}, json.dumps(config) + 
def _publish_function(self, request, full_url, headers): + function_name = self.path.rsplit('/', 2)[-2] + + fn = self.lambda_backend.publish_function(function_name) + if fn: + config = fn.get_configuration() + return 200, {}, json.dumps(config) + else: + return 404, {}, "{}" + def _delete_function(self, request, full_url, headers): - lambda_backend = self.get_lambda_backend(full_url) + function_name = self.path.rsplit('/', 1)[-1] + qualifier = self._get_param('Qualifier', None) - path = request.path if hasattr(request, 'path') else request.path_url - function_name = path.split('/')[-1] - - if lambda_backend.has_function(function_name): - lambda_backend.delete_function(function_name) + if self.lambda_backend.delete_function(function_name, qualifier): return 204, {}, "" else: return 404, {}, "{}" def _get_function(self, request, full_url, headers): - lambda_backend = self.get_lambda_backend(full_url) + function_name = self.path.rsplit('/', 1)[-1] + qualifier = self._get_param('Qualifier', None) - path = request.path if hasattr(request, 'path') else request.path_url - function_name = path.split('/')[-1] + fn = self.lambda_backend.get_function(function_name, qualifier) - if lambda_backend.has_function(function_name): - fn = lambda_backend.get_function(function_name) + if fn: code = fn.get_code() + return 200, {}, json.dumps(code) else: return 404, {}, "{}" - def get_lambda_backend(self, full_url): - from moto.awslambda.models import lambda_backends - region = self._get_aws_region(full_url) - return lambda_backends[region] - def _get_aws_region(self, full_url): region = re.search(self.region_regex, full_url) if region: @@ -171,41 +205,27 @@ class LambdaResponse(BaseResponse): return self.default_region def _list_tags(self, request, full_url): - lambda_backend = self.get_lambda_backend(full_url) + function_arn = unquote(self.path.rsplit('/', 1)[-1]) - path = request.path if hasattr(request, 'path') else request.path_url - function_arn = unquote(path.split('/')[-1]) - - if lambda_backend.has_function_arn(function_arn): - function = lambda_backend.get_function_by_arn(function_arn) - return 200, {}, json.dumps(dict(Tags=function.tags)) + fn = self.lambda_backend.get_function_by_arn(function_arn) + if fn: + return 200, {}, json.dumps({'Tags': fn.tags}) else: return 404, {}, "{}" def _tag_resource(self, request, full_url): - lambda_backend = self.get_lambda_backend(full_url) + function_arn = unquote(self.path.rsplit('/', 1)[-1]) - path = request.path if hasattr(request, 'path') else request.path_url - function_arn = unquote(path.split('/')[-1]) - - spec = json.loads(self.body) - - if lambda_backend.has_function_arn(function_arn): - lambda_backend.tag_resource(function_arn, spec['Tags']) + if self.lambda_backend.tag_resource(function_arn, self.json_body['Tags']): return 200, {}, "{}" else: return 404, {}, "{}" def _untag_resource(self, request, full_url): - lambda_backend = self.get_lambda_backend(full_url) + function_arn = unquote(self.path.rsplit('/', 1)[-1]) + tag_keys = self.querystring['tagKeys'] - path = request.path if hasattr(request, 'path') else request.path_url - function_arn = unquote(path.split('/')[-1].split('?')[0]) - - tag_keys = parse_qs(urlparse(full_url).query)['tagKeys'] - - if lambda_backend.has_function_arn(function_arn): - lambda_backend.untag_resource(function_arn, tag_keys) + if self.lambda_backend.untag_resource(function_arn, tag_keys): return 204, {}, "{}" else: return 404, {}, "{}" diff --git a/moto/awslambda/urls.py b/moto/awslambda/urls.py index 005785f19..7c4d064dc 
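
A hedged sketch of the client-side behaviour these handler changes are meant to support, with the versions route backing publish_version and the Qualifier parameter selecting a specific version; the function name, role ARN and zip payload are placeholders:

```python
import io
import zipfile
import boto3
from moto import mock_lambda

def _zip(source):
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as zf:
        zf.writestr("lambda_function.py", source)
    return buf.getvalue()

@mock_lambda
def exercise_lambda_versions_and_tags():
    client = boto3.client("lambda", region_name="us-west-2")
    created = client.create_function(
        FunctionName="testFunction",
        Runtime="python2.7",
        Role="arn:aws:iam::123456789012:role/test-role",
        Handler="lambda_function.handler",
        Code={"ZipFile": _zip("def handler(event, context):\n    return event\n")})

    # POST /functions/<name>/versions is routed to _publish_function above;
    # the first published version should come back as "1".
    client.publish_version(FunctionName="testFunction")

    # Qualifiers select a specific version; omitting the qualifier returns $LATEST.
    client.get_function(FunctionName="testFunction", Qualifier="1")

    # Tagging operates on the function ARN.
    arn = created["FunctionArn"]
    client.tag_resource(Resource=arn, Tags={"team": "platform"})
    client.list_tags(Resource=arn)
    client.untag_resource(Resource=arn, TagKeys=["team"])

    # Deleting with a qualifier removes only that version.
    client.delete_function(FunctionName="testFunction", Qualifier="1")
```
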
100644 --- a/moto/awslambda/urls.py +++ b/moto/awslambda/urls.py @@ -10,6 +10,7 @@ response = LambdaResponse() url_paths = { '{0}/(?P[^/]+)/functions/?$': response.root, r'{0}/(?P[^/]+)/functions/(?P[\w_-]+)/?$': response.function, + r'{0}/(?P[^/]+)/functions/(?P[\w_-]+)/versions/?$': response.versions, r'{0}/(?P[^/]+)/functions/(?P[\w_-]+)/invocations/?$': response.invoke, r'{0}/(?P[^/]+)/functions/(?P[\w_-]+)/invoke-async/?$': response.invoke_async, r'{0}/(?P[^/]+)/tags/(?P.+)': response.tag, diff --git a/moto/awslambda/utils.py b/moto/awslambda/utils.py new file mode 100644 index 000000000..88146d34f --- /dev/null +++ b/moto/awslambda/utils.py @@ -0,0 +1,15 @@ +from collections import namedtuple + +ARN = namedtuple('ARN', ['region', 'account', 'function_name', 'version']) + + +def make_function_arn(region, account, name, version='1'): + return 'arn:aws:lambda:{0}:{1}:function:{2}:{3}'.format(region, account, name, version) + + +def split_function_arn(arn): + arn = arn.replace('arn:aws:lambda:') + + region, account, _, name, version = arn.split(':') + + return ARN(region, account, name, version) diff --git a/moto/backends.py b/moto/backends.py index 24a8b6c2b..6baf35f05 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -35,11 +35,17 @@ from moto.sqs import sqs_backends from moto.ssm import ssm_backends from moto.sts import sts_backends from moto.xray import xray_backends +from moto.iot import iot_backends +from moto.iotdata import iotdata_backends +from moto.batch import batch_backends +from moto.resourcegroupstaggingapi import resourcegroupstaggingapi_backends + BACKENDS = { 'acm': acm_backends, 'apigateway': apigateway_backends, 'autoscaling': autoscaling_backends, + 'batch': batch_backends, 'cloudformation': cloudformation_backends, 'cloudwatch': cloudwatch_backends, 'datapipeline': datapipeline_backends, @@ -72,7 +78,10 @@ BACKENDS = { 'sts': sts_backends, 'route53': route53_backends, 'lambda': lambda_backends, - 'xray': xray_backends + 'xray': xray_backends, + 'resourcegroupstaggingapi': resourcegroupstaggingapi_backends, + 'iot': iot_backends, + 'iot-data': iotdata_backends, } diff --git a/moto/batch/__init__.py b/moto/batch/__init__.py new file mode 100644 index 000000000..6002b6fc7 --- /dev/null +++ b/moto/batch/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import batch_backends +from ..core.models import base_decorator + +batch_backend = batch_backends['us-east-1'] +mock_batch = base_decorator(batch_backends) diff --git a/moto/batch/exceptions.py b/moto/batch/exceptions.py new file mode 100644 index 000000000..a71e54ce3 --- /dev/null +++ b/moto/batch/exceptions.py @@ -0,0 +1,37 @@ +from __future__ import unicode_literals +import json + + +class AWSError(Exception): + CODE = None + STATUS = 400 + + def __init__(self, message, code=None, status=None): + self.message = message + self.code = code if code is not None else self.CODE + self.status = status if status is not None else self.STATUS + + def response(self): + return json.dumps({'__type': self.code, 'message': self.message}), dict(status=self.status) + + +class InvalidRequestException(AWSError): + CODE = 'InvalidRequestException' + + +class InvalidParameterValueException(AWSError): + CODE = 'InvalidParameterValue' + + +class ValidationError(AWSError): + CODE = 'ValidationError' + + +class InternalFailure(AWSError): + CODE = 'InternalFailure' + STATUS = 500 + + +class ClientException(AWSError): + CODE = 'ClientException' + STATUS = 400 diff --git a/moto/batch/models.py 
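
Note that `split_function_arn` as written passes a single argument to `str.replace`, which raises a `TypeError` at runtime. A working variant of the helper would look roughly like this:

```python
from collections import namedtuple

ARN = namedtuple('ARN', ['region', 'account', 'function_name', 'version'])

def split_function_arn(arn):
    # Strip the fixed 'arn:aws:lambda:' prefix, then split the remainder:
    # region:account:function:name:version
    arn = arn.replace('arn:aws:lambda:', '')
    region, account, _, name, version = arn.split(':')
    return ARN(region, account, name, version)
```
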
b/moto/batch/models.py new file mode 100644 index 000000000..8b3b81ccb --- /dev/null +++ b/moto/batch/models.py @@ -0,0 +1,1042 @@ +from __future__ import unicode_literals +import boto3 +import re +import requests.adapters +from itertools import cycle +import six +import datetime +import time +import uuid +import logging +import docker +import functools +import threading +import dateutil.parser +from moto.core import BaseBackend, BaseModel +from moto.iam import iam_backends +from moto.ec2 import ec2_backends +from moto.ecs import ecs_backends +from moto.logs import logs_backends + +from .exceptions import InvalidParameterValueException, InternalFailure, ClientException +from .utils import make_arn_for_compute_env, make_arn_for_job_queue, make_arn_for_task_def, lowercase_first_key +from moto.ec2.exceptions import InvalidSubnetIdError +from moto.ec2.models import INSTANCE_TYPES as EC2_INSTANCE_TYPES +from moto.iam.exceptions import IAMNotFoundException + + +_orig_adapter_send = requests.adapters.HTTPAdapter.send +logger = logging.getLogger(__name__) +DEFAULT_ACCOUNT_ID = 123456789012 +COMPUTE_ENVIRONMENT_NAME_REGEX = re.compile(r'^[A-Za-z0-9][A-Za-z0-9_-]{1,126}[A-Za-z0-9]$') + + +def datetime2int(date): + return int(time.mktime(date.timetuple())) + + +class ComputeEnvironment(BaseModel): + def __init__(self, compute_environment_name, _type, state, compute_resources, service_role, region_name): + self.name = compute_environment_name + self.env_type = _type + self.state = state + self.compute_resources = compute_resources + self.service_role = service_role + self.arn = make_arn_for_compute_env(DEFAULT_ACCOUNT_ID, compute_environment_name, region_name) + + self.instances = [] + self.ecs_arn = None + self.ecs_name = None + + def add_instance(self, instance): + self.instances.append(instance) + + def set_ecs(self, arn, name): + self.ecs_arn = arn + self.ecs_name = name + + @property + def physical_resource_id(self): + return self.arn + + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + backend = batch_backends[region_name] + properties = cloudformation_json['Properties'] + + env = backend.create_compute_environment( + resource_name, + properties['Type'], + properties.get('State', 'ENABLED'), + lowercase_first_key(properties['ComputeResources']), + properties['ServiceRole'] + ) + arn = env[1] + + return backend.get_compute_environment_by_arn(arn) + + +class JobQueue(BaseModel): + def __init__(self, name, priority, state, environments, env_order_json, region_name): + """ + :param name: Job queue name + :type name: str + :param priority: Job queue priority + :type priority: int + :param state: Either ENABLED or DISABLED + :type state: str + :param environments: Compute Environments + :type environments: list of ComputeEnvironment + :param env_order_json: Compute Environments JSON for use when describing + :type env_order_json: list of dict + :param region_name: Region name + :type region_name: str + """ + self.name = name + self.priority = priority + self.state = state + self.environments = environments + self.env_order_json = env_order_json + self.arn = make_arn_for_job_queue(DEFAULT_ACCOUNT_ID, name, region_name) + self.status = 'VALID' + + self.jobs = [] + + def describe(self): + result = { + 'computeEnvironmentOrder': self.env_order_json, + 'jobQueueArn': self.arn, + 'jobQueueName': self.name, + 'priority': self.priority, + 'state': self.state, + 'status': self.status + } + + return result + + @property + def physical_resource_id(self): 
+ return self.arn + + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + backend = batch_backends[region_name] + properties = cloudformation_json['Properties'] + + # Need to deal with difference case from cloudformation compute_resources, e.g. instanceRole vs InstanceRole + # Hacky fix to normalise keys, is making me think I want to start spamming cAsEiNsEnSiTiVe dictionaries + compute_envs = [lowercase_first_key(dict_item) for dict_item in properties['ComputeEnvironmentOrder']] + + queue = backend.create_job_queue( + queue_name=resource_name, + priority=properties['Priority'], + state=properties.get('State', 'ENABLED'), + compute_env_order=compute_envs + ) + arn = queue[1] + + return backend.get_job_queue_by_arn(arn) + + +class JobDefinition(BaseModel): + def __init__(self, name, parameters, _type, container_properties, region_name, revision=0, retry_strategy=0): + self.name = name + self.retries = retry_strategy + self.type = _type + self.revision = revision + self._region = region_name + self.container_properties = container_properties + self.arn = None + self.status = 'INACTIVE' + + if parameters is None: + parameters = {} + self.parameters = parameters + + self._validate() + self._update_arn() + + def _update_arn(self): + self.revision += 1 + self.arn = make_arn_for_task_def(DEFAULT_ACCOUNT_ID, self.name, self.revision, self._region) + + def _validate(self): + if self.type not in ('container',): + raise ClientException('type must be one of "container"') + + # For future use when containers arnt the only thing in batch + if self.type != 'container': + raise NotImplementedError() + + if not isinstance(self.parameters, dict): + raise ClientException('parameters must be a string to string map') + + if 'image' not in self.container_properties: + raise ClientException('containerProperties must contain image') + + if 'memory' not in self.container_properties: + raise ClientException('containerProperties must contain memory') + if self.container_properties['memory'] < 4: + raise ClientException('container memory limit must be greater than 4') + + if 'vcpus' not in self.container_properties: + raise ClientException('containerProperties must contain vcpus') + if self.container_properties['vcpus'] < 1: + raise ClientException('container vcpus limit must be greater than 0') + + def update(self, parameters, _type, container_properties, retry_strategy): + if parameters is None: + parameters = self.parameters + + if _type is None: + _type = self.type + + if container_properties is None: + container_properties = self.container_properties + + if retry_strategy is None: + retry_strategy = self.retries + + return JobDefinition(self.name, parameters, _type, container_properties, region_name=self._region, revision=self.revision, retry_strategy=retry_strategy) + + def describe(self): + result = { + 'jobDefinitionArn': self.arn, + 'jobDefinitionName': self.name, + 'parameters': self.parameters, + 'revision': self.revision, + 'status': self.status, + 'type': self.type + } + if self.container_properties is not None: + result['containerProperties'] = self.container_properties + if self.retries is not None and self.retries > 0: + result['retryStrategy'] = {'attempts': self.retries} + + return result + + @property + def physical_resource_id(self): + return self.arn + + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + backend = batch_backends[region_name] + properties = 
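
A short sketch of what this validation implies when registering a job definition through boto3: the type must be "container", and containerProperties must carry an image, memory of at least 4 and vcpus of at least 1. The decorator import assumes mock_batch is exported from moto's top level, and all values are illustrative:

```python
import boto3
from moto import mock_batch  # assumed export of the new decorator

@mock_batch
def exercise_register_job_definition():
    client = boto3.client("batch", region_name="us-east-1")
    resp = client.register_job_definition(
        jobDefinitionName="sleep10",
        type="container",
        containerProperties={
            "image": "busybox",
            "vcpus": 1,
            "memory": 128,
            "command": ["sleep", "10"],
        })
    # The first registration should come back with revision 1; re-registering
    # the same name bumps the revision and yields a new ARN.
    return resp["jobDefinitionArn"]
```
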
cloudformation_json['Properties'] + + res = backend.register_job_definition( + def_name=resource_name, + parameters=lowercase_first_key(properties.get('Parameters', {})), + _type='container', + retry_strategy=lowercase_first_key(properties['RetryStrategy']), + container_properties=lowercase_first_key(properties['ContainerProperties']) + ) + + arn = res[1] + + return backend.get_job_definition_by_arn(arn) + + +class Job(threading.Thread, BaseModel): + def __init__(self, name, job_def, job_queue, log_backend): + """ + Docker Job + + :param name: Job Name + :param job_def: Job definition + :type: job_def: JobDefinition + :param job_queue: Job Queue + :param log_backend: Log backend + :type log_backend: moto.logs.models.LogsBackend + """ + threading.Thread.__init__(self) + + self.job_name = name + self.job_id = str(uuid.uuid4()) + self.job_definition = job_def + self.job_queue = job_queue + self.job_state = 'SUBMITTED' # One of SUBMITTED | PENDING | RUNNABLE | STARTING | RUNNING | SUCCEEDED | FAILED + self.job_queue.jobs.append(self) + self.job_started_at = datetime.datetime(1970, 1, 1) + self.job_stopped_at = datetime.datetime(1970, 1, 1) + self.job_stopped = False + self.job_stopped_reason = None + + self.stop = False + + self.daemon = True + self.name = 'MOTO-BATCH-' + self.job_id + + self.docker_client = docker.from_env() + self._log_backend = log_backend + + # Unfortunately mocking replaces this method w/o fallback enabled, so we + # need to replace it if we detect it's been mocked + if requests.adapters.HTTPAdapter.send != _orig_adapter_send: + _orig_get_adapter = self.docker_client.api.get_adapter + + def replace_adapter_send(*args, **kwargs): + adapter = _orig_get_adapter(*args, **kwargs) + + if isinstance(adapter, requests.adapters.HTTPAdapter): + adapter.send = functools.partial(_orig_adapter_send, adapter) + return adapter + self.docker_client.api.get_adapter = replace_adapter_send + + def describe(self): + result = { + 'jobDefinition': self.job_definition.arn, + 'jobId': self.job_id, + 'jobName': self.job_name, + 'jobQueue': self.job_queue.arn, + 'startedAt': datetime2int(self.job_started_at), + 'status': self.job_state, + 'dependsOn': [] + } + if self.job_stopped: + result['stoppedAt'] = datetime2int(self.job_stopped_at) + if self.job_stopped_reason is not None: + result['statusReason'] = self.job_stopped_reason + return result + + def run(self): + """ + Run the container. + + Logic is as follows: + Generate container info (eventually from task definition) + Start container + Loop whilst not asked to stop and the container is running. + Get all logs from container between the last time I checked and now. 
+ Convert logs into cloudwatch format + Put logs into cloudwatch + + :return: + """ + try: + self.job_state = 'PENDING' + time.sleep(1) + + image = 'alpine:latest' + cmd = '/bin/sh -c "for a in `seq 1 10`; do echo Hello World; sleep 1; done"' + name = '{0}-{1}'.format(self.job_name, self.job_id) + + self.job_state = 'RUNNABLE' + # TODO setup ecs container instance + time.sleep(1) + + self.job_state = 'STARTING' + container = self.docker_client.containers.run( + image, cmd, + detach=True, + name=name + ) + self.job_state = 'RUNNING' + self.job_started_at = datetime.datetime.now() + try: + # Log collection + logs_stdout = [] + logs_stderr = [] + container.reload() + + # Dodgy hack, we can only check docker logs once a second, but we want to loop more + # so we can stop if asked to in a quick manner, should all go away if we go async + # There also be some dodgyness when sending an integer to docker logs and some + # events seem to be duplicated. + now = datetime.datetime.now() + i = 1 + while container.status == 'running' and not self.stop: + time.sleep(0.15) + if i % 10 == 0: + logs_stderr.extend(container.logs(stdout=False, stderr=True, timestamps=True, since=datetime2int(now)).decode().split('\n')) + logs_stdout.extend(container.logs(stdout=True, stderr=False, timestamps=True, since=datetime2int(now)).decode().split('\n')) + now = datetime.datetime.now() + container.reload() + i += 1 + + # Container should be stopped by this point... unless asked to stop + if container.status == 'running': + container.kill() + + self.job_stopped_at = datetime.datetime.now() + # Get final logs + logs_stderr.extend(container.logs(stdout=False, stderr=True, timestamps=True, since=datetime2int(now)).decode().split('\n')) + logs_stdout.extend(container.logs(stdout=True, stderr=False, timestamps=True, since=datetime2int(now)).decode().split('\n')) + + self.job_state = 'SUCCEEDED' if not self.stop else 'FAILED' + + # Process logs + logs_stdout = [x for x in logs_stdout if len(x) > 0] + logs_stderr = [x for x in logs_stderr if len(x) > 0] + logs = [] + for line in logs_stdout + logs_stderr: + date, line = line.split(' ', 1) + date = dateutil.parser.parse(date) + date = int(date.timestamp()) + logs.append({'timestamp': date, 'message': line.strip()}) + + # Send to cloudwatch + log_group = '/aws/batch/job' + stream_name = '{0}/default/{1}'.format(self.job_definition.name, self.job_id) + self._log_backend.ensure_log_group(log_group, None) + self._log_backend.create_log_stream(log_group, stream_name) + self._log_backend.put_log_events(log_group, stream_name, logs, None) + + except Exception as err: + logger.error('Failed to run AWS Batch container {0}. Error {1}'.format(self.name, err)) + self.job_state = 'FAILED' + container.kill() + finally: + container.remove() + except Exception as err: + logger.error('Failed to run AWS Batch container {0}. 
Error {1}'.format(self.name, err)) + self.job_state = 'FAILED' + + self.job_stopped = True + self.job_stopped_at = datetime.datetime.now() + + def terminate(self, reason): + if not self.stop: + self.stop = True + self.job_stopped_reason = reason + + +class BatchBackend(BaseBackend): + def __init__(self, region_name=None): + super(BatchBackend, self).__init__() + self.region_name = region_name + + self._compute_environments = {} + self._job_queues = {} + self._job_definitions = {} + self._jobs = {} + + @property + def iam_backend(self): + """ + :return: IAM Backend + :rtype: moto.iam.models.IAMBackend + """ + return iam_backends['global'] + + @property + def ec2_backend(self): + """ + :return: EC2 Backend + :rtype: moto.ec2.models.EC2Backend + """ + return ec2_backends[self.region_name] + + @property + def ecs_backend(self): + """ + :return: ECS Backend + :rtype: moto.ecs.models.EC2ContainerServiceBackend + """ + return ecs_backends[self.region_name] + + @property + def logs_backend(self): + """ + :return: ECS Backend + :rtype: moto.logs.models.LogsBackend + """ + return logs_backends[self.region_name] + + def reset(self): + region_name = self.region_name + + for job in self._jobs.values(): + if job.job_state not in ('FAILED', 'SUCCEEDED'): + job.stop = True + # Try to join + job.join(0.2) + + self.__dict__ = {} + self.__init__(region_name) + + def get_compute_environment_by_arn(self, arn): + return self._compute_environments.get(arn) + + def get_compute_environment_by_name(self, name): + for comp_env in self._compute_environments.values(): + if comp_env.name == name: + return comp_env + return None + + def get_compute_environment(self, identifier): + """ + Get compute environment by name or ARN + :param identifier: Name or ARN + :type identifier: str + + :return: Compute Environment or None + :rtype: ComputeEnvironment or None + """ + env = self.get_compute_environment_by_arn(identifier) + if env is None: + env = self.get_compute_environment_by_name(identifier) + return env + + def get_job_queue_by_arn(self, arn): + return self._job_queues.get(arn) + + def get_job_queue_by_name(self, name): + for comp_env in self._job_queues.values(): + if comp_env.name == name: + return comp_env + return None + + def get_job_queue(self, identifier): + """ + Get job queue by name or ARN + :param identifier: Name or ARN + :type identifier: str + + :return: Job Queue or None + :rtype: JobQueue or None + """ + env = self.get_job_queue_by_arn(identifier) + if env is None: + env = self.get_job_queue_by_name(identifier) + return env + + def get_job_definition_by_arn(self, arn): + return self._job_definitions.get(arn) + + def get_job_definition_by_name(self, name): + for comp_env in self._job_definitions.values(): + if comp_env.name == name: + return comp_env + return None + + def get_job_definition_by_name_revision(self, name, revision): + for job_def in self._job_definitions.values(): + if job_def.name == name and job_def.revision == revision: + return job_def + return None + + def get_job_definition(self, identifier): + """ + Get job defintiion by name or ARN + :param identifier: Name or ARN + :type identifier: str + + :return: Job definition or None + :rtype: JobDefinition or None + """ + env = self.get_job_definition_by_arn(identifier) + if env is None: + env = self.get_job_definition_by_name(identifier) + return env + + def get_job_definitions(self, identifier): + """ + Get job defintiion by name or ARN + :param identifier: Name or ARN + :type identifier: str + + :return: Job definition or None + :rtype: 
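
Given the stream naming in run() above (log group "/aws/batch/job", stream "<job definition name>/default/<job id>"), the collected container output could be read back roughly like this; it assumes the job has already finished and that the logs backend is mocked alongside batch:

```python
import boto3

def fetch_batch_job_logs(job_definition_name, job_id, region="us-east-1"):
    """Read the log events a finished moto Batch job wrote to CloudWatch Logs."""
    logs = boto3.client("logs", region_name=region)
    stream_name = "{0}/default/{1}".format(job_definition_name, job_id)
    return logs.get_log_events(
        logGroupName="/aws/batch/job",
        logStreamName=stream_name)["events"]
```
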
list of JobDefinition + """ + result = [] + env = self.get_job_definition_by_arn(identifier) + if env is not None: + result.append(env) + else: + for value in self._job_definitions.values(): + if value.name == identifier: + result.append(value) + + return result + + def get_job_by_id(self, identifier): + """ + Get job by id + :param identifier: Job ID + :type identifier: str + + :return: Job + :rtype: Job + """ + try: + return self._jobs[identifier] + except KeyError: + return None + + def describe_compute_environments(self, environments=None, max_results=None, next_token=None): + envs = set() + if environments is not None: + envs = set(environments) + + result = [] + for arn, environment in self._compute_environments.items(): + # Filter shortcut + if len(envs) > 0 and arn not in envs and environment.name not in envs: + continue + + json_part = { + 'computeEnvironmentArn': arn, + 'computeEnvironmentName': environment.name, + 'ecsClusterArn': environment.ecs_arn, + 'serviceRole': environment.service_role, + 'state': environment.state, + 'type': environment.env_type, + 'status': 'VALID' + } + if environment.env_type == 'MANAGED': + json_part['computeResources'] = environment.compute_resources + + result.append(json_part) + + return result + + def create_compute_environment(self, compute_environment_name, _type, state, compute_resources, service_role): + # Validate + if COMPUTE_ENVIRONMENT_NAME_REGEX.match(compute_environment_name) is None: + raise InvalidParameterValueException('Compute environment name does not match ^[A-Za-z0-9][A-Za-z0-9_-]{1,126}[A-Za-z0-9]$') + + if self.get_compute_environment_by_name(compute_environment_name) is not None: + raise InvalidParameterValueException('A compute environment already exists with the name {0}'.format(compute_environment_name)) + + # Look for IAM role + try: + self.iam_backend.get_role_by_arn(service_role) + except IAMNotFoundException: + raise InvalidParameterValueException('Could not find IAM role {0}'.format(service_role)) + + if _type not in ('MANAGED', 'UNMANAGED'): + raise InvalidParameterValueException('type {0} must be one of MANAGED | UNMANAGED'.format(service_role)) + + if state is not None and state not in ('ENABLED', 'DISABLED'): + raise InvalidParameterValueException('state {0} must be one of ENABLED | DISABLED'.format(state)) + + if compute_resources is None and _type == 'MANAGED': + raise InvalidParameterValueException('computeResources must be specified when creating a MANAGED environment'.format(state)) + elif compute_resources is not None: + self._validate_compute_resources(compute_resources) + + # By here, all values except SPOT ones have been validated + new_comp_env = ComputeEnvironment( + compute_environment_name, _type, state, + compute_resources, service_role, + region_name=self.region_name + ) + self._compute_environments[new_comp_env.arn] = new_comp_env + + # Ok by this point, everything is legit, so if its Managed then start some instances + if _type == 'MANAGED': + cpus = int(compute_resources.get('desiredvCpus', compute_resources['minvCpus'])) + instance_types = compute_resources['instanceTypes'] + needed_instance_types = self.find_min_instances_to_meet_vcpus(instance_types, cpus) + # Create instances + + # Will loop over and over so we get decent subnet coverage + subnet_cycle = cycle(compute_resources['subnets']) + + for instance_type in needed_instance_types: + reservation = self.ec2_backend.add_instances( + image_id='ami-ecs-optimised', # Todo import AMIs + count=1, + user_data=None, + security_group_names=[], + 
instance_type=instance_type, + region_name=self.region_name, + subnet_id=six.next(subnet_cycle), + key_name=compute_resources.get('ec2KeyPair', 'AWS_OWNED'), + security_group_ids=compute_resources['securityGroupIds'] + ) + + new_comp_env.add_instance(reservation.instances[0]) + + # Create ECS cluster + # Should be of format P2OnDemand_Batch_UUID + cluster_name = 'OnDemand_Batch_' + str(uuid.uuid4()) + ecs_cluster = self.ecs_backend.create_cluster(cluster_name) + new_comp_env.set_ecs(ecs_cluster.arn, cluster_name) + + return compute_environment_name, new_comp_env.arn + + def _validate_compute_resources(self, cr): + """ + Checks contents of sub dictionary for managed clusters + + :param cr: computeResources + :type cr: dict + """ + for param in ('instanceRole', 'maxvCpus', 'minvCpus', 'instanceTypes', 'securityGroupIds', 'subnets', 'type'): + if param not in cr: + raise InvalidParameterValueException('computeResources must contain {0}'.format(param)) + + if self.iam_backend.get_role_by_arn(cr['instanceRole']) is None: + raise InvalidParameterValueException('could not find instanceRole {0}'.format(cr['instanceRole'])) + + if cr['maxvCpus'] < 0: + raise InvalidParameterValueException('maxVCpus must be positive') + if cr['minvCpus'] < 0: + raise InvalidParameterValueException('minVCpus must be positive') + if cr['maxvCpus'] < cr['minvCpus']: + raise InvalidParameterValueException('maxVCpus must be greater than minvCpus') + + if len(cr['instanceTypes']) == 0: + raise InvalidParameterValueException('At least 1 instance type must be provided') + for instance_type in cr['instanceTypes']: + if instance_type == 'optimal': + pass # Optimal should pick from latest of current gen + elif instance_type not in EC2_INSTANCE_TYPES: + raise InvalidParameterValueException('Instance type {0} does not exist'.format(instance_type)) + + for sec_id in cr['securityGroupIds']: + if self.ec2_backend.get_security_group_from_id(sec_id) is None: + raise InvalidParameterValueException('security group {0} does not exist'.format(sec_id)) + if len(cr['securityGroupIds']) == 0: + raise InvalidParameterValueException('At least 1 security group must be provided') + + for subnet_id in cr['subnets']: + try: + self.ec2_backend.get_subnet(subnet_id) + except InvalidSubnetIdError: + raise InvalidParameterValueException('subnet {0} does not exist'.format(subnet_id)) + if len(cr['subnets']) == 0: + raise InvalidParameterValueException('At least 1 subnet must be provided') + + if cr['type'] not in ('EC2', 'SPOT'): + raise InvalidParameterValueException('computeResources.type must be either EC2 | SPOT') + + if cr['type'] == 'SPOT': + raise InternalFailure('SPOT NOT SUPPORTED YET') + + @staticmethod + def find_min_instances_to_meet_vcpus(instance_types, target): + """ + Finds the minimum needed instances to meed a vcpu target + + :param instance_types: Instance types, like ['t2.medium', 't2.small'] + :type instance_types: list of str + :param target: VCPU target + :type target: float + :return: List of instance types + :rtype: list of str + """ + # vcpus = [ (vcpus, instance_type), (vcpus, instance_type), ... 
] + instance_vcpus = [] + instances = [] + + for instance_type in instance_types: + if instance_type == 'optimal': + instance_type = 'm4.4xlarge' + + instance_vcpus.append( + (EC2_INSTANCE_TYPES[instance_type]['vcpus'], instance_type) + ) + + instance_vcpus = sorted(instance_vcpus, key=lambda item: item[0], reverse=True) + # Loop through, + # if biggest instance type smaller than target, and len(instance_types)> 1, then use biggest type + # if biggest instance type bigger than target, and len(instance_types)> 1, then remove it and move on + + # if biggest instance type bigger than target and len(instance_types) == 1 then add instance and finish + # if biggest instance type smaller than target and len(instance_types) == 1 then loop adding instances until target == 0 + # ^^ boils down to keep adding last till target vcpus is negative + # #Algorithm ;-) ... Could probably be done better with some quality lambdas + while target > 0: + current_vcpu, current_instance = instance_vcpus[0] + + if len(instance_vcpus) > 1: + if current_vcpu <= target: + target -= current_vcpu + instances.append(current_instance) + else: + # try next biggest instance + instance_vcpus.pop(0) + else: + # We're on the last instance + target -= current_vcpu + instances.append(current_instance) + + return instances + + def delete_compute_environment(self, compute_environment_name): + if compute_environment_name is None: + raise InvalidParameterValueException('Missing computeEnvironment parameter') + + compute_env = self.get_compute_environment(compute_environment_name) + + if compute_env is not None: + # Pop ComputeEnvironment + self._compute_environments.pop(compute_env.arn) + + # Delete ECS cluster + self.ecs_backend.delete_cluster(compute_env.ecs_name) + + if compute_env.env_type == 'MANAGED': + # Delete compute environment + instance_ids = [instance.id for instance in compute_env.instances] + self.ec2_backend.terminate_instances(instance_ids) + + def update_compute_environment(self, compute_environment_name, state, compute_resources, service_role): + # Validate + compute_env = self.get_compute_environment(compute_environment_name) + if compute_env is None: + raise ClientException('Compute environment {0} does not exist'.format(compute_environment_name)) + + # Look for IAM role + if service_role is not None: + try: + role = self.iam_backend.get_role_by_arn(service_role) + except IAMNotFoundException: + raise InvalidParameterValueException('Could not find IAM role {0}'.format(service_role)) + + compute_env.service_role = role + + if state is not None: + if state not in ('ENABLED', 'DISABLED'): + raise InvalidParameterValueException('state {0} must be one of ENABLED | DISABLED'.format(state)) + + compute_env.state = state + + if compute_resources is not None: + # TODO Implement resizing of instances based on changing vCpus + # compute_resources CAN contain desiredvCpus, maxvCpus, minvCpus, and can contain none of them.
+ pass + + return compute_env.name, compute_env.arn + + def create_job_queue(self, queue_name, priority, state, compute_env_order): + """ + Create a job queue + + :param queue_name: Queue name + :type queue_name: str + :param priority: Queue priority + :type priority: int + :param state: Queue state + :type state: string + :param compute_env_order: Compute environment list + :type compute_env_order: list of dict + :return: Tuple of Name, ARN + :rtype: tuple of str + """ + for variable, var_name in ((queue_name, 'jobQueueName'), (priority, 'priority'), (state, 'state'), (compute_env_order, 'computeEnvironmentOrder')): + if variable is None: + raise ClientException('{0} must be provided'.format(var_name)) + + if state not in ('ENABLED', 'DISABLED'): + raise ClientException('state {0} must be one of ENABLED | DISABLED'.format(state)) + if self.get_job_queue_by_name(queue_name) is not None: + raise ClientException('Job queue {0} already exists'.format(queue_name)) + + if len(compute_env_order) == 0: + raise ClientException('At least 1 compute environment must be provided') + try: + # orders and extracts computeEnvironment names + ordered_compute_environments = [item['computeEnvironment'] for item in sorted(compute_env_order, key=lambda x: x['order'])] + env_objects = [] + # Check each ARN exists, then make a list of compute env's + for arn in ordered_compute_environments: + env = self.get_compute_environment_by_arn(arn) + if env is None: + raise ClientException('Compute environment {0} does not exist'.format(arn)) + env_objects.append(env) + except Exception: + raise ClientException('computeEnvironmentOrder is malformed') + + # Create new Job Queue + queue = JobQueue(queue_name, priority, state, env_objects, compute_env_order, self.region_name) + self._job_queues[queue.arn] = queue + + return queue_name, queue.arn + + def describe_job_queues(self, job_queues=None, max_results=None, next_token=None): + envs = set() + if job_queues is not None: + envs = set(job_queues) + + result = [] + for arn, job_queue in self._job_queues.items(): + # Filter shortcut + if len(envs) > 0 and arn not in envs and job_queue.name not in envs: + continue + + result.append(job_queue.describe()) + + return result + + def update_job_queue(self, queue_name, priority, state, compute_env_order): + """ + Update a job queue + + :param queue_name: Queue name + :type queue_name: str + :param priority: Queue priority + :type priority: int + :param state: Queue state + :type state: string + :param compute_env_order: Compute environment list + :type compute_env_order: list of dict + :return: Tuple of Name, ARN + :rtype: tuple of str + """ + if queue_name is None: + raise ClientException('jobQueueName must be provided') + + job_queue = self.get_job_queue(queue_name) + if job_queue is None: + raise ClientException('Job queue {0} does not exist'.format(queue_name)) + + if state is not None: + if state not in ('ENABLED', 'DISABLED'): + raise ClientException('state {0} must be one of ENABLED | DISABLED'.format(state)) + + job_queue.state = state + + if compute_env_order is not None: + if len(compute_env_order) == 0: + raise ClientException('At least 1 compute environment must be provided') + try: + # orders and extracts computeEnvironment names + ordered_compute_environments = [item['computeEnvironment'] for item in sorted(compute_env_order, key=lambda x: x['order'])] + env_objects = [] + # Check each ARN exists, then make a list of compute env's + for arn in ordered_compute_environments: + env = 
self.get_compute_environment_by_arn(arn) + if env is None: + raise ClientException('Compute environment {0} does not exist'.format(arn)) + env_objects.append(env) + except Exception: + raise ClientException('computeEnvironmentOrder is malformed') + + job_queue.env_order_json = compute_env_order + job_queue.environments = env_objects + + if priority is not None: + job_queue.priority = priority + + return queue_name, job_queue.arn + + def delete_job_queue(self, queue_name): + job_queue = self.get_job_queue(queue_name) + + if job_queue is not None: + del self._job_queues[job_queue.arn] + + def register_job_definition(self, def_name, parameters, _type, retry_strategy, container_properties): + if def_name is None: + raise ClientException('jobDefinitionName must be provided') + + job_def = self.get_job_definition_by_name(def_name) + if retry_strategy is not None: + try: + retry_strategy = retry_strategy['attempts'] + except Exception: + raise ClientException('retryStrategy is malformed') + + if job_def is None: + job_def = JobDefinition(def_name, parameters, _type, container_properties, region_name=self.region_name, retry_strategy=retry_strategy) + else: + # Make new jobdef + job_def = job_def.update(parameters, _type, container_properties, retry_strategy) + + self._job_definitions[job_def.arn] = job_def + + return def_name, job_def.arn, job_def.revision + + def deregister_job_definition(self, def_name): + job_def = self.get_job_definition_by_arn(def_name) + if job_def is None and ':' in def_name: + name, revision = def_name.split(':', 1) + job_def = self.get_job_definition_by_name_revision(name, revision) + + if job_def is not None: + del self._job_definitions[job_def.arn] + + def describe_job_definitions(self, job_def_name=None, job_def_list=None, status=None, max_results=None, next_token=None): + jobs = [] + + # As a job name can reference multiple revisions, we get a list of them + if job_def_name is not None: + job_def = self.get_job_definitions(job_def_name) + if job_def is not None: + jobs.extend(job_def) + elif job_def_list is not None: + for job in job_def_list: + job_def = self.get_job_definitions(job) + if job_def is not None: + jobs.extend(job_def) + else: + jobs.extend(self._job_definitions.values()) + + # Got all the job defs were after, filter then by status + if status is not None: + return [job for job in jobs if job.status == status] + return jobs + + def submit_job(self, job_name, job_def_id, job_queue, parameters=None, retries=None, depends_on=None, container_overrides=None): + # TODO parameters, retries (which is a dict raw from request), job dependancies and container overrides are ignored for now + + # Look for job definition + job_def = self.get_job_definition_by_arn(job_def_id) + if job_def is None and ':' in job_def_id: + job_def = self.get_job_definition_by_name_revision(*job_def_id.split(':', 1)) + if job_def is None: + raise ClientException('Job definition {0} does not exist'.format(job_def_id)) + + queue = self.get_job_queue(job_queue) + if queue is None: + raise ClientException('Job queue {0} does not exist'.format(job_queue)) + + job = Job(job_name, job_def, queue, log_backend=self.logs_backend) + self._jobs[job.job_id] = job + + # Here comes the fun + job.start() + + return job_name, job.job_id + + def describe_jobs(self, jobs): + job_filter = set() + if jobs is not None: + job_filter = set(jobs) + + result = [] + for key, job in self._jobs.items(): + if len(job_filter) > 0 and key not in job_filter: + continue + + result.append(job.describe()) + + return result 
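As a rough sketch of how these backend methods get exercised end to end through the boto3 `batch` client and the response handlers further down, under the assumption that moto exposes `mock_batch` and `mock_iam` decorators for these backends; every name, ARN, and container property below is a placeholder, and the actual job execution depends on the `Job` thread started by `submit_job` above:

```python
import boto3
from moto import mock_batch, mock_iam  # assumes both decorators are exported by moto


@mock_iam
@mock_batch
def batch_round_trip_sketch():
    # The service role only needs to exist in the mocked IAM backend,
    # since create_compute_environment looks it up via iam_backend.
    iam = boto3.client('iam', region_name='us-east-1')
    role_arn = iam.create_role(
        RoleName='BatchServiceRole',
        AssumeRolePolicyDocument='{}',
    )['Role']['Arn']

    batch = boto3.client('batch', region_name='us-east-1')

    # UNMANAGED avoids having to pass computeResources (subnets, security groups, ...)
    env_arn = batch.create_compute_environment(
        computeEnvironmentName='test_env',
        type='UNMANAGED',
        state='ENABLED',
        serviceRole=role_arn,
    )['computeEnvironmentArn']

    queue_arn = batch.create_job_queue(
        jobQueueName='test_queue',
        state='ENABLED',
        priority=1,
        computeEnvironmentOrder=[{'order': 1, 'computeEnvironment': env_arn}],
    )['jobQueueArn']

    job_def_arn = batch.register_job_definition(
        jobDefinitionName='sleep5',
        type='container',
        containerProperties={
            'image': 'busybox',
            'vcpus': 1,
            'memory': 128,
            'command': ['sleep', '5'],
        },
    )['jobDefinitionArn']

    # submit_job kicks off the Job thread seen earlier in this module,
    # so the state reported by describe_jobs advances asynchronously.
    job_id = batch.submit_job(
        jobName='sleep5-run',
        jobQueue=queue_arn,
        jobDefinition=job_def_arn,
    )['jobId']

    return batch.describe_jobs(jobs=[job_id])['jobs']
```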
+ + def list_jobs(self, job_queue, job_status=None, max_results=None, next_token=None): + jobs = [] + + queue = self.get_job_queue(job_queue) + if queue is None: + raise ClientException('Job queue {0} does not exist'.format(job_queue)) + + if job_status is not None and job_status not in ('SUBMITTED', 'PENDING', 'RUNNABLE', 'STARTING', 'RUNNING', 'SUCCEEDED', 'FAILED'): + raise ClientException('Job status is not one of SUBMITTED | PENDING | RUNNABLE | STARTING | RUNNING | SUCCEEDED | FAILED') + + for job in queue.jobs: + if job_status is not None and job.job_state != job_status: + continue + + jobs.append(job) + + return jobs + + def terminate_job(self, job_id, reason): + if job_id is None: + raise ClientException('Job ID does not exist') + if reason is None: + raise ClientException('Reason does not exist') + + job = self.get_job_by_id(job_id) + if job is None: + raise ClientException('Job not found') + + job.terminate(reason) + + +available_regions = boto3.session.Session().get_available_regions("batch") +batch_backends = {region: BatchBackend(region_name=region) for region in available_regions} diff --git a/moto/batch/responses.py b/moto/batch/responses.py new file mode 100644 index 000000000..e626b7d4c --- /dev/null +++ b/moto/batch/responses.py @@ -0,0 +1,296 @@ +from __future__ import unicode_literals +from moto.core.responses import BaseResponse +from .models import batch_backends +from six.moves.urllib.parse import urlsplit + +from .exceptions import AWSError + +import json + + +class BatchResponse(BaseResponse): + def _error(self, code, message): + return json.dumps({'__type': code, 'message': message}), dict(status=400) + + @property + def batch_backend(self): + """ + :return: Batch Backend + :rtype: moto.batch.models.BatchBackend + """ + return batch_backends[self.region] + + @property + def json(self): + if self.body is None or self.body == '': + self._json = {} + elif not hasattr(self, '_json'): + try: + self._json = json.loads(self.body) + except ValueError: + self._json = {} + return self._json + + def _get_param(self, param_name, if_none=None): + val = self.json.get(param_name) + if val is not None: + return val + return if_none + + def _get_action(self): + # Return element after the /v1/* + return urlsplit(self.uri).path.lstrip('/').split('/')[1] + + # CreateComputeEnvironment + def createcomputeenvironment(self): + compute_env_name = self._get_param('computeEnvironmentName') + compute_resource = self._get_param('computeResources') + service_role = self._get_param('serviceRole') + state = self._get_param('state') + _type = self._get_param('type') + + try: + name, arn = self.batch_backend.create_compute_environment( + compute_environment_name=compute_env_name, + _type=_type, state=state, + compute_resources=compute_resource, + service_role=service_role + ) + except AWSError as err: + return err.response() + + result = { + 'computeEnvironmentArn': arn, + 'computeEnvironmentName': name + } + + return json.dumps(result) + + # DescribeComputeEnvironments + def describecomputeenvironments(self): + compute_environments = self._get_param('computeEnvironments') + max_results = self._get_param('maxResults') # Ignored, should be int + next_token = self._get_param('nextToken') # Ignored + + envs = self.batch_backend.describe_compute_environments(compute_environments, max_results=max_results, next_token=next_token) + + result = {'computeEnvironments': envs} + return json.dumps(result) + + # DeleteComputeEnvironment + def deletecomputeenvironment(self): + compute_environment
= self._get_param('computeEnvironment') + + try: + self.batch_backend.delete_compute_environment(compute_environment) + except AWSError as err: + return err.response() + + return '' + + # UpdateComputeEnvironment + def updatecomputeenvironment(self): + compute_env_name = self._get_param('computeEnvironment') + compute_resource = self._get_param('computeResources') + service_role = self._get_param('serviceRole') + state = self._get_param('state') + + try: + name, arn = self.batch_backend.update_compute_environment( + compute_environment_name=compute_env_name, + compute_resources=compute_resource, + service_role=service_role, + state=state + ) + except AWSError as err: + return err.response() + + result = { + 'computeEnvironmentArn': arn, + 'computeEnvironmentName': name + } + + return json.dumps(result) + + # CreateJobQueue + def createjobqueue(self): + compute_env_order = self._get_param('computeEnvironmentOrder') + queue_name = self._get_param('jobQueueName') + priority = self._get_param('priority') + state = self._get_param('state') + + try: + name, arn = self.batch_backend.create_job_queue( + queue_name=queue_name, + priority=priority, + state=state, + compute_env_order=compute_env_order + ) + except AWSError as err: + return err.response() + + result = { + 'jobQueueArn': arn, + 'jobQueueName': name + } + + return json.dumps(result) + + # DescribeJobQueues + def describejobqueues(self): + job_queues = self._get_param('jobQueues') + max_results = self._get_param('maxResults') # Ignored, should be int + next_token = self._get_param('nextToken') # Ignored + + queues = self.batch_backend.describe_job_queues(job_queues, max_results=max_results, next_token=next_token) + + result = {'jobQueues': queues} + return json.dumps(result) + + # UpdateJobQueue + def updatejobqueue(self): + compute_env_order = self._get_param('computeEnvironmentOrder') + queue_name = self._get_param('jobQueue') + priority = self._get_param('priority') + state = self._get_param('state') + + try: + name, arn = self.batch_backend.update_job_queue( + queue_name=queue_name, + priority=priority, + state=state, + compute_env_order=compute_env_order + ) + except AWSError as err: + return err.response() + + result = { + 'jobQueueArn': arn, + 'jobQueueName': name + } + + return json.dumps(result) + + # DeleteJobQueue + def deletejobqueue(self): + queue_name = self._get_param('jobQueue') + + self.batch_backend.delete_job_queue(queue_name) + + return '' + + # RegisterJobDefinition + def registerjobdefinition(self): + container_properties = self._get_param('containerProperties') + def_name = self._get_param('jobDefinitionName') + parameters = self._get_param('parameters') + retry_strategy = self._get_param('retryStrategy') + _type = self._get_param('type') + + try: + name, arn, revision = self.batch_backend.register_job_definition( + def_name=def_name, + parameters=parameters, + _type=_type, + retry_strategy=retry_strategy, + container_properties=container_properties + ) + except AWSError as err: + return err.response() + + result = { + 'jobDefinitionArn': arn, + 'jobDefinitionName': name, + 'revision': revision + } + + return json.dumps(result) + + # DeregisterJobDefinition + def deregisterjobdefinition(self): + queue_name = self._get_param('jobDefinition') + + self.batch_backend.deregister_job_definition(queue_name) + + return '' + + # DescribeJobDefinitions + def describejobdefinitions(self): + job_def_name = self._get_param('jobDefinitionName') + job_def_list = self._get_param('jobDefinitions') + max_results = 
self._get_param('maxResults') + next_token = self._get_param('nextToken') + status = self._get_param('status') + + job_defs = self.batch_backend.describe_job_definitions(job_def_name, job_def_list, status, max_results, next_token) + + result = {'jobDefinitions': [job.describe() for job in job_defs]} + return json.dumps(result) + + # SubmitJob + def submitjob(self): + container_overrides = self._get_param('containerOverrides') + depends_on = self._get_param('dependsOn') + job_def = self._get_param('jobDefinition') + job_name = self._get_param('jobName') + job_queue = self._get_param('jobQueue') + parameters = self._get_param('parameters') + retries = self._get_param('retryStrategy') + + try: + name, job_id = self.batch_backend.submit_job( + job_name, job_def, job_queue, + parameters=parameters, + retries=retries, + depends_on=depends_on, + container_overrides=container_overrides + ) + except AWSError as err: + return err.response() + + result = { + 'jobId': job_id, + 'jobName': name, + } + + return json.dumps(result) + + # DescribeJobs + def describejobs(self): + jobs = self._get_param('jobs') + + try: + return json.dumps({'jobs': self.batch_backend.describe_jobs(jobs)}) + except AWSError as err: + return err.response() + + # ListJobs + def listjobs(self): + job_queue = self._get_param('jobQueue') + job_status = self._get_param('jobStatus') + max_results = self._get_param('maxResults') + next_token = self._get_param('nextToken') + + try: + jobs = self.batch_backend.list_jobs(job_queue, job_status, max_results, next_token) + except AWSError as err: + return err.response() + + result = {'jobSummaryList': [{'jobId': job.job_id, 'jobName': job.job_name} for job in jobs]} + return json.dumps(result) + + # TerminateJob + def terminatejob(self): + job_id = self._get_param('jobId') + reason = self._get_param('reason') + + try: + self.batch_backend.terminate_job(job_id, reason) + except AWSError as err: + return err.response() + + return '' + + # CancelJob + def canceljob(self): # Theres some AWS semantics on the differences but for us they're identical ;-) + return self.terminatejob() diff --git a/moto/batch/urls.py b/moto/batch/urls.py new file mode 100644 index 000000000..c64086ef2 --- /dev/null +++ b/moto/batch/urls.py @@ -0,0 +1,25 @@ +from __future__ import unicode_literals +from .responses import BatchResponse + +url_bases = [ + "https?://batch.(.+).amazonaws.com", +] + +url_paths = { + '{0}/v1/createcomputeenvironment$': BatchResponse.dispatch, + '{0}/v1/describecomputeenvironments$': BatchResponse.dispatch, + '{0}/v1/deletecomputeenvironment': BatchResponse.dispatch, + '{0}/v1/updatecomputeenvironment': BatchResponse.dispatch, + '{0}/v1/createjobqueue': BatchResponse.dispatch, + '{0}/v1/describejobqueues': BatchResponse.dispatch, + '{0}/v1/updatejobqueue': BatchResponse.dispatch, + '{0}/v1/deletejobqueue': BatchResponse.dispatch, + '{0}/v1/registerjobdefinition': BatchResponse.dispatch, + '{0}/v1/deregisterjobdefinition': BatchResponse.dispatch, + '{0}/v1/describejobdefinitions': BatchResponse.dispatch, + '{0}/v1/submitjob': BatchResponse.dispatch, + '{0}/v1/describejobs': BatchResponse.dispatch, + '{0}/v1/listjobs': BatchResponse.dispatch, + '{0}/v1/terminatejob': BatchResponse.dispatch, + '{0}/v1/canceljob': BatchResponse.dispatch, +} diff --git a/moto/batch/utils.py b/moto/batch/utils.py new file mode 100644 index 000000000..829a55f12 --- /dev/null +++ b/moto/batch/utils.py @@ -0,0 +1,22 @@ +from __future__ import unicode_literals + + +def make_arn_for_compute_env(account_id, name, 
region_name): + return "arn:aws:batch:{0}:{1}:compute-environment/{2}".format(region_name, account_id, name) + + +def make_arn_for_job_queue(account_id, name, region_name): + return "arn:aws:batch:{0}:{1}:job-queue/{2}".format(region_name, account_id, name) + + +def make_arn_for_task_def(account_id, name, revision, region_name): + return "arn:aws:batch:{0}:{1}:job-definition/{2}:{3}".format(region_name, account_id, name, revision) + + +def lowercase_first_key(some_dict): + new_dict = {} + for key, value in some_dict.items(): + new_key = key[0].lower() + key[1:] + new_dict[new_key] = value + + return new_dict diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index 923ada058..1c13c5058 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -8,12 +8,14 @@ import re from moto.autoscaling import models as autoscaling_models from moto.awslambda import models as lambda_models +from moto.batch import models as batch_models from moto.cloudwatch import models as cloudwatch_models from moto.datapipeline import models as datapipeline_models from moto.dynamodb import models as dynamodb_models from moto.ec2 import models as ec2_models from moto.ecs import models as ecs_models from moto.elb import models as elb_models +from moto.elbv2 import models as elbv2_models from moto.iam import models as iam_models from moto.kinesis import models as kinesis_models from moto.kms import models as kms_models @@ -31,6 +33,9 @@ from boto.cloudformation.stack import Output MODEL_MAP = { "AWS::AutoScaling::AutoScalingGroup": autoscaling_models.FakeAutoScalingGroup, "AWS::AutoScaling::LaunchConfiguration": autoscaling_models.FakeLaunchConfiguration, + "AWS::Batch::JobDefinition": batch_models.JobDefinition, + "AWS::Batch::JobQueue": batch_models.JobQueue, + "AWS::Batch::ComputeEnvironment": batch_models.ComputeEnvironment, "AWS::DynamoDB::Table": dynamodb_models.Table, "AWS::Kinesis::Stream": kinesis_models.Stream, "AWS::Lambda::EventSourceMapping": lambda_models.EventSourceMapping, @@ -57,6 +62,9 @@ MODEL_MAP = { "AWS::ECS::TaskDefinition": ecs_models.TaskDefinition, "AWS::ECS::Service": ecs_models.Service, "AWS::ElasticLoadBalancing::LoadBalancer": elb_models.FakeLoadBalancer, + "AWS::ElasticLoadBalancingV2::LoadBalancer": elbv2_models.FakeLoadBalancer, + "AWS::ElasticLoadBalancingV2::TargetGroup": elbv2_models.FakeTargetGroup, + "AWS::ElasticLoadBalancingV2::Listener": elbv2_models.FakeListener, "AWS::DataPipeline::Pipeline": datapipeline_models.Pipeline, "AWS::IAM::InstanceProfile": iam_models.InstanceProfile, "AWS::IAM::Role": iam_models.Role, @@ -322,7 +330,7 @@ def parse_output(output_logical_id, output_json, resources_map): output_json = clean_json(output_json, resources_map) output = Output() output.key = output_logical_id - output.value = output_json['Value'] + output.value = clean_json(output_json['Value'], resources_map) output.description = output_json.get('Description') return output diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 423cf92c1..a5b251b89 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -19,10 +19,19 @@ class CloudFormationResponse(BaseResponse): template_url_parts = urlparse(template_url) if "localhost" in template_url: bucket_name, key_name = template_url_parts.path.lstrip( - "/").split("/") + "/").split("/", 1) else: - bucket_name = template_url_parts.netloc.split(".")[0] - key_name = template_url_parts.path.lstrip("/") + if 
template_url_parts.netloc.endswith('amazonaws.com') \ + and template_url_parts.netloc.startswith('s3'): + # Handle when S3 url uses amazon url with bucket in path + # Also handles getting region as technically s3 is region'd + + # region = template_url.netloc.split('.')[1] + bucket_name, key_name = template_url_parts.path.lstrip( + "/").split("/", 1) + else: + bucket_name = template_url_parts.netloc.split(".")[0] + key_name = template_url_parts.path.lstrip("/") key = s3_backend.get_key(bucket_name, key_name) return key.value.decode("utf-8") @@ -227,13 +236,13 @@ CREATE_STACK_RESPONSE_TEMPLATE = """ """ -UPDATE_STACK_RESPONSE_TEMPLATE = """ +UPDATE_STACK_RESPONSE_TEMPLATE = """ {{ stack.stack_id }} - b9b5b068-3a41-11e5-94eb-example - + b9b4b068-3a41-11e5-94eb-example +
""" @@ -399,16 +408,6 @@ GET_TEMPLATE_RESPONSE_TEMPLATE = """ """ -UPDATE_STACK_RESPONSE_TEMPLATE = """ - - {{ stack.stack_id }} - - - b9b4b068-3a41-11e5-94eb-example - - -""" - DELETE_STACK_RESPONSE_TEMPLATE = """ 5ccc7dcd-744c-11e5-be70-example @@ -416,6 +415,7 @@ DELETE_STACK_RESPONSE_TEMPLATE = """ """ + LIST_EXPORTS_RESPONSE = """ diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index ac328def2..f9d571a23 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -1,4 +1,7 @@ +import json + from moto.core import BaseBackend, BaseModel +from moto.core.exceptions import RESTError import boto.ec2.cloudwatch import datetime @@ -35,9 +38,26 @@ class FakeAlarm(BaseModel): self.ok_actions = ok_actions self.insufficient_data_actions = insufficient_data_actions self.unit = unit - self.state_updated_timestamp = datetime.datetime.utcnow() self.configuration_updated_timestamp = datetime.datetime.utcnow() + self.history = [] + + self.state_reason = '' + self.state_reason_data = '{}' + self.state = 'OK' + self.state_updated_timestamp = datetime.datetime.utcnow() + + def update_state(self, reason, reason_data, state_value): + # History type, that then decides what the rest of the items are, can be one of ConfigurationUpdate | StateUpdate | Action + self.history.append( + ('StateUpdate', self.state_reason, self.state_reason_data, self.state, self.state_updated_timestamp) + ) + + self.state_reason = reason + self.state_reason_data = reason_data + self.state = state_value + self.state_updated_timestamp = datetime.datetime.utcnow() + class MetricDatum(BaseModel): @@ -122,10 +142,8 @@ class CloudWatchBackend(BaseBackend): if alarm.name in alarm_names ] - def get_alarms_by_state_value(self, state): - raise NotImplementedError( - "DescribeAlarm by state is not implemented in moto." 
- ) + def get_alarms_by_state_value(self, target_state): + return filter(lambda alarm: alarm.state == target_state, self.alarms.values()) def delete_alarms(self, alarm_names): for alarm_name in alarm_names: @@ -164,6 +182,21 @@ class CloudWatchBackend(BaseBackend): def get_dashboard(self, dashboard): return self.dashboards.get(dashboard) + def set_alarm_state(self, alarm_name, reason, reason_data, state_value): + try: + if reason_data is not None: + json.loads(reason_data) + except ValueError: + raise RESTError('InvalidFormat', 'StateReasonData is invalid JSON') + + if alarm_name not in self.alarms: + raise RESTError('ResourceNotFound', 'Alarm {0} not found'.format(alarm_name), status=404) + + if state_value not in ('OK', 'ALARM', 'INSUFFICIENT_DATA'): + raise RESTError('InvalidParameterValue', 'StateValue is not one of OK | ALARM | INSUFFICIENT_DATA') + + self.alarms[alarm_name].update_state(reason, reason_data, state_value) + class LogGroup(BaseModel): diff --git a/moto/cloudwatch/responses.py b/moto/cloudwatch/responses.py index cd7ce123e..7a5fa5ebd 100644 --- a/moto/cloudwatch/responses.py +++ b/moto/cloudwatch/responses.py @@ -1,4 +1,5 @@ import json +from moto.core.utils import amzn_request_id from moto.core.responses import BaseResponse from .models import cloudwatch_backends @@ -13,6 +14,7 @@ class CloudWatchResponse(BaseResponse): template = self.response_template(ERROR_RESPONSE_TEMPLATE) return template.render(code=code, message=message), dict(status=status) + @amzn_request_id def put_metric_alarm(self): name = self._get_param('AlarmName') namespace = self._get_param('Namespace') @@ -40,6 +42,7 @@ class CloudWatchResponse(BaseResponse): template = self.response_template(PUT_METRIC_ALARM_TEMPLATE) return template.render(alarm=alarm) + @amzn_request_id def describe_alarms(self): action_prefix = self._get_param('ActionPrefix') alarm_name_prefix = self._get_param('AlarmNamePrefix') @@ -62,12 +65,14 @@ class CloudWatchResponse(BaseResponse): template = self.response_template(DESCRIBE_ALARMS_TEMPLATE) return template.render(alarms=alarms) + @amzn_request_id def delete_alarms(self): alarm_names = self._get_multi_param('AlarmNames.member') self.cloudwatch_backend.delete_alarms(alarm_names) template = self.response_template(DELETE_METRIC_ALARMS_TEMPLATE) return template.render() + @amzn_request_id def put_metric_data(self): namespace = self._get_param('Namespace') metric_data = [] @@ -99,11 +104,13 @@ class CloudWatchResponse(BaseResponse): template = self.response_template(PUT_METRIC_DATA_TEMPLATE) return template.render() + @amzn_request_id def list_metrics(self): metrics = self.cloudwatch_backend.get_all_metrics() template = self.response_template(LIST_METRICS_TEMPLATE) return template.render(metrics=metrics) + @amzn_request_id def delete_dashboards(self): dashboards = self._get_multi_param('DashboardNames.member') if dashboards is None: @@ -116,18 +123,23 @@ class CloudWatchResponse(BaseResponse): template = self.response_template(DELETE_DASHBOARD_TEMPLATE) return template.render() + @amzn_request_id def describe_alarm_history(self): raise NotImplementedError() + @amzn_request_id def describe_alarms_for_metric(self): raise NotImplementedError() + @amzn_request_id def disable_alarm_actions(self): raise NotImplementedError() + @amzn_request_id def enable_alarm_actions(self): raise NotImplementedError() + @amzn_request_id def get_dashboard(self): dashboard_name = self._get_param('DashboardName') @@ -138,9 +150,11 @@ class CloudWatchResponse(BaseResponse): template = 
self.response_template(GET_DASHBOARD_TEMPLATE) return template.render(dashboard=dashboard) + @amzn_request_id def get_metric_statistics(self): raise NotImplementedError() + @amzn_request_id def list_dashboards(self): prefix = self._get_param('DashboardNamePrefix', '') @@ -149,6 +163,7 @@ class CloudWatchResponse(BaseResponse): template = self.response_template(LIST_DASHBOARD_RESPONSE) return template.render(dashboards=dashboards) + @amzn_request_id def put_dashboard(self): name = self._get_param('DashboardName') body = self._get_param('DashboardBody') @@ -163,14 +178,23 @@ class CloudWatchResponse(BaseResponse): template = self.response_template(PUT_DASHBOARD_RESPONSE) return template.render() + @amzn_request_id def set_alarm_state(self): - raise NotImplementedError() + alarm_name = self._get_param('AlarmName') + reason = self._get_param('StateReason') + reason_data = self._get_param('StateReasonData') + state_value = self._get_param('StateValue') + + self.cloudwatch_backend.set_alarm_state(alarm_name, reason, reason_data, state_value) + + template = self.response_template(SET_ALARM_STATE_TEMPLATE) + return template.render() PUT_METRIC_ALARM_TEMPLATE = """ - 2690d7eb-ed86-11dd-9877-6fad448a8419 + {{ request_id }} """ @@ -229,7 +253,7 @@ DESCRIBE_ALARMS_TEMPLATE = """ - 2690d7eb-ed86-11dd-9877-6fad448a8419 + {{ request_id }} """ @@ -237,7 +261,7 @@ DELETE_METRIC_ALARMS_TEMPLATE = """ - 2690d7eb-ed86-11dd-9877-6fad448a8419 + {{ request_id }} """ @@ -271,7 +295,7 @@ PUT_DASHBOARD_RESPONSE = """ - 68d1dc8c-9faa-11e7-a694-df2715690df2 + {{ request_id }} """ @@ -307,16 +331,22 @@ GET_DASHBOARD_TEMPLATE = """ + + {{ request_id }} + +""" + ERROR_RESPONSE_TEMPLATE = """ Sender {{ code }} {{ message }} - 5e45fd1e-9fa3-11e7-b720-89e8821d38c4 + {{ request_id }} """ diff --git a/moto/core/exceptions.py b/moto/core/exceptions.py index 5474707d6..40202f7bd 100644 --- a/moto/core/exceptions.py +++ b/moto/core/exceptions.py @@ -34,6 +34,8 @@ ERROR_JSON_RESPONSE = u"""{ class RESTError(HTTPException): + code = 400 + templates = { 'single_error': SINGLE_ERROR_RESPONSE, 'error': ERROR_RESPONSE, @@ -54,7 +56,6 @@ class DryRunClientError(RESTError): class JsonRESTError(RESTError): - def __init__(self, error_type, message, template='error_json', **kwargs): super(JsonRESTError, self).__init__( error_type, message, template, **kwargs) diff --git a/moto/core/models.py b/moto/core/models.py index 6e93f911a..c6fb72ffa 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -1,3 +1,4 @@ +# -*- coding: utf-8 -*- from __future__ import unicode_literals from __future__ import absolute_import @@ -176,16 +177,49 @@ class ServerModeMockAWS(BaseMockAWS): if 'endpoint_url' not in kwargs: kwargs['endpoint_url'] = "http://localhost:5000" return real_boto3_resource(*args, **kwargs) + + def fake_httplib_send_output(self, message_body=None, *args, **kwargs): + def _convert_to_bytes(mixed_buffer): + bytes_buffer = [] + for chunk in mixed_buffer: + if isinstance(chunk, six.text_type): + bytes_buffer.append(chunk.encode('utf-8')) + else: + bytes_buffer.append(chunk) + msg = b"\r\n".join(bytes_buffer) + return msg + + self._buffer.extend((b"", b"")) + msg = _convert_to_bytes(self._buffer) + del self._buffer[:] + if isinstance(message_body, bytes): + msg += message_body + message_body = None + self.send(msg) + # if self._expect_header_set: + # read, write, exc = select.select([self.sock], [], [self.sock], 1) + # if read: + # self._handle_expect_response(message_body) + # return + if message_body is not None: + 
self.send(message_body) + self._client_patcher = mock.patch('boto3.client', fake_boto3_client) - self._resource_patcher = mock.patch( - 'boto3.resource', fake_boto3_resource) + self._resource_patcher = mock.patch('boto3.resource', fake_boto3_resource) + if six.PY2: + self._httplib_patcher = mock.patch('httplib.HTTPConnection._send_output', fake_httplib_send_output) + self._client_patcher.start() self._resource_patcher.start() + if six.PY2: + self._httplib_patcher.start() def disable_patching(self): if self._client_patcher: self._client_patcher.stop() self._resource_patcher.stop() + if six.PY2: + self._httplib_patcher.stop() class Model(type): diff --git a/moto/core/responses.py b/moto/core/responses.py index e85054802..5fce3bf9d 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -17,6 +17,8 @@ from six.moves.urllib.parse import parse_qs, urlparse import xmltodict from pkg_resources import resource_filename from werkzeug.exceptions import HTTPException + +import boto3 from moto.compat import OrderedDict from moto.core.utils import camelcase_to_underscores, method_names_from_class @@ -103,7 +105,8 @@ class _TemplateEnvironmentMixin(object): class BaseResponse(_TemplateEnvironmentMixin): default_region = 'us-east-1' - region_regex = r'\.(.+?)\.amazonaws\.com' + # to extract region, use [^.] + region_regex = r'\.(?P[a-z]{2}-[a-z]+-\d{1})\.amazonaws\.com' aws_service_spec = None @classmethod @@ -151,12 +154,12 @@ class BaseResponse(_TemplateEnvironmentMixin): querystring.update(headers) querystring = _decode_dict(querystring) - self.uri = full_url self.path = urlparse(full_url).path self.querystring = querystring self.method = request.method self.region = self.get_region_from_url(request, full_url) + self.uri_match = None self.headers = request.headers if 'host' not in self.headers: @@ -178,6 +181,58 @@ class BaseResponse(_TemplateEnvironmentMixin): self.setup_class(request, full_url, headers) return self.call_action() + def uri_to_regexp(self, uri): + """converts uri w/ placeholder to regexp + '/cars/{carName}/drivers/{DriverName}' + -> '^/cars/.*/drivers/[^/]*$' + + '/cars/{carName}/drivers/{DriverName}/drive' + -> '^/cars/.*/drivers/.*/drive$' + + """ + def _convert(elem, is_last): + if not re.match('^{.*}$', elem): + return elem + name = elem.replace('{', '').replace('}', '') + if is_last: + return '(?P<%s>[^/]*)' % name + return '(?P<%s>.*)' % name + + elems = uri.split('/') + num_elems = len(elems) + regexp = '^{}$'.format('/'.join([_convert(elem, (i == num_elems - 1)) for i, elem in enumerate(elems)])) + return regexp + + def _get_action_from_method_and_request_uri(self, method, request_uri): + """basically used for `rest-json` APIs + You can refer to example from link below + https://github.com/boto/botocore/blob/develop/botocore/data/iot/2015-05-28/service-2.json + """ + + # service response class should have 'SERVICE_NAME' class member, + # if you want to get action from method and url + if not hasattr(self, 'SERVICE_NAME'): + return None + service = self.SERVICE_NAME + conn = boto3.client(service, region_name=self.region) + + # make cache if it does not exist yet + if not hasattr(self, 'method_urls'): + self.method_urls = defaultdict(lambda: defaultdict(str)) + op_names = conn._service_model.operation_names + for op_name in op_names: + op_model = conn._service_model.operation_model(op_name) + _method = op_model.http['method'] + uri_regexp = self.uri_to_regexp(op_model.http['requestUri']) + self.method_urls[_method][uri_regexp] = op_model.name + regexp_and_names = 
self.method_urls[method] + for regexp, name in regexp_and_names.items(): + match = re.match(regexp, request_uri) + self.uri_match = match + if match: + return name + return None + def _get_action(self): action = self.querystring.get('Action', [""])[0] if not action: # Some services use a header for the action @@ -186,7 +241,9 @@ class BaseResponse(_TemplateEnvironmentMixin): 'x-amz-target') or self.headers.get('X-Amz-Target') if match: action = match.split(".")[-1] - + # get action from method and uri + if not action: + return self._get_action_from_method_and_request_uri(self.method, self.path) return action def call_action(self): @@ -199,10 +256,14 @@ class BaseResponse(_TemplateEnvironmentMixin): response = method() except HTTPException as http_error: response = http_error.description, dict(status=http_error.code) + if isinstance(response, six.string_types): return 200, headers, response else: - body, new_headers = response + if len(response) == 2: + body, new_headers = response + else: + status, new_headers, body = response status = new_headers.get('status', 200) headers.update(new_headers) # Cast status to string @@ -217,6 +278,22 @@ class BaseResponse(_TemplateEnvironmentMixin): val = self.querystring.get(param_name) if val is not None: return val[0] + + # try to get json body parameter + if self.body is not None: + try: + return json.loads(self.body)[param_name] + except ValueError: + pass + except KeyError: + pass + # try to get path parameter + if self.uri_match: + try: + return self.uri_match.group(param_name) + except IndexError: + # do nothing if param is not found + pass return if_none def _get_int_param(self, param_name, if_none=None): diff --git a/moto/core/utils.py b/moto/core/utils.py index 9ee0c1814..43f05672e 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -1,10 +1,16 @@ from __future__ import unicode_literals +from functools import wraps +import binascii import datetime import inspect import random import re import six +import string + + +REQUEST_ID_LONG = string.digits + string.ascii_uppercase def camelcase_to_underscores(argument): @@ -194,3 +200,87 @@ def unix_time(dt=None): def unix_time_millis(dt=None): return unix_time(dt) * 1000.0 + + +def gen_amz_crc32(response, headerdict=None): + if not isinstance(response, bytes): + response = response.encode() + + crc = str(binascii.crc32(response)) + + if headerdict is not None and isinstance(headerdict, dict): + headerdict.update({'x-amz-crc32': crc}) + + return crc + + +def gen_amzn_requestid_long(headerdict=None): + req_id = ''.join([random.choice(REQUEST_ID_LONG) for _ in range(0, 52)]) + + if headerdict is not None and isinstance(headerdict, dict): + headerdict.update({'x-amzn-requestid': req_id}) + + return req_id + + +def amz_crc32(f): + @wraps(f) + def _wrapper(*args, **kwargs): + response = f(*args, **kwargs) + + headers = {} + status = 200 + + if isinstance(response, six.string_types): + body = response + else: + if len(response) == 2: + body, new_headers = response + status = new_headers.get('status', 200) + else: + status, new_headers, body = response + headers.update(new_headers) + # Cast status to string + if "status" in headers: + headers['status'] = str(headers['status']) + + try: + # Doesnt work on python2 for some odd unicode strings + gen_amz_crc32(body, headers) + except Exception: + pass + + return status, headers, body + + return _wrapper + + +def amzn_request_id(f): + @wraps(f) + def _wrapper(*args, **kwargs): + response = f(*args, **kwargs) + + headers = {} + status = 200 + + if 
isinstance(response, six.string_types): + body = response + else: + if len(response) == 2: + body, new_headers = response + status = new_headers.get('status', 200) + else: + status, new_headers, body = response + headers.update(new_headers) + + request_id = gen_amzn_requestid_long(headers) + + # Update request ID in XML + try: + body = body.replace('{{ requestid }}', request_id) + except Exception: # Will just ignore if it cant work on bytes (which are str's on python2) + pass + + return status, headers, body + + return _wrapper diff --git a/moto/dynamodb2/__init__.py b/moto/dynamodb2/__init__.py index ad3f042d2..a56a83b35 100644 --- a/moto/dynamodb2/__init__.py +++ b/moto/dynamodb2/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals -from .models import dynamodb_backend2 +from .models import dynamodb_backends as dynamodb_backends2 +from ..core.models import base_decorator, deprecated_base_decorator -dynamodb_backends2 = {"global": dynamodb_backend2} -mock_dynamodb2 = dynamodb_backend2.decorator -mock_dynamodb2_deprecated = dynamodb_backend2.deprecated_decorator +dynamodb_backend2 = dynamodb_backends2['us-east-1'] +mock_dynamodb2 = base_decorator(dynamodb_backends2) +mock_dynamodb2_deprecated = deprecated_base_decorator(dynamodb_backends2) diff --git a/moto/dynamodb2/comparisons.py b/moto/dynamodb2/comparisons.py index 8462c2de5..68051460e 100644 --- a/moto/dynamodb2/comparisons.py +++ b/moto/dynamodb2/comparisons.py @@ -43,16 +43,14 @@ def get_comparison_func(range_comparison): return COMPARISON_FUNCS.get(range_comparison) -# +class RecursionStopIteration(StopIteration): + pass + + def get_filter_expression(expr, names, values): # Examples # expr = 'Id > 5 AND attribute_exists(test) AND Id BETWEEN 5 AND 6 OR length < 6 AND contains(test, 1) AND 5 IN (4,5, 6) OR (Id < 5 AND 5 > Id)' # expr = 'Id > 5 AND Subs < 7' - - # Need to do some dodgyness for NOT i think. - if 'NOT' in expr: - raise NotImplementedError('NOT not supported yet') - if names is None: names = {} if values is None: @@ -61,16 +59,28 @@ def get_filter_expression(expr, names, values): # Do substitutions for key, value in names.items(): expr = expr.replace(key, value) + + # Store correct types of values for use later + values_map = {} for key, value in values.items(): if 'N' in value: - expr.replace(key, float(value['N'])) + values_map[key] = float(value['N']) + elif 'BOOL' in value: + values_map[key] = value['BOOL'] + elif 'S' in value: + values_map[key] = value['S'] + elif 'NS' in value: + values_map[key] = tuple(value['NS']) + elif 'SS' in value: + values_map[key] = tuple(value['SS']) + elif 'L' in value: + values_map[key] = tuple(value['L']) else: - expr = expr.replace(key, value['S']) + raise NotImplementedError() # Remove all spaces, tbf we could just skip them in the next step. # The number of known options is really small so we can do a fair bit of cheating - #expr = list(re.sub('\s', '', expr)) # 'Id>5ANDattribute_exists(test)ORNOTlength<6' - expr = list(expr) + expr = list(expr.strip()) # DodgyTokenisation stage 1 def is_value(val): @@ -122,39 +132,42 @@ def get_filter_expression(expr, names, values): return val in ('<', '>', '=', '>=', '<=', '<>', 'BETWEEN', 'IN', 'AND', 'OR', 'NOT') # DodgyTokenisation stage 2, it groups together some elements to make RPN'ing it later easier. 
- tokens2 = [] - token_iterator = iter(tokens) - for token in token_iterator: - if token == '(': - tuple_list = [] + def handle_token(token, tokens2, token_iterator): + # ok so this essentially groups up some tokens to make later parsing easier, + # when it encounters brackets it will recurse and then unrecurse when RecursionStopIteration is raised. + if token == ')': + raise RecursionStopIteration() # Should be recursive so this should work + elif token == '(': + temp_list = [] - next_token = six.next(token_iterator) - while next_token != ')': - try: - next_token = int(next_token) - except ValueError: - try: - next_token = float(next_token) - except ValueError: - pass - tuple_list.append(next_token) - next_token = six.next(token_iterator) + try: + while True: + next_token = six.next(token_iterator) + handle_token(next_token, temp_list, token_iterator) + except RecursionStopIteration: + pass # Continue + except StopIteration: + ValueError('Malformed filter expression, type1') # Sigh, we only want to group a tuple if it doesnt contain operators - if any([is_op(item) for item in tuple_list]): + if any([is_op(item) for item in temp_list]): + # Its an expression tokens2.append('(') - tokens2.extend(tuple_list) + tokens2.extend(temp_list) tokens2.append(')') else: - tokens2.append(tuple(tuple_list)) + tokens2.append(tuple(temp_list)) elif token == 'BETWEEN': field = tokens2.pop() - op1 = int(six.next(token_iterator)) + # if values map contains a number, it would be a float + # so we need to int() it anyway + op1 = six.next(token_iterator) + op1 = int(values_map.get(op1, op1)) and_op = six.next(token_iterator) assert and_op == 'AND' - op2 = int(six.next(token_iterator)) + op2 = six.next(token_iterator) + op2 = int(values_map.get(op2, op2)) tokens2.append(['between', field, op1, op2]) - elif is_function(token): function_list = [token] @@ -167,16 +180,21 @@ def get_filter_expression(expr, names, values): next_token = six.next(token_iterator) tokens2.append(function_list) - else: - try: - token = int(token) - except ValueError: - try: - token = float(token) - except ValueError: - pass - tokens2.append(token) + # Convert tokens back to real types + if token in values_map: + token = values_map[token] + + # Need to join >= <= <> + if len(tokens2) > 0 and ((tokens2[-1] == '>' and token == '=') or (tokens2[-1] == '<' and token == '=') or (tokens2[-1] == '<' and token == '>')): + tokens2.append(tokens2.pop() + token) + else: + tokens2.append(token) + + tokens2 = [] + token_iterator = iter(tokens) + for token in token_iterator: + handle_token(token, tokens2, token_iterator) # Start of the Shunting-Yard algorithm. <-- Proper beast algorithm! 
def is_number(val): @@ -205,7 +223,9 @@ def get_filter_expression(expr, names, values): output.append(token) else: # Must be operator kw - while len(op_stack) > 0 and OPS[op_stack[-1]] <= OPS[token]: + + # Cheat, NOT is our only RIGHT associative operator, should really have dict of operator associativity + while len(op_stack) > 0 and OPS[op_stack[-1]] <= OPS[token] and op_stack[-1] != 'NOT': output.append(op_stack.pop()) op_stack.append(token) while len(op_stack) > 0: @@ -229,17 +249,22 @@ def get_filter_expression(expr, names, values): stack = [] for token in output: if is_op(token): - op2 = stack.pop() - op1 = stack.pop() - op_cls = OP_CLASS[token] + + if token == 'NOT': + op1 = stack.pop() + op2 = True + else: + op2 = stack.pop() + op1 = stack.pop() + stack.append(op_cls(op1, op2)) else: stack.append(to_func(token)) result = stack.pop(0) if len(stack) > 0: - raise ValueError('Malformed filter expression') + raise ValueError('Malformed filter expression, type2') return result @@ -300,6 +325,18 @@ class Func(object): return 'Func(...)'.format(self.FUNC) +class OpNot(Op): + OP = 'NOT' + + def expr(self, item): + lhs = self._lhs(item) + + return not lhs + + def __str__(self): + return '({0} {1})'.format(self.OP, self.lhs) + + class OpAnd(Op): OP = 'AND' @@ -470,6 +507,7 @@ class FuncBetween(Func): OP_CLASS = { + 'NOT': OpNot, 'AND': OpAnd, 'OR': OpOr, 'IN': OpIn, diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index bec72d327..0a48c277a 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -1,13 +1,16 @@ from __future__ import unicode_literals from collections import defaultdict +import copy import datetime import decimal import json import re +import boto3 from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.core.utils import unix_time +from moto.core.exceptions import JsonRESTError from .comparisons import get_comparison_func, get_filter_expression, Op @@ -146,9 +149,38 @@ class Item(BaseModel): key = key.strip() value = value.strip() if value in expression_attribute_values: - self.attrs[key] = DynamoType(expression_attribute_values[value]) + value = DynamoType(expression_attribute_values[value]) else: - self.attrs[key] = DynamoType({"S": value}) + value = DynamoType({"S": value}) + + if '.' 
not in key: + self.attrs[key] = value + else: + # Handle nested dict updates + key_parts = key.split('.') + attr = key_parts.pop(0) + if attr not in self.attrs: + raise ValueError() + + last_val = self.attrs[attr].value + for key_part in key_parts: + # Hack but it'll do, traverses into a dict + if list(last_val.keys())[0] == 'M': + last_val = last_val['M'] + + if key_part not in last_val: + raise ValueError() + + last_val = last_val[key_part] + + # We have reference to a nested object but we cant just assign to it + current_type = list(last_val.keys())[0] + if current_type == value.type: + last_val[current_type] = value.value + else: + last_val[value.type] = value.value + del last_val[current_type] + elif action == 'ADD': key, value = value.split(" ", 1) key = key.strip() @@ -271,6 +303,10 @@ class Table(BaseModel): self.items = defaultdict(dict) self.table_arn = self._generate_arn(table_name) self.tags = [] + self.ttl = { + 'TimeToLiveStatus': 'DISABLED' # One of 'ENABLING'|'DISABLING'|'ENABLED'|'DISABLED', + # 'AttributeName': 'string' # Can contain this + } def _generate_arn(self, name): return 'arn:aws:dynamodb:us-east-1:123456789011:table/' + name @@ -413,7 +449,7 @@ class Table(BaseModel): def query(self, hash_key, range_comparison, range_objs, limit, exclusive_start_key, scan_index_forward, projection_expression, - index_name=None, **filter_kwargs): + index_name=None, filter_expression=None, **filter_kwargs): results = [] if index_name: all_indexes = (self.global_indexes or []) + (self.indexes or []) @@ -486,7 +522,8 @@ class Table(BaseModel): if projection_expression: expressions = [x.strip() for x in projection_expression.split(',')] - for result in possible_results: + results = copy.deepcopy(results) + for result in results: for attr in list(result.attrs): if attr not in expressions: result.attrs.pop(attr) @@ -496,6 +533,9 @@ class Table(BaseModel): scanned_count = len(list(self.all_items())) + if filter_expression is not None: + results = [item for item in results if filter_expression.expr(item)] + results, last_evaluated_key = self._trim_results(results, limit, exclusive_start_key) return results, scanned_count, last_evaluated_key @@ -577,9 +617,16 @@ class Table(BaseModel): class DynamoDBBackend(BaseBackend): - def __init__(self): + def __init__(self, region_name=None): + self.region_name = region_name self.tables = OrderedDict() + def reset(self): + region_name = self.region_name + + self.__dict__ = {} + self.__init__(region_name) + def create_table(self, name, **params): if name in self.tables: return None @@ -595,6 +642,11 @@ class DynamoDBBackend(BaseBackend): if self.tables[table].table_arn == table_arn: self.tables[table].tags.extend(tags) + def untag_resource(self, table_arn, tag_keys): + for table in self.tables: + if self.tables[table].table_arn == table_arn: + self.tables[table].tags = [tag for tag in self.tables[table].tags if tag['Key'] not in tag_keys] + def list_tags_of_resource(self, table_arn): required_table = None for table in self.tables: @@ -689,7 +741,9 @@ class DynamoDBBackend(BaseBackend): return table.get_item(hash_key, range_key) def query(self, table_name, hash_key_dict, range_comparison, range_value_dicts, - limit, exclusive_start_key, scan_index_forward, projection_expression, index_name=None, **filter_kwargs): + limit, exclusive_start_key, scan_index_forward, projection_expression, index_name=None, + expr_names=None, expr_values=None, filter_expression=None, + **filter_kwargs): table = self.tables.get(table_name) if not table: return None, None @@ 
-698,8 +752,13 @@ class DynamoDBBackend(BaseBackend): range_values = [DynamoType(range_value) for range_value in range_value_dicts] + if filter_expression is not None: + filter_expression = get_filter_expression(filter_expression, expr_names, expr_values) + else: + filter_expression = Op(None, None) # Will always eval to true + return table.query(hash_key, range_comparison, range_values, limit, - exclusive_start_key, scan_index_forward, projection_expression, index_name, **filter_kwargs) + exclusive_start_key, scan_index_forward, projection_expression, index_name, filter_expression, **filter_kwargs) def scan(self, table_name, filters, limit, exclusive_start_key, filter_expression, expr_names, expr_values): table = self.tables.get(table_name) @@ -796,5 +855,28 @@ class DynamoDBBackend(BaseBackend): hash_key, range_key = self.get_keys_value(table, keys) return table.delete_item(hash_key, range_key) + def update_ttl(self, table_name, ttl_spec): + table = self.tables.get(table_name) + if table is None: + raise JsonRESTError('ResourceNotFound', 'Table not found') -dynamodb_backend2 = DynamoDBBackend() + if 'Enabled' not in ttl_spec or 'AttributeName' not in ttl_spec: + raise JsonRESTError('InvalidParameterValue', + 'TimeToLiveSpecification does not contain Enabled and AttributeName') + + if ttl_spec['Enabled']: + table.ttl['TimeToLiveStatus'] = 'ENABLED' + else: + table.ttl['TimeToLiveStatus'] = 'DISABLED' + table.ttl['AttributeName'] = ttl_spec['AttributeName'] + + def describe_ttl(self, table_name): + table = self.tables.get(table_name) + if table is None: + raise JsonRESTError('ResourceNotFound', 'Table not found') + + return table.ttl + + +available_regions = boto3.session.Session().get_available_regions("dynamodb") +dynamodb_backends = {region: DynamoDBBackend(region_name=region) for region in available_regions} diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 75e625c73..952d33efa 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -4,8 +4,8 @@ import six import re from moto.core.responses import BaseResponse -from moto.core.utils import camelcase_to_underscores -from .models import dynamodb_backend2, dynamo_json_dump +from moto.core.utils import camelcase_to_underscores, amzn_request_id +from .models import dynamodb_backends, dynamo_json_dump class DynamoHandler(BaseResponse): @@ -24,6 +24,15 @@ class DynamoHandler(BaseResponse): def error(self, type_, message, status=400): return status, self.response_headers, dynamo_json_dump({'__type': type_, 'message': message}) + @property + def dynamodb_backend(self): + """ + :return: DynamoDB2 Backend + :rtype: moto.dynamodb2.models.DynamoDBBackend + """ + return dynamodb_backends[self.region] + + @amzn_request_id def call_action(self): self.body = json.loads(self.body or '{}') endpoint = self.get_endpoint_name(self.headers) @@ -45,10 +54,10 @@ class DynamoHandler(BaseResponse): limit = body.get('Limit', 100) if body.get("ExclusiveStartTableName"): last = body.get("ExclusiveStartTableName") - start = list(dynamodb_backend2.tables.keys()).index(last) + 1 + start = list(self.dynamodb_backend.tables.keys()).index(last) + 1 else: start = 0 - all_tables = list(dynamodb_backend2.tables.keys()) + all_tables = list(self.dynamodb_backend.tables.keys()) if limit: tables = all_tables[start:start + limit] else: @@ -56,6 +65,7 @@ class DynamoHandler(BaseResponse): response = {"TableNames": tables} if limit and len(all_tables) > start + limit: response["LastEvaluatedTableName"] = tables[-1] + return 
dynamo_json_dump(response) def create_table(self): @@ -72,12 +82,12 @@ class DynamoHandler(BaseResponse): global_indexes = body.get("GlobalSecondaryIndexes", []) local_secondary_indexes = body.get("LocalSecondaryIndexes", []) - table = dynamodb_backend2.create_table(table_name, - schema=key_schema, - throughput=throughput, - attr=attr, - global_indexes=global_indexes, - indexes=local_secondary_indexes) + table = self.dynamodb_backend.create_table(table_name, + schema=key_schema, + throughput=throughput, + attr=attr, + global_indexes=global_indexes, + indexes=local_secondary_indexes) if table is not None: return dynamo_json_dump(table.describe()) else: @@ -86,7 +96,7 @@ class DynamoHandler(BaseResponse): def delete_table(self): name = self.body['TableName'] - table = dynamodb_backend2.delete_table(name) + table = self.dynamodb_backend.delete_table(name) if table is not None: return dynamo_json_dump(table.describe()) else: @@ -94,15 +104,21 @@ class DynamoHandler(BaseResponse): return self.error(er, 'Requested resource not found') def tag_resource(self): - tags = self.body['Tags'] table_arn = self.body['ResourceArn'] - dynamodb_backend2.tag_resource(table_arn, tags) - return json.dumps({}) + tags = self.body['Tags'] + self.dynamodb_backend.tag_resource(table_arn, tags) + return '' + + def untag_resource(self): + table_arn = self.body['ResourceArn'] + tags = self.body['TagKeys'] + self.dynamodb_backend.untag_resource(table_arn, tags) + return '' def list_tags_of_resource(self): try: table_arn = self.body['ResourceArn'] - all_tags = dynamodb_backend2.list_tags_of_resource(table_arn) + all_tags = self.dynamodb_backend.list_tags_of_resource(table_arn) all_tag_keys = [tag['Key'] for tag in all_tags] marker = self.body.get('NextToken') if marker: @@ -125,17 +141,17 @@ class DynamoHandler(BaseResponse): def update_table(self): name = self.body['TableName'] if 'GlobalSecondaryIndexUpdates' in self.body: - table = dynamodb_backend2.update_table_global_indexes( + table = self.dynamodb_backend.update_table_global_indexes( name, self.body['GlobalSecondaryIndexUpdates']) if 'ProvisionedThroughput' in self.body: throughput = self.body["ProvisionedThroughput"] - table = dynamodb_backend2.update_table_throughput(name, throughput) + table = self.dynamodb_backend.update_table_throughput(name, throughput) return dynamo_json_dump(table.describe()) def describe_table(self): name = self.body['TableName'] try: - table = dynamodb_backend2.tables[name] + table = self.dynamodb_backend.tables[name] except KeyError: er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' return self.error(er, 'Requested resource not found') @@ -186,8 +202,7 @@ class DynamoHandler(BaseResponse): expected[not_exists_m.group(1)] = {'Exists': False} try: - result = dynamodb_backend2.put_item( - name, item, expected, overwrite) + result = self.dynamodb_backend.put_item(name, item, expected, overwrite) except ValueError: er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException' return self.error(er, 'A condition specified in the operation could not be evaluated.') @@ -212,10 +227,10 @@ class DynamoHandler(BaseResponse): request = list(table_request.values())[0] if request_type == 'PutRequest': item = request['Item'] - dynamodb_backend2.put_item(table_name, item) + self.dynamodb_backend.put_item(table_name, item) elif request_type == 'DeleteRequest': keys = request['Key'] - item = dynamodb_backend2.delete_item(table_name, keys) + item = self.dynamodb_backend.delete_item(table_name, keys) response = { 
"ConsumedCapacity": [ @@ -235,7 +250,7 @@ class DynamoHandler(BaseResponse): name = self.body['TableName'] key = self.body['Key'] try: - item = dynamodb_backend2.get_item(name, key) + item = self.dynamodb_backend.get_item(name, key) except ValueError: er = 'com.amazon.coral.validate#ValidationException' return self.error(er, 'Validation Exception') @@ -266,7 +281,7 @@ class DynamoHandler(BaseResponse): attributes_to_get = table_request.get('AttributesToGet') results["Responses"][table_name] = [] for key in keys: - item = dynamodb_backend2.get_item(table_name, key) + item = self.dynamodb_backend.get_item(table_name, key) if item: item_describe = item.describe_attrs(attributes_to_get) results["Responses"][table_name].append( @@ -283,7 +298,9 @@ class DynamoHandler(BaseResponse): # {u'KeyConditionExpression': u'#n0 = :v0', u'ExpressionAttributeValues': {u':v0': {u'S': u'johndoe'}}, u'ExpressionAttributeNames': {u'#n0': u'username'}} key_condition_expression = self.body.get('KeyConditionExpression') projection_expression = self.body.get('ProjectionExpression') - expression_attribute_names = self.body.get('ExpressionAttributeNames') + expression_attribute_names = self.body.get('ExpressionAttributeNames', {}) + filter_expression = self.body.get('FilterExpression') + expression_attribute_values = self.body.get('ExpressionAttributeValues', {}) if projection_expression and expression_attribute_names: expressions = [x.strip() for x in projection_expression.split(',')] @@ -292,10 +309,11 @@ class DynamoHandler(BaseResponse): projection_expression = projection_expression.replace(expression, expression_attribute_names[expression]) filter_kwargs = {} - if key_condition_expression: - value_alias_map = self.body['ExpressionAttributeValues'] - table = dynamodb_backend2.get_table(name) + if key_condition_expression: + value_alias_map = self.body.get('ExpressionAttributeValues', {}) + + table = self.dynamodb_backend.get_table(name) # If table does not exist if table is None: @@ -318,7 +336,7 @@ class DynamoHandler(BaseResponse): index = table.schema reverse_attribute_lookup = dict((v, k) for k, v in - six.iteritems(self.body['ExpressionAttributeNames'])) + six.iteritems(self.body.get('ExpressionAttributeNames', {}))) if " AND " in key_condition_expression: expressions = key_condition_expression.split(" AND ", 1) @@ -357,13 +375,14 @@ class DynamoHandler(BaseResponse): range_values = [] hash_key_value_alias = hash_key_expression.split("=")[1].strip() - hash_key = value_alias_map[hash_key_value_alias] + # Temporary fix until we get proper KeyConditionExpression function + hash_key = value_alias_map.get(hash_key_value_alias, {'S': hash_key_value_alias}) else: # 'KeyConditions': {u'forum_name': {u'ComparisonOperator': u'EQ', u'AttributeValueList': [{u'S': u'the-key'}]}} key_conditions = self.body.get('KeyConditions') query_filters = self.body.get("QueryFilter") if key_conditions: - hash_key_name, range_key_name = dynamodb_backend2.get_table_keys_name( + hash_key_name, range_key_name = self.dynamodb_backend.get_table_keys_name( name, key_conditions.keys()) for key, value in key_conditions.items(): if key not in (hash_key_name, range_key_name): @@ -396,9 +415,12 @@ class DynamoHandler(BaseResponse): exclusive_start_key = self.body.get('ExclusiveStartKey') limit = self.body.get("Limit") scan_index_forward = self.body.get("ScanIndexForward") - items, scanned_count, last_evaluated_key = dynamodb_backend2.query( + items, scanned_count, last_evaluated_key = self.dynamodb_backend.query( name, hash_key, range_comparison, 
range_values, limit, - exclusive_start_key, scan_index_forward, projection_expression, index_name=index_name, **filter_kwargs) + exclusive_start_key, scan_index_forward, projection_expression, index_name=index_name, + expr_names=expression_attribute_names, expr_values=expression_attribute_values, + filter_expression=filter_expression, **filter_kwargs + ) if items is None: er = 'com.amazonaws.dynamodb.v20111205#ResourceNotFoundException' return self.error(er, 'Requested resource not found') @@ -440,12 +462,12 @@ class DynamoHandler(BaseResponse): limit = self.body.get("Limit") try: - items, scanned_count, last_evaluated_key = dynamodb_backend2.scan(name, filters, - limit, - exclusive_start_key, - filter_expression, - expression_attribute_names, - expression_attribute_values) + items, scanned_count, last_evaluated_key = self.dynamodb_backend.scan(name, filters, + limit, + exclusive_start_key, + filter_expression, + expression_attribute_names, + expression_attribute_values) except ValueError as err: er = 'com.amazonaws.dynamodb.v20111205#ValidationError' return self.error(er, 'Bad Filter Expression: {0}'.format(err)) @@ -476,12 +498,12 @@ class DynamoHandler(BaseResponse): name = self.body['TableName'] keys = self.body['Key'] return_values = self.body.get('ReturnValues', '') - table = dynamodb_backend2.get_table(name) + table = self.dynamodb_backend.get_table(name) if not table: er = 'com.amazonaws.dynamodb.v20120810#ConditionalCheckFailedException' return self.error(er, 'A condition specified in the operation could not be evaluated.') - item = dynamodb_backend2.delete_item(name, keys) + item = self.dynamodb_backend.delete_item(name, keys) if item and return_values == 'ALL_OLD': item_dict = item.to_json() else: @@ -498,7 +520,7 @@ class DynamoHandler(BaseResponse): 'ExpressionAttributeNames', {}) expression_attribute_values = self.body.get( 'ExpressionAttributeValues', {}) - existing_item = dynamodb_backend2.get_item(name, key) + existing_item = self.dynamodb_backend.get_item(name, key) if 'Expected' in self.body: expected = self.body['Expected'] @@ -534,9 +556,10 @@ class DynamoHandler(BaseResponse): '\s*([=\+-])\s*', '\\1', update_expression) try: - item = dynamodb_backend2.update_item( - name, key, update_expression, attribute_updates, expression_attribute_names, expression_attribute_values, - expected) + item = self.dynamodb_backend.update_item( + name, key, update_expression, attribute_updates, expression_attribute_names, + expression_attribute_values, expected + ) except ValueError: er = 'com.amazonaws.dynamodb.v20111205#ConditionalCheckFailedException' return self.error(er, 'A condition specified in the operation could not be evaluated.') @@ -553,3 +576,26 @@ class DynamoHandler(BaseResponse): item_dict['Attributes'] = {} return dynamo_json_dump(item_dict) + + def describe_limits(self): + return json.dumps({ + 'AccountMaxReadCapacityUnits': 20000, + 'TableMaxWriteCapacityUnits': 10000, + 'AccountMaxWriteCapacityUnits': 20000, + 'TableMaxReadCapacityUnits': 10000 + }) + + def update_time_to_live(self): + name = self.body['TableName'] + ttl_spec = self.body['TimeToLiveSpecification'] + + self.dynamodb_backend.update_ttl(name, ttl_spec) + + return json.dumps({'TimeToLiveSpecification': ttl_spec}) + + def describe_time_to_live(self): + name = self.body['TableName'] + + ttl_spec = self.dynamodb_backend.describe_ttl(name) + + return json.dumps({'TimeToLiveDescription': ttl_spec}) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index f8090e783..502122969 100755 --- a/moto/ec2/models.py 
+++ b/moto/ec2/models.py @@ -2,10 +2,12 @@ from __future__ import unicode_literals import copy import itertools +import ipaddress import json -import os import re import six +import warnings +from pkg_resources import resource_filename import boto.ec2 @@ -44,7 +46,6 @@ from .exceptions import ( InvalidRouteTableIdError, InvalidRouteError, InvalidInstanceIdError, - MalformedAMIIdError, InvalidAMIIdError, InvalidAMIAttributeItemValueError, InvalidSnapshotIdError, @@ -112,8 +113,12 @@ from .utils import ( tag_filter_matches, ) -RESOURCES_DIR = os.path.join(os.path.dirname(__file__), 'resources') -INSTANCE_TYPES = json.load(open(os.path.join(RESOURCES_DIR, 'instance_types.json'), 'r')) +INSTANCE_TYPES = json.load( + open(resource_filename(__name__, 'resources/instance_types.json'), 'r') +) +AMIS = json.load( + open(resource_filename(__name__, 'resources/amis.json'), 'r') +) def utc_date_and_time(): @@ -372,6 +377,7 @@ class Instance(TaggedEC2Resource, BotoInstance): self.subnet_id = kwargs.get("subnet_id") in_ec2_classic = not bool(self.subnet_id) self.key_name = kwargs.get("key_name") + self.ebs_optimized = kwargs.get("ebs_optimized", False) self.source_dest_check = "true" self.launch_time = utc_date_and_time() self.disable_api_termination = kwargs.get("disable_api_termination", False) @@ -383,6 +389,11 @@ class Instance(TaggedEC2Resource, BotoInstance): amis = self.ec2_backend.describe_images(filters={'image-id': image_id}) ami = amis[0] if amis else None + if ami is None: + warnings.warn('Could not find AMI with image-id:{0}, ' + 'in the near future this will ' + 'cause an error'.format(image_id), + PendingDeprecationWarning) self.platform = ami.platform if ami else None self.virtualization_type = ami.virtualization_type if ami else 'paravirtual' @@ -402,6 +413,10 @@ class Instance(TaggedEC2Resource, BotoInstance): subnet = ec2_backend.get_subnet(self.subnet_id) self.vpc_id = subnet.vpc_id self._placement.zone = subnet.availability_zone + + if associate_public_ip is None: + # Mapping public ip hasnt been explicitly enabled or disabled + associate_public_ip = subnet.map_public_ip_on_launch == 'true' elif placement: self._placement.zone = placement else: @@ -409,10 +424,22 @@ class Instance(TaggedEC2Resource, BotoInstance): self.block_device_mapping = BlockDeviceMapping() - self.prep_nics(kwargs.get("nics", {}), - subnet_id=self.subnet_id, - private_ip=kwargs.get("private_ip"), - associate_public_ip=associate_public_ip) + self._private_ips = set() + self.prep_nics( + kwargs.get("nics", {}), + private_ip=kwargs.get("private_ip"), + associate_public_ip=associate_public_ip + ) + + def __del__(self): + try: + subnet = self.ec2_backend.get_subnet(self.subnet_id) + for ip in self._private_ips: + subnet.del_subnet_ip(ip) + except Exception: + # Its not "super" critical we clean this up, as reset will do this + # worst case we'll get IP address exaustion... 
rarely + pass def setup_defaults(self): # Default have an instance with root volume should you not wish to @@ -547,14 +574,23 @@ class Instance(TaggedEC2Resource, BotoInstance): else: return self.security_groups - def prep_nics(self, nic_spec, subnet_id=None, private_ip=None, associate_public_ip=None): + def prep_nics(self, nic_spec, private_ip=None, associate_public_ip=None): self.nics = {} - if not private_ip: + if self.subnet_id: + subnet = self.ec2_backend.get_subnet(self.subnet_id) + if not private_ip: + private_ip = subnet.get_available_subnet_ip(instance=self) + else: + subnet.request_ip(private_ip, instance=self) + + self._private_ips.add(private_ip) + elif private_ip is None: + # Preserve old behaviour if in EC2-Classic mode private_ip = random_private_ip() # Primary NIC defaults - primary_nic = {'SubnetId': subnet_id, + primary_nic = {'SubnetId': self.subnet_id, 'PrivateIpAddress': private_ip, 'AssociatePublicIpAddress': associate_public_ip} primary_nic = dict((k, v) for k, v in primary_nic.items() if v) @@ -765,14 +801,12 @@ class InstanceBackend(object): associated with the given instance_ids. """ reservations = [] - for reservation in self.all_reservations(make_copy=True): + for reservation in self.all_reservations(): reservation_instance_ids = [ instance.id for instance in reservation.instances] matching_reservation = any( instance_id in reservation_instance_ids for instance_id in instance_ids) if matching_reservation: - # We need to make a copy of the reservation because we have to modify the - # instances to limit to those requested reservation.instances = [ instance for instance in reservation.instances if instance.id in instance_ids] reservations.append(reservation) @@ -786,15 +820,8 @@ class InstanceBackend(object): reservations = filter_reservations(reservations, filters) return reservations - def all_reservations(self, make_copy=False, filters=None): - if make_copy: - # Return copies so that other functions can modify them with changing - # the originals - reservations = [copy.deepcopy(reservation) - for reservation in self.reservations.values()] - else: - reservations = [ - reservation for reservation in self.reservations.values()] + def all_reservations(self, filters=None): + reservations = [copy.copy(reservation) for reservation in self.reservations.values()] if filters is not None: reservations = filter_reservations(reservations, filters) return reservations @@ -984,17 +1011,31 @@ class TagBackend(object): class Ami(TaggedEC2Resource): def __init__(self, ec2_backend, ami_id, instance=None, source_ami=None, - name=None, description=None): + name=None, description=None, owner_id=None, + + public=False, virtualization_type=None, architecture=None, + state='available', creation_date=None, platform=None, + image_type='machine', image_location=None, hypervisor=None, + root_device_type=None, root_device_name=None, sriov='simple', + region_name='us-east-1a' + ): self.ec2_backend = ec2_backend self.id = ami_id - self.state = "available" + self.state = state self.name = name + self.image_type = image_type + self.image_location = image_location + self.owner_id = owner_id self.description = description - self.virtualization_type = None - self.architecture = None + self.virtualization_type = virtualization_type + self.architecture = architecture self.kernel_id = None - self.platform = None - self.creation_date = utc_date_and_time() + self.platform = platform + self.hypervisor = hypervisor + self.root_device_name = root_device_name + self.root_device_type = root_device_type + 
self.sriov = sriov + self.creation_date = utc_date_and_time() if creation_date is None else creation_date if instance: self.instance = instance @@ -1022,8 +1063,11 @@ class Ami(TaggedEC2Resource): self.launch_permission_groups = set() self.launch_permission_users = set() + if public: + self.launch_permission_groups.add('all') + # AWS auto-creates these, we should reflect the same. - volume = self.ec2_backend.create_volume(15, "us-east-1a") + volume = self.ec2_backend.create_volume(15, region_name) self.ebs_snapshot = self.ec2_backend.create_snapshot( volume.id, "Auto-created snapshot for AMI %s" % self.id) @@ -1050,6 +1094,8 @@ class Ami(TaggedEC2Resource): return self.state elif filter_name == 'name': return self.name + elif filter_name == 'owner-id': + return self.owner_id else: return super(Ami, self).get_filter_value( filter_name, 'DescribeImages') @@ -1058,14 +1104,22 @@ class Ami(TaggedEC2Resource): class AmiBackend(object): def __init__(self): self.amis = {} + + self._load_amis() + super(AmiBackend, self).__init__() - def create_image(self, instance_id, name=None, description=None): + def _load_amis(self): + for ami in AMIS: + ami_id = ami['ami_id'] + self.amis[ami_id] = Ami(self, **ami) + + def create_image(self, instance_id, name=None, description=None, owner_id=None): # TODO: check that instance exists and pull info from it. ami_id = random_ami_id() instance = self.get_instance(instance_id) ami = Ami(self, ami_id, instance=instance, source_ami=None, - name=name, description=description) + name=name, description=description, owner_id=owner_id) self.amis[ami_id] = ami return ami @@ -1078,30 +1132,29 @@ class AmiBackend(object): self.amis[ami_id] = ami return ami - def describe_images(self, ami_ids=(), filters=None, exec_users=None): - images = [] + def describe_images(self, ami_ids=(), filters=None, exec_users=None, owners=None): + images = self.amis.values() + + # Limit images by launch permissions if exec_users: - for ami_id in self.amis: - found = False + tmp_images = [] + for ami in images: for user_id in exec_users: - if user_id in self.amis[ami_id].launch_permission_users: - found = True - if found: - images.append(self.amis[ami_id]) - if images == []: - return images + if user_id in ami.launch_permission_users: + tmp_images.append(ami) + images = tmp_images + + # Limit by owner ids + if owners: + images = [ami for ami in images if ami.owner_id in owners] + + if ami_ids: + images = [ami for ami in images if ami.id in ami_ids] + + # Generic filters if filters: - images = images or self.amis.values() return generic_filter(filters, images) - else: - for ami_id in ami_ids: - if ami_id in self.amis: - images.append(self.amis[ami_id]) - elif not ami_id.startswith("ami-"): - raise MalformedAMIIdError(ami_id) - else: - raise InvalidAMIIdError(ami_id) - return images or self.amis.values() + return images def deregister_image(self, ami_id): if ami_id in self.amis: @@ -2123,10 +2176,17 @@ class Subnet(TaggedEC2Resource): self.id = subnet_id self.vpc_id = vpc_id self.cidr_block = cidr_block + self.cidr = ipaddress.ip_network(six.text_type(self.cidr_block)) self._availability_zone = availability_zone self.default_for_az = default_for_az self.map_public_ip_on_launch = map_public_ip_on_launch + # Theory is we assign ip's as we go (as 16,777,214 usable IPs in a /8) + self._subnet_ip_generator = self.cidr.hosts() + self.reserved_ips = [six.next(self._subnet_ip_generator) for _ in range(0, 3)] # Reserved by AWS + self._unused_ips = set() # if instance is destroyed hold IP here for reuse + 
self._subnet_ips = {} # has IP: instance + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] @@ -2193,6 +2253,46 @@ class Subnet(TaggedEC2Resource): '"Fn::GetAtt" : [ "{0}" , "AvailabilityZone" ]"') raise UnformattedGetAttTemplateException() + def get_available_subnet_ip(self, instance): + try: + new_ip = self._unused_ips.pop() + except KeyError: + new_ip = six.next(self._subnet_ip_generator) + + # Skips any IP's if they've been manually specified + while str(new_ip) in self._subnet_ips: + new_ip = six.next(self._subnet_ip_generator) + + if new_ip == self.cidr.broadcast_address: + raise StopIteration() # Broadcast address cant be used obviously + # TODO StopIteration will be raised if no ip's available, not sure how aws handles this. + + new_ip = str(new_ip) + self._subnet_ips[new_ip] = instance + + return new_ip + + def request_ip(self, ip, instance): + if ipaddress.ip_address(ip) not in self.cidr: + raise Exception('IP does not fall in the subnet CIDR of {0}'.format(self.cidr)) + + if ip in self._subnet_ips: + raise Exception('IP already in use') + try: + self._unused_ips.remove(ip) + except KeyError: + pass + + self._subnet_ips[ip] = instance + return ip + + def del_subnet_ip(self, ip): + try: + del self._subnet_ips[ip] + self._unused_ips.add(ip) + except KeyError: + pass # Unknown IP + class SubnetBackend(object): def __init__(self): @@ -3615,8 +3715,8 @@ class NatGatewayBackend(object): return self.nat_gateways.pop(nat_gateway_id) -class EC2Backend(BaseBackend, InstanceBackend, TagBackend, AmiBackend, - RegionsAndZonesBackend, SecurityGroupBackend, EBSBackend, +class EC2Backend(BaseBackend, InstanceBackend, TagBackend, EBSBackend, + RegionsAndZonesBackend, SecurityGroupBackend, AmiBackend, VPCBackend, SubnetBackend, SubnetRouteTableAssociationBackend, NetworkInterfaceBackend, VPNConnectionBackend, VPCPeeringConnectionBackend, diff --git a/moto/ec2/resources/amis.json b/moto/ec2/resources/amis.json new file mode 100644 index 000000000..5cc3122f3 --- /dev/null +++ b/moto/ec2/resources/amis.json @@ -0,0 +1,546 @@ +[ + { + "ami_id": "ami-03cf127a", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 Nano Locale English AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Nano-Base-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-12c6146b", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2008 R2 SP1 Datacenter 64-bit Locale English Base AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2008-R2_SP1-English-64Bit-Base-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-1812c061", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 Locale English with SQL Standard 2016 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": 
"Windows_Server-2016-English-Full-SQL_2016_SP1_Standard-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-1e749f67", + "state": "available", + "public": true, + "owner_id": "099720109477", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Canonical, Ubuntu, 14.04 LTS, amd64 trusty image build on 2017-07-27", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": "ubuntu/images/hvm-ssd/ubuntu-trusty-14.04-amd64-server-20170727", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-1ecc1e67", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2012-R2_RTM-English-64Bit-Base-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-1f12c066", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 Locale English with SQL Express 2016 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Full-SQL_2016_SP1_Express-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-24f3215d", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Web 2014 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2014_SP2_Web-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-35e92e4c", + "state": "available", + "public": true, + "owner_id": "013907871322", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "SUSE Linux Enterprise Server 12 SP3 (HVM, 64-bit, SSD-Backed)", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": "suse-sles-12-sp3-v20170907-hvm-ssd-x86_64", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-3bf32142", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Express 2016 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2016_SP1_Express-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-3df32144", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Enterprise 2016 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": 
"x86_64", + "name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2016_SP1_Enterprise-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-56ec3e2f", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 Locale English with SQL Express 2017 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Full-SQL_2017_Express-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-61db0918", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2003 R2 SP2 Datacenter 64-bit Locale English Base AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2003-R2_SP2-English-64Bit-Base-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-6ef02217", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Web 2016 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2016_SP1_Web-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-760aaa0f", + "state": "available", + "public": true, + "owner_id": "137112412989", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/xvda", + "description": "Amazon Linux AMI 2017.09.1.20171103 x86_64 HVM GP2", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": "amzn-ami-hvm-2017.09.1.20171103-x86_64-gp2", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-77ed3f0e", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 Full Locale English with SQL Enterprise 2016 SP1 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Full-SQL_2016_SP1_Enterprise-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-785db401", + "state": "available", + "public": true, + "owner_id": "099720109477", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Canonical, Ubuntu, 16.04 LTS, amd64 xenial image build on 2017-07-21", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20170721", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-8104a4f8", + "state": "available", + "public": true, + "owner_id": "137112412989", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Amazon Linux AMI 2017.09.1.20171103 x86_64 PV EBS", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": 
"amzn-ami-pv-2017.09.1.20171103-x86_64-ebs", + "virtualization_type": "paravirtual", + "hypervisor": "xen" + }, + { + "ami_id": "ami-84ee3cfd", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 Locale English with SQL Web 2017 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Full-SQL_2017_Web-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-86ee3cff", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 Locale English with SQL Standard 2017 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Full-SQL_2017_Standard-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-999844e0", + "state": "available", + "public": true, + "owner_id": "898082745236", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/xvda", + "description": "Deep Learning on Amazon Linux with MXNet, Tensorflow, Caffe, Theano, Torch, CNTK and Keras", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": "Deep Learning AMI Amazon Linux - 3.3_Oct2017", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-9b32e8e2", + "state": "available", + "public": true, + "owner_id": "898082745236", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "CUDA9 Classic Ubuntu DLAMI 1508914531", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": "Ubuntu CUDA9 DLAMI with MXNet/TF/Caffe2", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-a9cc1ed0", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Standard 2014 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2014_SP2_Standard-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-afee3cd6", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 Locale English with SQL Web 2016 SP1 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Full-SQL_2016_SP1_Web-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-b7e93bce", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 with Desktop Experience Locale English AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Full-Base-2017.10.13", + 
"virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-bb9a6bc2", + "state": "available", + "public": true, + "owner_id": "309956199498", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Provided by Red Hat, Inc.", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": "RHEL-7.4_HVM_GA-20170808-x86_64-2-Hourly2-GP2", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-bceb39c5", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 with Containers Locale English AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Full-Containers-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-c2ff2dbb", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2012 RTM 64-bit Locale English Base AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2012-RTM-English-64Bit-Base-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-c6f321bf", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Express 2014 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2014_SP2_Express-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-d1cb19a8", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2008 SP2 Datacenter 64-bit Locale English Base AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2008-SP2-English-64Bit-Base-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-dca37ea5", + "state": "available", + "public": true, + "owner_id": "898082745236", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Deep Learning on Ubuntu Linux with MXNet, Tensorflow, Caffe, Theano, Torch, CNTK and Keras", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": "Deep Learning AMI Ubuntu Linux - 2.4_Oct2017", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-f0e83a89", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2016 Locale English with SQL Enterprise 2017 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2016-English-Full-SQL_2017_Enterprise-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + 
"ami_id": "ami-f4cf1d8d", + "state": "available", + "public": true, + "owner_id": "801119661308", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda1", + "description": "Microsoft Windows Server 2012 R2 RTM 64-bit Locale English with SQL Standard 2016 AMI provided by Amazon", + "image_type": "machine", + "platform": "windows", + "architecture": "x86_64", + "name": "Windows_Server-2012-R2_RTM-English-64Bit-SQL_2016_SP1_Standard-2017.10.13", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-f8e54081", + "state": "available", + "public": true, + "owner_id": "898082745236", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/xvda", + "description": "CUDA9 Classic Amazon Linux DLAMI 1508914924", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": "CUDA9ClassicAmazonLinuxDLAMIwithMXNetTensorflowandCaffe2 ", + "virtualization_type": "hvm", + "hypervisor": "xen" + }, + { + "ami_id": "ami-fa7cdd89", + "state": "available", + "public": true, + "owner_id": "013907871322", + "sriov": "simple", + "root_device_type": "ebs", + "root_device_name": "/dev/sda", + "description": "SUSE Linux Enterprise Server 11 Service Pack 4 ((PV, 64-bit, SSD-Backed)", + "image_type": "machine", + "platform": null, + "architecture": "x86_64", + "name": "suse-sles-11-sp4-v20151207-pv-ssd-x86_64", + "virtualization_type": "paravirtual", + "hypervisor": "xen" + } +] \ No newline at end of file diff --git a/moto/ec2/responses/amis.py b/moto/ec2/responses/amis.py index c92471093..19e6d31a1 100755 --- a/moto/ec2/responses/amis.py +++ b/moto/ec2/responses/amis.py @@ -36,9 +36,10 @@ class AmisResponse(BaseResponse): def describe_images(self): ami_ids = self._get_multi_param('ImageId') filters = filters_from_querystring(self.querystring) + owners = self._get_multi_param('Owner') exec_users = self._get_multi_param('ExecutableBy') images = self.ec2_backend.describe_images( - ami_ids=ami_ids, filters=filters, exec_users=exec_users) + ami_ids=ami_ids, filters=filters, exec_users=exec_users, owners=owners) template = self.response_template(DESCRIBE_IMAGES_RESPONSE) return template.render(images=images) @@ -92,12 +93,12 @@ DESCRIBE_IMAGES_RESPONSE = """ 0: + if not force and container_instance.running_tasks_count > 0: raise Exception("Found running tasks on the instance.") # Currently assume that people might want to do something based around deregistered instances # with tasks left running on them - but nothing if no tasks were running already - elif force and container_instance.running_task_count > 0: + elif force and container_instance.running_tasks_count > 0: if not self.container_instances.get('orphaned'): self.container_instances['orphaned'] = {} self.container_instances['orphaned'][container_instance_id] = container_instance @@ -766,7 +792,102 @@ class EC2ContainerServiceBackend(BaseBackend): raise Exception("{0} is not a cluster".format(cluster_name)) pass + def put_attributes(self, cluster_name, attributes=None): + if cluster_name is None or cluster_name not in self.clusters: + raise JsonRESTError('ClusterNotFoundException', 'Cluster not found', status=400) -ecs_backends = {} -for region, ec2_backend in ec2_backends.items(): - ecs_backends[region] = EC2ContainerServiceBackend() + if attributes is None: + raise JsonRESTError('InvalidParameterException', 'attributes value is required') + + for attr in attributes: + self._put_attribute(cluster_name, attr['name'], attr.get('value'), attr.get('targetId'), 
attr.get('targetType')) + + def _put_attribute(self, cluster_name, name, value=None, target_id=None, target_type=None): + if target_id is None and target_type is None: + for instance in self.container_instances[cluster_name].values(): + instance.attributes[name] = value + elif target_type is None: + # targetId is full container instance arn + try: + arn = target_id.rsplit('/', 1)[-1] + self.container_instances[cluster_name][arn].attributes[name] = value + except KeyError: + raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id)) + else: + # targetId is container uuid, targetType must be container-instance + try: + if target_type != 'container-instance': + raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id)) + + self.container_instances[cluster_name][target_id].attributes[name] = value + except KeyError: + raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id)) + + def list_attributes(self, target_type, cluster_name=None, attr_name=None, attr_value=None, max_results=None, next_token=None): + if target_type != 'container-instance': + raise JsonRESTError('InvalidParameterException', 'targetType must be container-instance') + + filters = [lambda x: True] + + # item will be {0 cluster_name, 1 arn, 2 name, 3 value} + if cluster_name is not None: + filters.append(lambda item: item[0] == cluster_name) + if attr_name: + filters.append(lambda item: item[2] == attr_name) + if attr_name: + filters.append(lambda item: item[3] == attr_value) + + all_attrs = [] + for cluster_name, cobj in self.container_instances.items(): + for container_instance in cobj.values(): + for key, value in container_instance.attributes.items(): + all_attrs.append((cluster_name, container_instance.container_instance_arn, key, value)) + + return filter(lambda x: all(f(x) for f in filters), all_attrs) + + def delete_attributes(self, cluster_name, attributes=None): + if cluster_name is None or cluster_name not in self.clusters: + raise JsonRESTError('ClusterNotFoundException', 'Cluster not found', status=400) + + if attributes is None: + raise JsonRESTError('InvalidParameterException', 'attributes value is required') + + for attr in attributes: + self._delete_attribute(cluster_name, attr['name'], attr.get('value'), attr.get('targetId'), attr.get('targetType')) + + def _delete_attribute(self, cluster_name, name, value=None, target_id=None, target_type=None): + if target_id is None and target_type is None: + for instance in self.container_instances[cluster_name].values(): + if name in instance.attributes and instance.attributes[name] == value: + del instance.attributes[name] + elif target_type is None: + # targetId is full container instance arn + try: + arn = target_id.rsplit('/', 1)[-1] + instance = self.container_instances[cluster_name][arn] + if name in instance.attributes and instance.attributes[name] == value: + del instance.attributes[name] + except KeyError: + raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id)) + else: + # targetId is container uuid, targetType must be container-instance + try: + if target_type != 'container-instance': + raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id)) + + instance = self.container_instances[cluster_name][target_id] + if name in instance.attributes and instance.attributes[name] == value: + del instance.attributes[name] + except KeyError: + raise JsonRESTError('TargetNotFoundException', 'Could not find {0}'.format(target_id)) 
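Editor's note, not part of the diff: a minimal usage sketch of the ECS attribute endpoints added above (put_attributes / list_attributes / delete_attributes), assuming moto's mock_ec2/mock_ecs decorators and the generate_instance_identity_document helper that moto's own test suite relies on. The cluster name, attribute name/value, and AMI id (taken from the amis.json added earlier in this diff) are illustrative only.

import json

import boto3
from moto import mock_ec2, mock_ecs
from moto.ec2 import utils as ec2_utils


@mock_ec2
@mock_ecs
def attribute_round_trip():
    ecs = boto3.client("ecs", region_name="us-east-1")
    ec2 = boto3.resource("ec2", region_name="us-east-1")

    ecs.create_cluster(clusterName="test_cluster")

    # Register a mocked container instance so there is a target to attach attributes to.
    instance = ec2.create_instances(ImageId="ami-760aaa0f", MinCount=1, MaxCount=1)[0]
    doc = json.dumps(ec2_utils.generate_instance_identity_document(instance))
    arn = ecs.register_container_instance(
        cluster="test_cluster", instanceIdentityDocument=doc
    )["containerInstance"]["containerInstanceArn"]

    # targetId may be the full container-instance ARN; the backend strips it with rsplit('/').
    ecs.put_attributes(
        cluster="test_cluster",
        attributes=[{"name": "env", "value": "prod", "targetId": arn}],
    )

    attrs = ecs.list_attributes(
        cluster="test_cluster", targetType="container-instance"
    )["attributes"]
    assert {"name": "env", "value": "prod", "targetId": arn} in attrs

    # Deleting requires the same name/value pair; non-matching values are left in place.
    ecs.delete_attributes(
        cluster="test_cluster",
        attributes=[{"name": "env", "value": "prod", "targetId": arn}],
    )

As the _put_attribute / _delete_attribute branches above show, a target can be addressed either by its full container-instance ARN or by its bare id together with targetType='container-instance'; omitting the target applies the attribute to every instance in the cluster.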
+ + def list_task_definition_families(self, family_prefix=None, status=None, max_results=None, next_token=None): + for task_fam in self.task_definitions: + if family_prefix is not None and not task_fam.startswith(family_prefix): + continue + + yield task_fam + + +available_regions = boto3.session.Session().get_available_regions("ecs") +ecs_backends = {region: EC2ContainerServiceBackend(region) for region in available_regions} diff --git a/moto/ecs/responses.py b/moto/ecs/responses.py index 8f6fe850f..e81e04145 100644 --- a/moto/ecs/responses.py +++ b/moto/ecs/responses.py @@ -9,6 +9,12 @@ class EC2ContainerServiceResponse(BaseResponse): @property def ecs_backend(self): + """ + ECS Backend + + :return: ECS Backend object + :rtype: moto.ecs.models.EC2ContainerServiceBackend + """ return ecs_backends[self.region] @property @@ -34,7 +40,7 @@ class EC2ContainerServiceResponse(BaseResponse): cluster_arns = self.ecs_backend.list_clusters() return json.dumps({ 'clusterArns': cluster_arns - # 'nextToken': str(uuid.uuid1()) + # 'nextToken': str(uuid.uuid4()) }) def describe_clusters(self): @@ -66,7 +72,7 @@ class EC2ContainerServiceResponse(BaseResponse): task_definition_arns = self.ecs_backend.list_task_definitions() return json.dumps({ 'taskDefinitionArns': task_definition_arns - # 'nextToken': str(uuid.uuid1()) + # 'nextToken': str(uuid.uuid4()) }) def describe_task_definition(self): @@ -159,7 +165,7 @@ class EC2ContainerServiceResponse(BaseResponse): return json.dumps({ 'serviceArns': service_arns # , - # 'nextToken': str(uuid.uuid1()) + # 'nextToken': str(uuid.uuid4()) }) def describe_services(self): @@ -245,3 +251,62 @@ class EC2ContainerServiceResponse(BaseResponse): 'failures': [ci.response_object for ci in failures], 'containerInstances': [ci.response_object for ci in container_instances] }) + + def put_attributes(self): + cluster_name = self._get_param('cluster') + attributes = self._get_param('attributes') + + self.ecs_backend.put_attributes(cluster_name, attributes) + + return json.dumps({'attributes': attributes}) + + def list_attributes(self): + cluster_name = self._get_param('cluster') + attr_name = self._get_param('attributeName') + attr_value = self._get_param('attributeValue') + target_type = self._get_param('targetType') + max_results = self._get_param('maxResults') + next_token = self._get_param('nextToken') + + results = self.ecs_backend.list_attributes(target_type, cluster_name, attr_name, attr_value, max_results, next_token) + # Result will be [item will be {0 cluster_name, 1 arn, 2 name, 3 value}] + + formatted_results = [] + for _, arn, name, value in results: + tmp_result = { + 'name': name, + 'targetId': arn + } + if value is not None: + tmp_result['value'] = value + formatted_results.append(tmp_result) + + return json.dumps({'attributes': formatted_results}) + + def delete_attributes(self): + cluster_name = self._get_param('cluster') + attributes = self._get_param('attributes') + + self.ecs_backend.delete_attributes(cluster_name, attributes) + + return json.dumps({'attributes': attributes}) + + def discover_poll_endpoint(self): + # Here are the arguments, this api is used by the ecs client so obviously no decent + # documentation. 
Hence I've responded with valid but useless data + # cluster_name = self._get_param('cluster') + # instance = self._get_param('containerInstance') + return json.dumps({ + 'endpoint': 'http://localhost', + 'telemetryEndpoint': 'http://localhost' + }) + + def list_task_definition_families(self): + family_prefix = self._get_param('familyPrefix') + status = self._get_param('status') + max_results = self._get_param('maxResults') + next_token = self._get_param('nextToken') + + results = self.ecs_backend.list_task_definition_families(family_prefix, status, max_results, next_token) + + return json.dumps({'families': list(results)}) diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index 8aa9ee9f0..726b1a164 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -3,8 +3,12 @@ from __future__ import unicode_literals import datetime import re from moto.compat import OrderedDict +from moto.core.exceptions import RESTError from moto.core import BaseBackend, BaseModel from moto.ec2.models import ec2_backends +from moto.acm.models import acm_backends +from .utils import make_arn_for_target_group +from .utils import make_arn_for_load_balancer from .exceptions import ( DuplicateLoadBalancerName, DuplicateListenerError, @@ -40,33 +44,44 @@ class FakeHealthStatus(BaseModel): class FakeTargetGroup(BaseModel): + HTTP_CODE_REGEX = re.compile(r'(?:(?:\d+-\d+|\d+),?)+') + def __init__(self, name, arn, vpc_id, protocol, port, - healthcheck_protocol, - healthcheck_port, - healthcheck_path, - healthcheck_interval_seconds, - healthcheck_timeout_seconds, - healthy_threshold_count, - unhealthy_threshold_count): + healthcheck_protocol=None, + healthcheck_port=None, + healthcheck_path=None, + healthcheck_interval_seconds=None, + healthcheck_timeout_seconds=None, + healthy_threshold_count=None, + unhealthy_threshold_count=None, + matcher=None, + target_type=None): + + # TODO: default values differs when you add Network Load balancer self.name = name self.arn = arn self.vpc_id = vpc_id self.protocol = protocol self.port = port - self.healthcheck_protocol = healthcheck_protocol - self.healthcheck_port = healthcheck_port - self.healthcheck_path = healthcheck_path - self.healthcheck_interval_seconds = healthcheck_interval_seconds - self.healthcheck_timeout_seconds = healthcheck_timeout_seconds - self.healthy_threshold_count = healthy_threshold_count - self.unhealthy_threshold_count = unhealthy_threshold_count + self.healthcheck_protocol = healthcheck_protocol or 'HTTP' + self.healthcheck_port = healthcheck_port or 'traffic-port' + self.healthcheck_path = healthcheck_path or '/' + self.healthcheck_interval_seconds = healthcheck_interval_seconds or 30 + self.healthcheck_timeout_seconds = healthcheck_timeout_seconds or 5 + self.healthy_threshold_count = healthy_threshold_count or 5 + self.unhealthy_threshold_count = unhealthy_threshold_count or 2 self.load_balancer_arns = [] self.tags = {} + if matcher is None: + self.matcher = {'HttpCode': '200'} + else: + self.matcher = matcher + self.target_type = target_type self.attributes = { 'deregistration_delay.timeout_seconds': 300, @@ -75,6 +90,10 @@ class FakeTargetGroup(BaseModel): self.targets = OrderedDict() + @property + def physical_resource_id(self): + return self.arn + def register(self, targets): for target in targets: self.targets[target['id']] = { @@ -99,6 +118,46 @@ class FakeTargetGroup(BaseModel): raise InvalidTargetError() return FakeHealthStatus(t['id'], t['port'], self.healthcheck_port, 'healthy') + @classmethod + def 
create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + + elbv2_backend = elbv2_backends[region_name] + + # per cloudformation docs: + # The target group name should be shorter than 22 characters because + # AWS CloudFormation uses the target group name to create the name of the load balancer. + name = properties.get('Name', resource_name[:22]) + vpc_id = properties.get("VpcId") + protocol = properties.get('Protocol') + port = properties.get("Port") + healthcheck_protocol = properties.get("HealthCheckProtocol") + healthcheck_port = properties.get("HealthCheckPort") + healthcheck_path = properties.get("HealthCheckPath") + healthcheck_interval_seconds = properties.get("HealthCheckIntervalSeconds") + healthcheck_timeout_seconds = properties.get("HealthCheckTimeoutSeconds") + healthy_threshold_count = properties.get("HealthyThresholdCount") + unhealthy_threshold_count = properties.get("UnhealthyThresholdCount") + matcher = properties.get("Matcher") + target_type = properties.get("TargetType") + + target_group = elbv2_backend.create_target_group( + name=name, + vpc_id=vpc_id, + protocol=protocol, + port=port, + healthcheck_protocol=healthcheck_protocol, + healthcheck_port=healthcheck_port, + healthcheck_path=healthcheck_path, + healthcheck_interval_seconds=healthcheck_interval_seconds, + healthcheck_timeout_seconds=healthcheck_timeout_seconds, + healthy_threshold_count=healthy_threshold_count, + unhealthy_threshold_count=unhealthy_threshold_count, + matcher=matcher, + target_type=target_type, + ) + return target_group + class FakeListener(BaseModel): @@ -109,6 +168,7 @@ class FakeListener(BaseModel): self.port = port self.ssl_policy = ssl_policy self.certificate = certificate + self.certificates = [certificate] if certificate is not None else [] self.default_actions = default_actions self._non_default_rules = [] self._default_rule = FakeRule( @@ -119,6 +179,10 @@ class FakeListener(BaseModel): is_default=True ) + @property + def physical_resource_id(self): + return self.arn + @property def rules(self): return self._non_default_rules + [self._default_rule] @@ -130,6 +194,28 @@ class FakeListener(BaseModel): self._non_default_rules.append(rule) self._non_default_rules = sorted(self._non_default_rules, key=lambda x: x.priority) + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + + elbv2_backend = elbv2_backends[region_name] + load_balancer_arn = properties.get("LoadBalancerArn") + protocol = properties.get("Protocol") + port = properties.get("Port") + ssl_policy = properties.get("SslPolicy") + certificates = properties.get("Certificates") + # transform default actions to confirm with the rest of the code and XML templates + if "DefaultActions" in properties: + default_actions = [] + for action in properties['DefaultActions']: + default_actions.append({'type': action['Type'], 'target_group_arn': action['TargetGroupArn']}) + else: + default_actions = None + + listener = elbv2_backend.create_listener( + load_balancer_arn, protocol, port, ssl_policy, certificates, default_actions) + return listener + class FakeRule(BaseModel): @@ -153,6 +239,8 @@ class FakeBackend(BaseModel): class FakeLoadBalancer(BaseModel): + VALID_ATTRS = {'access_logs.s3.enabled', 'access_logs.s3.bucket', 'access_logs.s3.prefix', + 'deletion_protection.enabled', 'idle_timeout.timeout_seconds'} def __init__(self, name, security_groups, subnets, 
vpc_id, arn, dns_name, scheme='internet-facing'): self.name = name @@ -166,9 +254,18 @@ class FakeLoadBalancer(BaseModel): self.arn = arn self.dns_name = dns_name + self.stack = 'ipv4' + self.attrs = { + 'access_logs.s3.enabled': 'false', + 'access_logs.s3.bucket': None, + 'access_logs.s3.prefix': None, + 'deletion_protection.enabled': 'false', + 'idle_timeout.timeout_seconds': '60' + } + @property def physical_resource_id(self): - return self.name + return self.arn def add_tag(self, key, value): if len(self.tags) >= 10 and key not in self.tags: @@ -186,6 +283,48 @@ class FakeLoadBalancer(BaseModel): ''' Not exposed as part of the ELB API - used for CloudFormation. ''' elbv2_backends[region].delete_load_balancer(self.arn) + @classmethod + def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): + properties = cloudformation_json['Properties'] + + elbv2_backend = elbv2_backends[region_name] + + name = properties.get('Name', resource_name) + security_groups = properties.get("SecurityGroups") + subnet_ids = properties.get('Subnets') + scheme = properties.get('Scheme', 'internet-facing') + + load_balancer = elbv2_backend.create_load_balancer(name, security_groups, subnet_ids, scheme=scheme) + return load_balancer + + def get_cfn_attribute(self, attribute_name): + ''' + Implemented attributes: + * DNSName + * LoadBalancerName + + Not implemented: + * CanonicalHostedZoneID + * LoadBalancerFullName + * SecurityGroups + + This method is similar to models.py:FakeLoadBalancer.get_cfn_attribute() + ''' + from moto.cloudformation.exceptions import UnformattedGetAttTemplateException + not_implemented_yet = [ + 'CanonicalHostedZoneID', + 'LoadBalancerFullName', + 'SecurityGroups', + ] + if attribute_name == 'DNSName': + return self.dns_name + elif attribute_name == 'LoadBalancerName': + return self.name + elif attribute_name in not_implemented_yet: + raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "%s" ]"' % attribute_name) + else: + raise UnformattedGetAttTemplateException() + class ELBv2Backend(BaseBackend): @@ -194,6 +333,26 @@ class ELBv2Backend(BaseBackend): self.target_groups = OrderedDict() self.load_balancers = OrderedDict() + @property + def ec2_backend(self): + """ + EC2 backend + + :return: EC2 Backend + :rtype: moto.ec2.models.EC2Backend + """ + return ec2_backends[self.region_name] + + @property + def acm_backend(self): + """ + ACM backend + + :return: ACM Backend + :rtype: moto.acm.models.AWSCertificateManagerBackend + """ + return acm_backends[self.region_name] + def reset(self): region_name = self.region_name self.__dict__ = {} @@ -201,18 +360,17 @@ class ELBv2Backend(BaseBackend): def create_load_balancer(self, name, security_groups, subnet_ids, scheme='internet-facing'): vpc_id = None - ec2_backend = ec2_backends[self.region_name] subnets = [] if not subnet_ids: raise SubnetNotFoundError() for subnet_id in subnet_ids: - subnet = ec2_backend.get_subnet(subnet_id) + subnet = self.ec2_backend.get_subnet(subnet_id) if subnet is None: raise SubnetNotFoundError() subnets.append(subnet) vpc_id = subnets[0].vpc_id - arn = "arn:aws:elasticloadbalancing:%s:1:loadbalancer/%s/50dc6c495c0c9188" % (self.region_name, name) + arn = make_arn_for_load_balancer(account_id=1, name=name, region_name=self.region_name) dns_name = "%s-1.%s.elb.amazonaws.com" % (name, self.region_name) if arn in self.load_balancers: @@ -279,7 +437,7 @@ class ELBv2Backend(BaseBackend): def create_target_group(self, name, **kwargs): if len(name) > 32: raise InvalidTargetGroupNameError( - 
"Target group name '%s' cannot be longer than '32' characters" % name + "Target group name '%s' cannot be longer than '22' characters" % name ) if not re.match('^[a-zA-Z0-9\-]+$', name): raise InvalidTargetGroupNameError( @@ -300,7 +458,20 @@ class ELBv2Backend(BaseBackend): if target_group.name == name: raise DuplicateTargetGroupName() - arn = "arn:aws:elasticloadbalancing:%s:1:targetgroup/%s/50dc6c495c0c9188" % (self.region_name, name) + valid_protocols = ['HTTPS', 'HTTP', 'TCP'] + if kwargs.get('healthcheck_protocol') and kwargs['healthcheck_protocol'] not in valid_protocols: + raise InvalidConditionValueError( + "Value {} at 'healthCheckProtocol' failed to satisfy constraint: " + "Member must satisfy enum value set: {}".format(kwargs['healthcheck_protocol'], valid_protocols)) + if kwargs.get('protocol') and kwargs['protocol'] not in valid_protocols: + raise InvalidConditionValueError( + "Value {} at 'protocol' failed to satisfy constraint: " + "Member must satisfy enum value set: {}".format(kwargs['protocol'], valid_protocols)) + + if kwargs.get('matcher') and FakeTargetGroup.HTTP_CODE_REGEX.match(kwargs['matcher']['HttpCode']) is None: + raise RESTError('InvalidParameterValue', 'HttpCode must be like 200 | 200-399 | 200,201 ...') + + arn = make_arn_for_target_group(account_id=1, name=name, region_name=self.region_name) target_group = FakeTargetGroup(name, arn, **kwargs) self.target_groups[target_group.arn] = target_group return target_group @@ -547,6 +718,166 @@ class ELBv2Backend(BaseBackend): modified_rules.append(given_rule) return modified_rules + def set_ip_address_type(self, arn, ip_type): + if ip_type not in ('internal', 'dualstack'): + raise RESTError('InvalidParameterValue', 'IpAddressType must be either internal | dualstack') + + balancer = self.load_balancers.get(arn) + if balancer is None: + raise LoadBalancerNotFoundError() + + if ip_type == 'dualstack' and balancer.scheme == 'internal': + raise RESTError('InvalidConfigurationRequest', 'Internal load balancers cannot be dualstack') + + balancer.stack = ip_type + + def set_security_groups(self, arn, sec_groups): + balancer = self.load_balancers.get(arn) + if balancer is None: + raise LoadBalancerNotFoundError() + + # Check all security groups exist + for sec_group_id in sec_groups: + if self.ec2_backend.get_security_group_from_id(sec_group_id) is None: + raise RESTError('InvalidSecurityGroup', 'Security group {0} does not exist'.format(sec_group_id)) + + balancer.security_groups = sec_groups + + def set_subnets(self, arn, subnets): + balancer = self.load_balancers.get(arn) + if balancer is None: + raise LoadBalancerNotFoundError() + + subnet_objects = [] + sub_zone_list = {} + for subnet in subnets: + try: + subnet = self.ec2_backend.get_subnet(subnet) + + if subnet.availability_zone in sub_zone_list: + raise RESTError('InvalidConfigurationRequest', 'More than 1 subnet cannot be specified for 1 availability zone') + + sub_zone_list[subnet.availability_zone] = subnet.id + subnet_objects.append(subnet) + except Exception: + raise SubnetNotFoundError() + + if len(sub_zone_list) < 2: + raise RESTError('InvalidConfigurationRequest', 'More than 1 availability zone must be specified') + + balancer.subnets = subnet_objects + + return sub_zone_list.items() + + def modify_load_balancer_attributes(self, arn, attrs): + balancer = self.load_balancers.get(arn) + if balancer is None: + raise LoadBalancerNotFoundError() + + for key in attrs: + if key not in FakeLoadBalancer.VALID_ATTRS: + raise RESTError('InvalidConfigurationRequest', 
'Key {0} not valid'.format(key)) + + balancer.attrs.update(attrs) + return balancer.attrs + + def describe_load_balancer_attributes(self, arn): + balancer = self.load_balancers.get(arn) + if balancer is None: + raise LoadBalancerNotFoundError() + + return balancer.attrs + + def modify_target_group(self, arn, health_check_proto=None, health_check_port=None, health_check_path=None, health_check_interval=None, + health_check_timeout=None, healthy_threshold_count=None, unhealthy_threshold_count=None, http_codes=None): + target_group = self.target_groups.get(arn) + if target_group is None: + raise TargetGroupNotFoundError() + + if http_codes is not None and FakeTargetGroup.HTTP_CODE_REGEX.match(http_codes) is None: + raise RESTError('InvalidParameterValue', 'HttpCode must be like 200 | 200-399 | 200,201 ...') + + if http_codes is not None: + target_group.matcher['HttpCode'] = http_codes + if health_check_interval is not None: + target_group.healthcheck_interval_seconds = health_check_interval + if health_check_path is not None: + target_group.healthcheck_path = health_check_path + if health_check_port is not None: + target_group.healthcheck_port = health_check_port + if health_check_proto is not None: + target_group.healthcheck_protocol = health_check_proto + if health_check_timeout is not None: + target_group.healthcheck_timeout_seconds = health_check_timeout + if healthy_threshold_count is not None: + target_group.healthy_threshold_count = healthy_threshold_count + if unhealthy_threshold_count is not None: + target_group.unhealthy_threshold_count = unhealthy_threshold_count + + return target_group + + def modify_listener(self, arn, port=None, protocol=None, ssl_policy=None, certificates=None, default_actions=None): + for load_balancer in self.load_balancers.values(): + if arn in load_balancer.listeners: + break + else: + raise ListenerNotFoundError() + + listener = load_balancer.listeners[arn] + + if port is not None: + for listener_arn, current_listener in load_balancer.listeners.items(): + if listener_arn == arn: + continue + if listener.port == port: + raise DuplicateListenerError() + + listener.port = port + + if protocol is not None: + if protocol not in ('HTTP', 'HTTPS', 'TCP'): + raise RESTError('UnsupportedProtocol', 'Protocol {0} is not supported'.format(protocol)) + + # HTTPS checks + if protocol == 'HTTPS': + # HTTPS + + # Might already be HTTPS so may not provide certs + if certificates is None and listener.protocol != 'HTTPS': + raise RESTError('InvalidConfigurationRequest', 'Certificates must be provided for HTTPS') + + # Check certificates exist + if certificates is not None: + default_cert = None + all_certs = set() # for SNI + for cert in certificates: + if cert['is_default'] == 'true': + default_cert = cert['certificate_arn'] + try: + self.acm_backend.get_certificate(cert['certificate_arn']) + except Exception: + raise RESTError('CertificateNotFound', 'Certificate {0} not found'.format(cert['certificate_arn'])) + + all_certs.add(cert['certificate_arn']) + + if default_cert is None: + raise RESTError('InvalidConfigurationRequest', 'No default certificate') + + listener.certificate = default_cert + listener.certificates = list(all_certs) + + listener.protocol = protocol + + if ssl_policy is not None: + # Its already validated in responses.py + listener.ssl_policy = ssl_policy + + if default_actions is not None: + # Is currently not validated + listener.default_actions = default_actions + + return listener + def _any_listener_using(self, target_group_arn): for load_balancer in 
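modify_listener above enforces the HTTPS rules: switching a listener to HTTPS requires certificates, exactly one of them flagged as the default, and each ARN must exist in the mocked ACM backend. A sketch of the HTTP-to-HTTPS upgrade path, assuming the usual boto3 calls (domain, resource names and region are illustrative):

    import boto3
    from moto import mock_acm, mock_ec2, mock_elbv2

    @mock_acm
    @mock_ec2
    @mock_elbv2
    def test_modify_listener_http_to_https():
        ec2 = boto3.client('ec2', region_name='us-east-1')
        elbv2 = boto3.client('elbv2', region_name='us-east-1')
        acm = boto3.client('acm', region_name='us-east-1')

        vpc_id = ec2.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']['VpcId']
        subnet_id = ec2.create_subnet(VpcId=vpc_id, CidrBlock='10.0.0.0/24')['Subnet']['SubnetId']
        lb_arn = elbv2.create_load_balancer(
            Name='my-lb', Subnets=[subnet_id])['LoadBalancers'][0]['LoadBalancerArn']
        tg_arn = elbv2.create_target_group(
            Name='my-targets', Protocol='HTTP', Port=80,
            VpcId=vpc_id)['TargetGroups'][0]['TargetGroupArn']
        listener_arn = elbv2.create_listener(
            LoadBalancerArn=lb_arn, Protocol='HTTP', Port=80,
            DefaultActions=[{'Type': 'forward', 'TargetGroupArn': tg_arn}],
        )['Listeners'][0]['ListenerArn']

        # the certificate has to exist in the mocked ACM backend
        cert_arn = acm.request_certificate(DomainName='example.com')['CertificateArn']
        elbv2.modify_listener(
            ListenerArn=listener_arn,
            Port=443,
            Protocol='HTTPS',
            SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01',
            # exactly one certificate must be marked as the default
            Certificates=[{'CertificateArn': cert_arn, 'IsDefault': True}])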
self.load_balancers.values(): for listener in load_balancer.listeners.values(): diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index 3e8535187..7c71ce78a 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -1,4 +1,6 @@ from __future__ import unicode_literals +from moto.core.exceptions import RESTError +from moto.core.utils import amzn_request_id from moto.core.responses import BaseResponse from .models import elbv2_backends from .exceptions import DuplicateTagKeysError @@ -6,12 +8,131 @@ from .exceptions import LoadBalancerNotFoundError from .exceptions import TargetGroupNotFoundError -class ELBV2Response(BaseResponse): +SSL_POLICIES = [ + { + 'name': 'ELBSecurityPolicy-2016-08', + 'ssl_protocols': ['TLSv1', 'TLSv1.1', 'TLSv1.2'], + 'ciphers': [ + {'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1}, + {'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2}, + {'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3}, + {'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4}, + {'name': 'ECDHE-ECDSA-AES128-SHA', 'priority': 5}, + {'name': 'ECDHE-RSA-AES128-SHA', 'priority': 6}, + {'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 7}, + {'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 8}, + {'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 9}, + {'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 10}, + {'name': 'ECDHE-RSA-AES256-SHA', 'priority': 11}, + {'name': 'ECDHE-ECDSA-AES256-SHA', 'priority': 12}, + {'name': 'AES128-GCM-SHA256', 'priority': 13}, + {'name': 'AES128-SHA256', 'priority': 14}, + {'name': 'AES128-SHA', 'priority': 15}, + {'name': 'AES256-GCM-SHA384', 'priority': 16}, + {'name': 'AES256-SHA256', 'priority': 17}, + {'name': 'AES256-SHA', 'priority': 18} + ], + }, + { + 'name': 'ELBSecurityPolicy-TLS-1-2-2017-01', + 'ssl_protocols': ['TLSv1.2'], + 'ciphers': [ + {'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1}, + {'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2}, + {'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3}, + {'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4}, + {'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 5}, + {'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 6}, + {'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 7}, + {'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 8}, + {'name': 'AES128-GCM-SHA256', 'priority': 9}, + {'name': 'AES128-SHA256', 'priority': 10}, + {'name': 'AES256-GCM-SHA384', 'priority': 11}, + {'name': 'AES256-SHA256', 'priority': 12} + ] + }, + { + 'name': 'ELBSecurityPolicy-TLS-1-1-2017-01', + 'ssl_protocols': ['TLSv1.1', 'TLSv1.2'], + 'ciphers': [ + {'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1}, + {'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2}, + {'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3}, + {'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4}, + {'name': 'ECDHE-ECDSA-AES128-SHA', 'priority': 5}, + {'name': 'ECDHE-RSA-AES128-SHA', 'priority': 6}, + {'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 7}, + {'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 8}, + {'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 9}, + {'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 10}, + {'name': 'ECDHE-RSA-AES256-SHA', 'priority': 11}, + {'name': 'ECDHE-ECDSA-AES256-SHA', 'priority': 12}, + {'name': 'AES128-GCM-SHA256', 'priority': 13}, + {'name': 'AES128-SHA256', 'priority': 14}, + {'name': 'AES128-SHA', 'priority': 15}, + {'name': 'AES256-GCM-SHA384', 'priority': 16}, + {'name': 'AES256-SHA256', 'priority': 17}, + {'name': 'AES256-SHA', 'priority': 18} + ] + }, + { + 
'name': 'ELBSecurityPolicy-2015-05', + 'ssl_protocols': ['TLSv1', 'TLSv1.1', 'TLSv1.2'], + 'ciphers': [ + {'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1}, + {'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2}, + {'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3}, + {'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4}, + {'name': 'ECDHE-ECDSA-AES128-SHA', 'priority': 5}, + {'name': 'ECDHE-RSA-AES128-SHA', 'priority': 6}, + {'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 7}, + {'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 8}, + {'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 9}, + {'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 10}, + {'name': 'ECDHE-RSA-AES256-SHA', 'priority': 11}, + {'name': 'ECDHE-ECDSA-AES256-SHA', 'priority': 12}, + {'name': 'AES128-GCM-SHA256', 'priority': 13}, + {'name': 'AES128-SHA256', 'priority': 14}, + {'name': 'AES128-SHA', 'priority': 15}, + {'name': 'AES256-GCM-SHA384', 'priority': 16}, + {'name': 'AES256-SHA256', 'priority': 17}, + {'name': 'AES256-SHA', 'priority': 18} + ] + }, + { + 'name': 'ELBSecurityPolicy-TLS-1-0-2015-04', + 'ssl_protocols': ['TLSv1', 'TLSv1.1', 'TLSv1.2'], + 'ciphers': [ + {'name': 'ECDHE-ECDSA-AES128-GCM-SHA256', 'priority': 1}, + {'name': 'ECDHE-RSA-AES128-GCM-SHA256', 'priority': 2}, + {'name': 'ECDHE-ECDSA-AES128-SHA256', 'priority': 3}, + {'name': 'ECDHE-RSA-AES128-SHA256', 'priority': 4}, + {'name': 'ECDHE-ECDSA-AES128-SHA', 'priority': 5}, + {'name': 'ECDHE-RSA-AES128-SHA', 'priority': 6}, + {'name': 'ECDHE-ECDSA-AES256-GCM-SHA384', 'priority': 7}, + {'name': 'ECDHE-RSA-AES256-GCM-SHA384', 'priority': 8}, + {'name': 'ECDHE-ECDSA-AES256-SHA384', 'priority': 9}, + {'name': 'ECDHE-RSA-AES256-SHA384', 'priority': 10}, + {'name': 'ECDHE-RSA-AES256-SHA', 'priority': 11}, + {'name': 'ECDHE-ECDSA-AES256-SHA', 'priority': 12}, + {'name': 'AES128-GCM-SHA256', 'priority': 13}, + {'name': 'AES128-SHA256', 'priority': 14}, + {'name': 'AES128-SHA', 'priority': 15}, + {'name': 'AES256-GCM-SHA384', 'priority': 16}, + {'name': 'AES256-SHA256', 'priority': 17}, + {'name': 'AES256-SHA', 'priority': 18}, + {'name': 'DES-CBC3-SHA', 'priority': 19} + ] + } +] + +class ELBV2Response(BaseResponse): @property def elbv2_backend(self): return elbv2_backends[self.region] + @amzn_request_id def create_load_balancer(self): load_balancer_name = self._get_param('Name') subnet_ids = self._get_multi_param("Subnets.member") @@ -28,6 +149,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(CREATE_LOAD_BALANCER_TEMPLATE) return template.render(load_balancer=load_balancer) + @amzn_request_id def create_rule(self): lister_arn = self._get_param('ListenerArn') _conditions = self._get_list_prefix('Conditions.member') @@ -52,18 +174,20 @@ class ELBV2Response(BaseResponse): template = self.response_template(CREATE_RULE_TEMPLATE) return template.render(rules=rules) + @amzn_request_id def create_target_group(self): name = self._get_param('Name') vpc_id = self._get_param('VpcId') protocol = self._get_param('Protocol') port = self._get_param('Port') - healthcheck_protocol = self._get_param('HealthCheckProtocol', 'HTTP') - healthcheck_port = self._get_param('HealthCheckPort', 'traffic-port') - healthcheck_path = self._get_param('HealthCheckPath', '/') - healthcheck_interval_seconds = self._get_param('HealthCheckIntervalSeconds', '30') - healthcheck_timeout_seconds = self._get_param('HealthCheckTimeoutSeconds', '5') - healthy_threshold_count = self._get_param('HealthyThresholdCount', '5') - unhealthy_threshold_count = 
self._get_param('UnhealthyThresholdCount', '2') + healthcheck_protocol = self._get_param('HealthCheckProtocol') + healthcheck_port = self._get_param('HealthCheckPort') + healthcheck_path = self._get_param('HealthCheckPath') + healthcheck_interval_seconds = self._get_param('HealthCheckIntervalSeconds') + healthcheck_timeout_seconds = self._get_param('HealthCheckTimeoutSeconds') + healthy_threshold_count = self._get_param('HealthyThresholdCount') + unhealthy_threshold_count = self._get_param('UnhealthyThresholdCount') + matcher = self._get_param('Matcher') target_group = self.elbv2_backend.create_target_group( name, @@ -77,11 +201,13 @@ class ELBV2Response(BaseResponse): healthcheck_timeout_seconds=healthcheck_timeout_seconds, healthy_threshold_count=healthy_threshold_count, unhealthy_threshold_count=unhealthy_threshold_count, + matcher=matcher, ) template = self.response_template(CREATE_TARGET_GROUP_TEMPLATE) return template.render(target_group=target_group) + @amzn_request_id def create_listener(self): load_balancer_arn = self._get_param('LoadBalancerArn') protocol = self._get_param('Protocol') @@ -105,6 +231,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(CREATE_LISTENER_TEMPLATE) return template.render(listener=listener) + @amzn_request_id def describe_load_balancers(self): arns = self._get_multi_param("LoadBalancerArns.member") names = self._get_multi_param("Names.member") @@ -124,6 +251,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(DESCRIBE_LOAD_BALANCERS_TEMPLATE) return template.render(load_balancers=load_balancers_resp, marker=next_marker) + @amzn_request_id def describe_rules(self): listener_arn = self._get_param('ListenerArn') rule_arns = self._get_multi_param('RuleArns.member') if any(k for k in list(self.querystring.keys()) if k.startswith('RuleArns.member')) else None @@ -144,6 +272,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(DESCRIBE_RULES_TEMPLATE) return template.render(rules=rules_resp, marker=next_marker) + @amzn_request_id def describe_target_groups(self): load_balancer_arn = self._get_param('LoadBalancerArn') target_group_arns = self._get_multi_param('TargetGroupArns.member') @@ -153,6 +282,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(DESCRIBE_TARGET_GROUPS_TEMPLATE) return template.render(target_groups=target_groups) + @amzn_request_id def describe_target_group_attributes(self): target_group_arn = self._get_param('TargetGroupArn') target_group = self.elbv2_backend.target_groups.get(target_group_arn) @@ -161,6 +291,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(DESCRIBE_TARGET_GROUP_ATTRIBUTES_TEMPLATE) return template.render(attributes=target_group.attributes) + @amzn_request_id def describe_listeners(self): load_balancer_arn = self._get_param('LoadBalancerArn') listener_arns = self._get_multi_param('ListenerArns.member') @@ -171,30 +302,35 @@ class ELBV2Response(BaseResponse): template = self.response_template(DESCRIBE_LISTENERS_TEMPLATE) return template.render(listeners=listeners) + @amzn_request_id def delete_load_balancer(self): arn = self._get_param('LoadBalancerArn') self.elbv2_backend.delete_load_balancer(arn) template = self.response_template(DELETE_LOAD_BALANCER_TEMPLATE) return template.render() + @amzn_request_id def delete_rule(self): arn = self._get_param('RuleArn') self.elbv2_backend.delete_rule(arn) template = self.response_template(DELETE_RULE_TEMPLATE) return template.render() + @amzn_request_id def 
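Every response method is now wrapped in @amzn_request_id, so the rendered templates carry a freshly generated request id instead of the hard-coded EXAMPLE values. A tiny sketch of what that buys a caller (the test name and region are illustrative):

    import boto3
    from moto import mock_elbv2

    @mock_elbv2
    def test_request_ids_are_regenerated():
        elbv2 = boto3.client('elbv2', region_name='us-east-1')
        # each call should carry its own request id now
        first = elbv2.describe_load_balancers()['ResponseMetadata']['RequestId']
        second = elbv2.describe_load_balancers()['ResponseMetadata']['RequestId']
        assert first != second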
delete_target_group(self): arn = self._get_param('TargetGroupArn') self.elbv2_backend.delete_target_group(arn) template = self.response_template(DELETE_TARGET_GROUP_TEMPLATE) return template.render() + @amzn_request_id def delete_listener(self): arn = self._get_param('ListenerArn') self.elbv2_backend.delete_listener(arn) template = self.response_template(DELETE_LISTENER_TEMPLATE) return template.render() + @amzn_request_id def modify_rule(self): rule_arn = self._get_param('RuleArn') _conditions = self._get_list_prefix('Conditions.member') @@ -217,6 +353,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(MODIFY_RULE_TEMPLATE) return template.render(rules=rules) + @amzn_request_id def modify_target_group_attributes(self): target_group_arn = self._get_param('TargetGroupArn') target_group = self.elbv2_backend.target_groups.get(target_group_arn) @@ -230,6 +367,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(MODIFY_TARGET_GROUP_ATTRIBUTES_TEMPLATE) return template.render(attributes=attributes) + @amzn_request_id def register_targets(self): target_group_arn = self._get_param('TargetGroupArn') targets = self._get_list_prefix('Targets.member') @@ -238,6 +376,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(REGISTER_TARGETS_TEMPLATE) return template.render() + @amzn_request_id def deregister_targets(self): target_group_arn = self._get_param('TargetGroupArn') targets = self._get_list_prefix('Targets.member') @@ -246,6 +385,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(DEREGISTER_TARGETS_TEMPLATE) return template.render() + @amzn_request_id def describe_target_health(self): target_group_arn = self._get_param('TargetGroupArn') targets = self._get_list_prefix('Targets.member') @@ -254,6 +394,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(DESCRIBE_TARGET_HEALTH_TEMPLATE) return template.render(target_health_descriptions=target_health_descriptions) + @amzn_request_id def set_rule_priorities(self): rule_priorities = self._get_list_prefix('RulePriorities.member') for rule_priority in rule_priorities: @@ -262,6 +403,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(SET_RULE_PRIORITIES_TEMPLATE) return template.render(rules=rules) + @amzn_request_id def add_tags(self): resource_arns = self._get_multi_param('ResourceArns.member') @@ -281,6 +423,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(ADD_TAGS_TEMPLATE) return template.render() + @amzn_request_id def remove_tags(self): resource_arns = self._get_multi_param('ResourceArns.member') tag_keys = self._get_multi_param('TagKeys.member') @@ -301,6 +444,7 @@ class ELBV2Response(BaseResponse): template = self.response_template(REMOVE_TAGS_TEMPLATE) return template.render() + @amzn_request_id def describe_tags(self): resource_arns = self._get_multi_param('ResourceArns.member') resources = [] @@ -320,6 +464,125 @@ class ELBV2Response(BaseResponse): template = self.response_template(DESCRIBE_TAGS_TEMPLATE) return template.render(resources=resources) + @amzn_request_id + def describe_account_limits(self): + # Supports paging but not worth implementing yet + # marker = self._get_param('Marker') + # page_size = self._get_param('PageSize') + + limits = { + 'application-load-balancers': 20, + 'target-groups': 3000, + 'targets-per-application-load-balancer': 30, + 'listeners-per-application-load-balancer': 50, + 'rules-per-application-load-balancer': 100, + 'network-load-balancers': 20, + 
'targets-per-network-load-balancer': 200, + 'listeners-per-network-load-balancer': 50 + } + + template = self.response_template(DESCRIBE_LIMITS_TEMPLATE) + return template.render(limits=limits) + + @amzn_request_id + def describe_ssl_policies(self): + names = self._get_multi_param('Names.member.') + # Supports paging but not worth implementing yet + # marker = self._get_param('Marker') + # page_size = self._get_param('PageSize') + + policies = SSL_POLICIES + if names: + policies = filter(lambda policy: policy['name'] in names, policies) + + template = self.response_template(DESCRIBE_SSL_POLICIES_TEMPLATE) + return template.render(policies=policies) + + @amzn_request_id + def set_ip_address_type(self): + arn = self._get_param('LoadBalancerArn') + ip_type = self._get_param('IpAddressType') + + self.elbv2_backend.set_ip_address_type(arn, ip_type) + + template = self.response_template(SET_IP_ADDRESS_TYPE_TEMPLATE) + return template.render(ip_type=ip_type) + + @amzn_request_id + def set_security_groups(self): + arn = self._get_param('LoadBalancerArn') + sec_groups = self._get_multi_param('SecurityGroups.member.') + + self.elbv2_backend.set_security_groups(arn, sec_groups) + + template = self.response_template(SET_SECURITY_GROUPS_TEMPLATE) + return template.render(sec_groups=sec_groups) + + @amzn_request_id + def set_subnets(self): + arn = self._get_param('LoadBalancerArn') + subnets = self._get_multi_param('Subnets.member.') + + subnet_zone_list = self.elbv2_backend.set_subnets(arn, subnets) + + template = self.response_template(SET_SUBNETS_TEMPLATE) + return template.render(subnets=subnet_zone_list) + + @amzn_request_id + def modify_load_balancer_attributes(self): + arn = self._get_param('LoadBalancerArn') + attrs = self._get_map_prefix('Attributes.member', key_end='Key', value_end='Value') + + all_attrs = self.elbv2_backend.modify_load_balancer_attributes(arn, attrs) + + template = self.response_template(MODIFY_LOADBALANCER_ATTRS_TEMPLATE) + return template.render(attrs=all_attrs) + + @amzn_request_id + def describe_load_balancer_attributes(self): + arn = self._get_param('LoadBalancerArn') + attrs = self.elbv2_backend.describe_load_balancer_attributes(arn) + + template = self.response_template(DESCRIBE_LOADBALANCER_ATTRS_TEMPLATE) + return template.render(attrs=attrs) + + @amzn_request_id + def modify_target_group(self): + arn = self._get_param('TargetGroupArn') + + health_check_proto = self._get_param('HealthCheckProtocol') # 'HTTP' | 'HTTPS' | 'TCP', + health_check_port = self._get_param('HealthCheckPort') + health_check_path = self._get_param('HealthCheckPath') + health_check_interval = self._get_param('HealthCheckIntervalSeconds') + health_check_timeout = self._get_param('HealthCheckTimeoutSeconds') + healthy_threshold_count = self._get_param('HealthyThresholdCount') + unhealthy_threshold_count = self._get_param('UnhealthyThresholdCount') + http_codes = self._get_param('Matcher.HttpCode') + + target_group = self.elbv2_backend.modify_target_group(arn, health_check_proto, health_check_port, health_check_path, health_check_interval, + health_check_timeout, healthy_threshold_count, unhealthy_threshold_count, http_codes) + + template = self.response_template(MODIFY_TARGET_GROUP_TEMPLATE) + return template.render(target_group=target_group) + + @amzn_request_id + def modify_listener(self): + arn = self._get_param('ListenerArn') + port = self._get_param('Port') + protocol = self._get_param('Protocol') + ssl_policy = self._get_param('SslPolicy') + certificates = 
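DescribeAccountLimits and DescribeSSLPolicies are served from the static data above, with paging deliberately left out for now. A short sketch of calling them, assuming the response templates expose the usual Limit/SslPolicy fields (names below are illustrative):

    import boto3
    from moto import mock_elbv2

    @mock_elbv2
    def test_limits_and_ssl_policies():
        elbv2 = boto3.client('elbv2', region_name='us-east-1')

        limits = {limit['Name']: limit['Max']
                  for limit in elbv2.describe_account_limits()['Limits']}
        # e.g. limits['target-groups'] should come back as '3000' per the table above

        policies = elbv2.describe_ssl_policies(
            Names=['ELBSecurityPolicy-TLS-1-2-2017-01'])['SslPolicies']
        assert policies[0]['Name'] == 'ELBSecurityPolicy-TLS-1-2-2017-01'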
self._get_list_prefix('Certificates.member') + default_actions = self._get_list_prefix('DefaultActions.member') + + # Should really move SSL Policies to models + if ssl_policy is not None and ssl_policy not in [item['name'] for item in SSL_POLICIES]: + raise RESTError('SSLPolicyNotFound', 'Policy {0} not found'.format(ssl_policy)) + + listener = self.elbv2_backend.modify_listener(arn, port, protocol, ssl_policy, certificates, default_actions) + + template = self.response_template(MODIFY_LISTENER_TEMPLATE) + return template.render(listener=listener) + def _add_tags(self, resource): tag_values = [] tag_keys = [] @@ -348,14 +611,14 @@ class ELBV2Response(BaseResponse): ADD_TAGS_TEMPLATE = """ - 360e81f7-1100-11e4-b6ed-0f30EXAMPLE + {{ request_id }} """ REMOVE_TAGS_TEMPLATE = """ - 360e81f7-1100-11e4-b6ed-0f30EXAMPLE + {{ request_id }} """ @@ -378,11 +641,10 @@ DESCRIBE_TAGS_TEMPLATE = """ @@ -415,7 +677,7 @@ CREATE_LOAD_BALANCER_TEMPLATE = """ - 1549581b-12b7-11e3-895e-1334aEXAMPLE + {{ request_id }} """ DELETE_RULE_TEMPLATE = """ - 1549581b-12b7-11e3-895e-1334aEXAMPLE + {{ request_id }} """ DELETE_TARGET_GROUP_TEMPLATE = """ - 1549581b-12b7-11e3-895e-1334aEXAMPLE + {{ request_id }} """ DELETE_LISTENER_TEMPLATE = """ - 1549581b-12b7-11e3-895e-1334aEXAMPLE + {{ request_id }} """ @@ -572,6 +841,7 @@ DESCRIBE_LOAD_BALANCERS_TEMPLATE = """ @@ -671,11 +945,10 @@ DESCRIBE_TARGET_GROUP_ATTRIBUTES_TEMPLATE = """ - 70092c0e-f3a9-11e5-ae48-cff02092876b + {{ request_id }} """ - DESCRIBE_LISTENERS_TEMPLATE = """ @@ -706,7 +979,7 @@ DESCRIBE_LISTENERS_TEMPLATE = """ - 70092c0e-f3a9-11e5-ae48-cff02092876b + {{ request_id }} """ @@ -782,7 +1055,7 @@ REGISTER_TARGETS_TEMPLATE = """ - 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE + {{ request_id }} """ - DELETE_LOAD_BALANCER_LISTENERS = """ - 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE + {{ request_id }} """ @@ -837,7 +1109,7 @@ DESCRIBE_ATTRIBUTES_TEMPLATE = """ - 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE + {{ request_id }} """ @@ -871,7 +1143,7 @@ MODIFY_ATTRIBUTES_TEMPLATE = """ - 83c88b9d-12b7-11e3-8b82-87b12EXAMPLE + {{ request_id }} """ @@ -887,7 +1159,7 @@ CREATE_LOAD_BALANCER_POLICY_TEMPLATE = """ - 07b1ecbc-1100-11e3-acaf-dd7edEXAMPLE + {{ request_id }} """ @@ -895,7 +1167,7 @@ SET_LOAD_BALANCER_POLICIES_OF_LISTENER_TEMPLATE = """ - 0eb9b381-dde0-11e2-8d78-6ddbaEXAMPLE + {{ request_id }} """ @@ -918,7 +1190,7 @@ DESCRIBE_TARGET_HEALTH_TEMPLATE = """ + + + {% for key, value in limits.items() %} + + {{ key }} + {{ value }} + + {% endfor %} + + + + {{ request_id }} + +""" + +DESCRIBE_SSL_POLICIES_TEMPLATE = """ + + + {% for policy in policies %} + + {{ policy['name'] }} + + {% for cipher in policy['ciphers'] %} + + {{ cipher['name'] }} + {{ cipher['priority'] }} + + {% endfor %} + + + {% for proto in policy['ssl_protocols'] %} + {{ proto }} + {% endfor %} + + + {% endfor %} + + + + {{ request_id }} + +""" + +SET_IP_ADDRESS_TYPE_TEMPLATE = """ + + {{ ip_type }} + + + {{ request_id }} + +""" + +SET_SECURITY_GROUPS_TEMPLATE = """ + + + {% for group in sec_groups %} + {{ group }} + {% endfor %} + + + + {{ request_id }} + +""" + +SET_SUBNETS_TEMPLATE = """ + + + {% for zone_id, subnet_id in subnets %} + + {{ subnet_id }} + {{ zone_id }} + + {% endfor %} + + + + {{ request_id }} + +""" + +MODIFY_LOADBALANCER_ATTRS_TEMPLATE = """ + + + {% for key, value in attrs.items() %} + + {% if value == None %}{% else %}{{ value }}{% endif %} + {{ key }} + + {% endfor %} + + + + {{ request_id }} + +""" + +DESCRIBE_LOADBALANCER_ATTRS_TEMPLATE = """ + + + {% for key, value in attrs.items() %} + 
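The attribute templates above round-trip the per-balancer attribute dict kept on FakeLoadBalancer; only the keys listed in FakeLoadBalancer.VALID_ATTRS are accepted. A small sketch of the modify/describe round trip (resource names and CIDRs are illustrative):

    import boto3
    from moto import mock_ec2, mock_elbv2

    @mock_ec2
    @mock_elbv2
    def test_load_balancer_attributes():
        ec2 = boto3.client('ec2', region_name='us-east-1')
        elbv2 = boto3.client('elbv2', region_name='us-east-1')
        vpc_id = ec2.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']['VpcId']
        subnet_id = ec2.create_subnet(VpcId=vpc_id, CidrBlock='10.0.0.0/24')['Subnet']['SubnetId']
        lb_arn = elbv2.create_load_balancer(
            Name='my-lb', Subnets=[subnet_id])['LoadBalancers'][0]['LoadBalancerArn']

        # unknown keys are rejected with InvalidConfigurationRequest
        elbv2.modify_load_balancer_attributes(
            LoadBalancerArn=lb_arn,
            Attributes=[{'Key': 'idle_timeout.timeout_seconds', 'Value': '120'}])
        attrs = elbv2.describe_load_balancer_attributes(LoadBalancerArn=lb_arn)['Attributes']
        assert {'Key': 'idle_timeout.timeout_seconds', 'Value': '120'} in attrs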
+ {% if value == None %}{% else %}{{ value }}{% endif %} + {{ key }} + + {% endfor %} + + + + {{ request_id }} + +""" + +MODIFY_TARGET_GROUP_TEMPLATE = """ + + + + {{ target_group.arn }} + {{ target_group.name }} + {{ target_group.protocol }} + {{ target_group.port }} + {{ target_group.vpc_id }} + {{ target_group.healthcheck_protocol }} + {{ target_group.healthcheck_port }} + {{ target_group.healthcheck_path }} + {{ target_group.healthcheck_interval_seconds }} + {{ target_group.healthcheck_timeout_seconds }} + {{ target_group.healthy_threshold_count }} + {{ target_group.unhealthy_threshold_count }} + + {{ target_group.matcher['HttpCode'] }} + + + {% for load_balancer_arn in target_group.load_balancer_arns %} + {{ load_balancer_arn }} + {% endfor %} + + + + + + {{ request_id }} + +""" + +MODIFY_LISTENER_TEMPLATE = """ + + + + {{ listener.load_balancer_arn }} + {{ listener.protocol }} + {% if listener.certificates %} + + {% for cert in listener.certificates %} + + {{ cert }} + + {% endfor %} + + {% endif %} + {{ listener.port }} + {{ listener.ssl_policy }} + {{ listener.arn }} + + {% for action in listener.default_actions %} + + {{ action.type }} + {{ action.target_group_arn }} + + {% endfor %} + + + + + + {{ request_id }} + +""" diff --git a/moto/elbv2/urls.py b/moto/elbv2/urls.py index 13a8e056f..af51f7d3a 100644 --- a/moto/elbv2/urls.py +++ b/moto/elbv2/urls.py @@ -1,10 +1,10 @@ from __future__ import unicode_literals -from .responses import ELBV2Response +from ..elb.urls import api_version_elb_backend url_bases = [ "https?://elasticloadbalancing.(.+).amazonaws.com", ] url_paths = { - '{0}/$': ELBV2Response.dispatch, + '{0}/$': api_version_elb_backend, } diff --git a/moto/elbv2/utils.py b/moto/elbv2/utils.py new file mode 100644 index 000000000..47a3e66d5 --- /dev/null +++ b/moto/elbv2/utils.py @@ -0,0 +1,8 @@ +def make_arn_for_load_balancer(account_id, name, region_name): + return "arn:aws:elasticloadbalancing:{}:{}:loadbalancer/{}/50dc6c495c0c9188".format( + region_name, account_id, name) + + +def make_arn_for_target_group(account_id, name, region_name): + return "arn:aws:elasticloadbalancing:{}:{}:targetgroup/{}/50dc6c495c0c9188".format( + region_name, account_id, name) diff --git a/moto/events/models.py b/moto/events/models.py index faec7b434..5c1d507ca 100644 --- a/moto/events/models.py +++ b/moto/events/models.py @@ -1,6 +1,7 @@ import os import re +from moto.core.exceptions import JsonRESTError from moto.core import BaseBackend, BaseModel @@ -50,6 +51,8 @@ class Rule(BaseModel): class EventsBackend(BaseBackend): + ACCOUNT_ID = re.compile(r'^(\d{1,12}|\*)$') + STATEMENT_ID = re.compile(r'^[a-zA-Z0-9-_]{1,64}$') def __init__(self): self.rules = {} @@ -58,6 +61,8 @@ class EventsBackend(BaseBackend): self.rules_order = [] self.next_tokens = {} + self.permissions = {} + def _get_rule_by_index(self, i): return self.rules.get(self.rules_order[i]) @@ -181,6 +186,17 @@ class EventsBackend(BaseBackend): return False + def put_events(self, events): + num_events = len(events) + + if num_events < 1: + raise JsonRESTError('ValidationError', 'Need at least 1 event') + elif num_events > 10: + raise JsonRESTError('ValidationError', 'Can only submit 10 events at once') + + # We dont really need to store the events yet + return [] + def remove_targets(self, name, ids): rule = self.rules.get(name) @@ -193,5 +209,40 @@ class EventsBackend(BaseBackend): def test_event_pattern(self): raise NotImplementedError() + def put_permission(self, action, principal, statement_id): + if action is None or action != 
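The hard-coded ARN strings are replaced by the small helpers in moto/elbv2/utils.py; for reference, they produce values of this shape (the account id of 1 mirrors what the backend currently passes in):

    from moto.elbv2.utils import make_arn_for_load_balancer, make_arn_for_target_group

    make_arn_for_load_balancer(account_id=1, name='my-lb', region_name='us-east-1')
    # -> 'arn:aws:elasticloadbalancing:us-east-1:1:loadbalancer/my-lb/50dc6c495c0c9188'
    make_arn_for_target_group(account_id=1, name='my-targets', region_name='us-east-1')
    # -> 'arn:aws:elasticloadbalancing:us-east-1:1:targetgroup/my-targets/50dc6c495c0c9188'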
'PutEvents': + raise JsonRESTError('InvalidParameterValue', 'Action must be PutEvents') + + if principal is None or self.ACCOUNT_ID.match(principal) is None: + raise JsonRESTError('InvalidParameterValue', 'Principal must match ^(\d{1,12}|\*)$') + + if statement_id is None or self.STATEMENT_ID.match(statement_id) is None: + raise JsonRESTError('InvalidParameterValue', 'StatementId must match ^[a-zA-Z0-9-_]{1,64}$') + + self.permissions[statement_id] = {'action': action, 'principal': principal} + + def remove_permission(self, statement_id): + try: + del self.permissions[statement_id] + except KeyError: + raise JsonRESTError('ResourceNotFoundException', 'StatementId not found') + + def describe_event_bus(self): + arn = "arn:aws:events:us-east-1:000000000000:event-bus/default" + statements = [] + for statement_id, data in self.permissions.items(): + statements.append({ + 'Sid': statement_id, + 'Effect': 'Allow', + 'Principal': {'AWS': 'arn:aws:iam::{0}:root'.format(data['principal'])}, + 'Action': 'events:{0}'.format(data['action']), + 'Resource': arn + }) + return { + 'Policy': {'Version': '2012-10-17', 'Statement': statements}, + 'Name': 'default', + 'Arn': arn + } + events_backend = EventsBackend() diff --git a/moto/events/responses.py b/moto/events/responses.py index 8f433844a..f9cb9b5b5 100644 --- a/moto/events/responses.py +++ b/moto/events/responses.py @@ -18,9 +18,17 @@ class EventsHandler(BaseResponse): 'RoleArn': rule.role_arn } - def load_body(self): - decoded_body = self.body - return json.loads(decoded_body or '{}') + @property + def request_params(self): + if not hasattr(self, '_json_body'): + try: + self._json_body = json.loads(self.body) + except ValueError: + self._json_body = {} + return self._json_body + + def _get_param(self, param, if_none=None): + return self.request_params.get(param, if_none) def error(self, type_, message='', status=400): headers = self.response_headers @@ -28,8 +36,7 @@ class EventsHandler(BaseResponse): return json.dumps({'__type': type_, 'message': message}), headers, def delete_rule(self): - body = self.load_body() - name = body.get('Name') + name = self._get_param('Name') if not name: return self.error('ValidationException', 'Parameter Name is required.') @@ -38,8 +45,7 @@ class EventsHandler(BaseResponse): return '', self.response_headers def describe_rule(self): - body = self.load_body() - name = body.get('Name') + name = self._get_param('Name') if not name: return self.error('ValidationException', 'Parameter Name is required.') @@ -53,8 +59,7 @@ class EventsHandler(BaseResponse): return json.dumps(rule_dict), self.response_headers def disable_rule(self): - body = self.load_body() - name = body.get('Name') + name = self._get_param('Name') if not name: return self.error('ValidationException', 'Parameter Name is required.') @@ -65,8 +70,7 @@ class EventsHandler(BaseResponse): return '', self.response_headers def enable_rule(self): - body = self.load_body() - name = body.get('Name') + name = self._get_param('Name') if not name: return self.error('ValidationException', 'Parameter Name is required.') @@ -80,10 +84,9 @@ class EventsHandler(BaseResponse): pass def list_rule_names_by_target(self): - body = self.load_body() - target_arn = body.get('TargetArn') - next_token = body.get('NextToken') - limit = body.get('Limit') + target_arn = self._get_param('TargetArn') + next_token = self._get_param('NextToken') + limit = self._get_param('Limit') if not target_arn: return self.error('ValidationException', 'Parameter TargetArn is required.') @@ -94,10 +97,9 
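PutPermission, RemovePermission and DescribeEventBus are now backed by the permissions dict above. Note that the validation in this patch expects the bare action name 'PutEvents' (the live API takes 'events:PutEvents'). A sketch of the flow; the account id and statement id are illustrative:

    import boto3
    from moto import mock_events

    @mock_events
    def test_event_bus_permissions():
        client = boto3.client('events', region_name='us-east-1')
        client.put_permission(
            Action='PutEvents',          # what this backend's validation accepts
            Principal='111111111111',
            StatementId='Account1Access')
        bus = client.describe_event_bus()
        # bus['Policy'] now describes one Allow statement for account 111111111111
        client.remove_permission(StatementId='Account1Access')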
@@ class EventsHandler(BaseResponse): return json.dumps(rule_names), self.response_headers def list_rules(self): - body = self.load_body() - prefix = body.get('NamePrefix') - next_token = body.get('NextToken') - limit = body.get('Limit') + prefix = self._get_param('NamePrefix') + next_token = self._get_param('NextToken') + limit = self._get_param('Limit') rules = events_backend.list_rules(prefix, next_token, limit) rules_obj = {'Rules': []} @@ -111,10 +113,9 @@ class EventsHandler(BaseResponse): return json.dumps(rules_obj), self.response_headers def list_targets_by_rule(self): - body = self.load_body() - rule_name = body.get('Rule') - next_token = body.get('NextToken') - limit = body.get('Limit') + rule_name = self._get_param('Rule') + next_token = self._get_param('NextToken') + limit = self._get_param('Limit') if not rule_name: return self.error('ValidationException', 'Parameter Rule is required.') @@ -128,13 +129,25 @@ class EventsHandler(BaseResponse): return json.dumps(targets), self.response_headers def put_events(self): + events = self._get_param('Entries') + + failed_entries = events_backend.put_events(events) + + if failed_entries: + return json.dumps({ + 'FailedEntryCount': len(failed_entries), + 'Entries': failed_entries + }) + return '', self.response_headers def put_rule(self): - body = self.load_body() - name = body.get('Name') - event_pattern = body.get('EventPattern') - sched_exp = body.get('ScheduleExpression') + name = self._get_param('Name') + event_pattern = self._get_param('EventPattern') + sched_exp = self._get_param('ScheduleExpression') + state = self._get_param('State') + desc = self._get_param('Description') + role_arn = self._get_param('RoleArn') if not name: return self.error('ValidationException', 'Parameter Name is required.') @@ -156,17 +169,16 @@ class EventsHandler(BaseResponse): name, ScheduleExpression=sched_exp, EventPattern=event_pattern, - State=body.get('State'), - Description=body.get('Description'), - RoleArn=body.get('RoleArn') + State=state, + Description=desc, + RoleArn=role_arn ) return json.dumps({'RuleArn': rule_arn}), self.response_headers def put_targets(self): - body = self.load_body() - rule_name = body.get('Rule') - targets = body.get('Targets') + rule_name = self._get_param('Rule') + targets = self._get_param('Targets') if not rule_name: return self.error('ValidationException', 'Parameter Rule is required.') @@ -180,9 +192,8 @@ class EventsHandler(BaseResponse): return '', self.response_headers def remove_targets(self): - body = self.load_body() - rule_name = body.get('Rule') - ids = body.get('Ids') + rule_name = self._get_param('Rule') + ids = self._get_param('Ids') if not rule_name: return self.error('ValidationException', 'Parameter Rule is required.') @@ -197,3 +208,22 @@ class EventsHandler(BaseResponse): def test_event_pattern(self): pass + + def put_permission(self): + action = self._get_param('Action') + principal = self._get_param('Principal') + statement_id = self._get_param('StatementId') + + events_backend.put_permission(action, principal, statement_id) + + return '' + + def remove_permission(self): + statement_id = self._get_param('StatementId') + + events_backend.remove_permission(statement_id) + + return '' + + def describe_event_bus(self): + return json.dumps(events_backend.describe_event_bus()) diff --git a/moto/iam/models.py b/moto/iam/models.py index 18ed513b4..22bdfdb4b 100644 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -528,6 +528,12 @@ class IAMBackend(BaseBackend): return role raise 
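With request parameters now read through _get_param, the existing rule handlers keep working unchanged, and PutEvents rejects empty batches as well as batches of more than ten entries. A quick round trip (rule name, target ARN and schedule are illustrative):

    import json
    import boto3
    from moto import mock_events

    @mock_events
    def test_rule_round_trip():
        client = boto3.client('events', region_name='us-east-1')
        client.put_rule(Name='every-5-minutes',
                        ScheduleExpression='rate(5 minutes)', State='ENABLED')
        client.put_targets(
            Rule='every-5-minutes',
            Targets=[{'Id': '1',
                      'Arn': 'arn:aws:lambda:us-east-1:123456789012:function:my-func'}])
        # between 1 and 10 entries per call, per the new validation
        client.put_events(Entries=[{
            'Source': 'my.app',
            'DetailType': 'example',
            'Detail': json.dumps({'hello': 'world'})}])
        assert client.list_targets_by_rule(Rule='every-5-minutes')['Targets'][0]['Id'] == '1'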
IAMNotFoundException("Role {0} not found".format(role_name)) + def get_role_by_arn(self, arn): + for role in self.get_roles(): + if role.arn == arn: + return role + raise IAMNotFoundException("Role {0} not found".format(arn)) + def delete_role(self, role_name): for role in self.get_roles(): if role.name == role_name: diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 6ca49b830..df32732a0 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -1159,9 +1159,7 @@ CREATE_ACCESS_KEY_TEMPLATE = """ {{ key.user_name }} {{ key.access_key_id }} {{ key.status }} - - {{ key.secret_access_key }} - + {{ key.secret_access_key }} diff --git a/moto/iot/__init__.py b/moto/iot/__init__.py new file mode 100644 index 000000000..199b8aeae --- /dev/null +++ b/moto/iot/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import iot_backends +from ..core.models import base_decorator + +iot_backend = iot_backends['us-east-1'] +mock_iot = base_decorator(iot_backends) diff --git a/moto/iot/exceptions.py b/moto/iot/exceptions.py new file mode 100644 index 000000000..4bb01c095 --- /dev/null +++ b/moto/iot/exceptions.py @@ -0,0 +1,24 @@ +from __future__ import unicode_literals +from moto.core.exceptions import JsonRESTError + + +class IoTClientError(JsonRESTError): + code = 400 + + +class ResourceNotFoundException(IoTClientError): + def __init__(self): + self.code = 404 + super(ResourceNotFoundException, self).__init__( + "ResourceNotFoundException", + "The specified resource does not exist" + ) + + +class InvalidRequestException(IoTClientError): + def __init__(self): + self.code = 400 + super(InvalidRequestException, self).__init__( + "InvalidRequestException", + "The request is not valid." + ) diff --git a/moto/iot/models.py b/moto/iot/models.py new file mode 100644 index 000000000..1efa6690e --- /dev/null +++ b/moto/iot/models.py @@ -0,0 +1,364 @@ +from __future__ import unicode_literals +import time +import boto3 +import string +import random +import hashlib +import uuid +from moto.core import BaseBackend, BaseModel +from collections import OrderedDict +from .exceptions import ( + ResourceNotFoundException, + InvalidRequestException +) + + +class FakeThing(BaseModel): + def __init__(self, thing_name, thing_type, attributes, region_name): + self.region_name = region_name + self.thing_name = thing_name + self.thing_type = thing_type + self.attributes = attributes + self.arn = 'arn:aws:iot:%s:1:thing/%s' % (self.region_name, thing_name) + self.version = 1 + # TODO: we need to handle 'version'? 
+ + # for iot-data + self.thing_shadow = None + + def to_dict(self, include_default_client_id=False): + obj = { + 'thingName': self.thing_name, + 'attributes': self.attributes, + 'version': self.version + } + if self.thing_type: + obj['thingTypeName'] = self.thing_type.thing_type_name + if include_default_client_id: + obj['defaultClientId'] = self.thing_name + return obj + + +class FakeThingType(BaseModel): + def __init__(self, thing_type_name, thing_type_properties, region_name): + self.region_name = region_name + self.thing_type_name = thing_type_name + self.thing_type_properties = thing_type_properties + t = time.time() + self.metadata = { + 'deprecated': False, + 'creationData': int(t * 1000) / 1000.0 + } + self.arn = 'arn:aws:iot:%s:1:thingtype/%s' % (self.region_name, thing_type_name) + + def to_dict(self): + return { + 'thingTypeName': self.thing_type_name, + 'thingTypeProperties': self.thing_type_properties, + 'thingTypeMetadata': self.metadata + } + + +class FakeCertificate(BaseModel): + def __init__(self, certificate_pem, status, region_name): + m = hashlib.sha256() + m.update(str(uuid.uuid4()).encode('utf-8')) + self.certificate_id = m.hexdigest() + self.arn = 'arn:aws:iot:%s:1:cert/%s' % (region_name, self.certificate_id) + self.certificate_pem = certificate_pem + self.status = status + + # TODO: must adjust + self.owner = '1' + self.transfer_data = {} + self.creation_date = time.time() + self.last_modified_date = self.creation_date + self.ca_certificate_id = None + + def to_dict(self): + return { + 'certificateArn': self.arn, + 'certificateId': self.certificate_id, + 'status': self.status, + 'creationDate': self.creation_date + } + + def to_description_dict(self): + """ + You might need keys below in some situation + - caCertificateId + - previousOwnedBy + """ + return { + 'certificateArn': self.arn, + 'certificateId': self.certificate_id, + 'status': self.status, + 'certificatePem': self.certificate_pem, + 'ownedBy': self.owner, + 'creationDate': self.creation_date, + 'lastModifiedDate': self.last_modified_date, + 'transferData': self.transfer_data + } + + +class FakePolicy(BaseModel): + def __init__(self, name, document, region_name): + self.name = name + self.document = document + self.arn = 'arn:aws:iot:%s:1:policy/%s' % (region_name, name) + self.version = '1' # TODO: handle version + + def to_get_dict(self): + return { + 'policyName': self.name, + 'policyArn': self.arn, + 'policyDocument': self.document, + 'defaultVersionId': self.version + } + + def to_dict_at_creation(self): + return { + 'policyName': self.name, + 'policyArn': self.arn, + 'policyDocument': self.document, + 'policyVersionId': self.version + } + + def to_dict(self): + return { + 'policyName': self.name, + 'policyArn': self.arn, + } + + +class IoTBackend(BaseBackend): + def __init__(self, region_name=None): + super(IoTBackend, self).__init__() + self.region_name = region_name + self.things = OrderedDict() + self.thing_types = OrderedDict() + self.certificates = OrderedDict() + self.policies = OrderedDict() + self.principal_policies = OrderedDict() + self.principal_things = OrderedDict() + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + def create_thing(self, thing_name, thing_type_name, attribute_payload): + thing_types = self.list_thing_types() + thing_type = None + if thing_type_name: + filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == thing_type_name] + if len(filtered_thing_types) == 0: + raise ResourceNotFoundException() + 
thing_type = filtered_thing_types[0] + if attribute_payload is None: + attributes = {} + elif 'attributes' not in attribute_payload: + attributes = {} + else: + attributes = attribute_payload['attributes'] + thing = FakeThing(thing_name, thing_type, attributes, self.region_name) + self.things[thing.arn] = thing + return thing.thing_name, thing.arn + + def create_thing_type(self, thing_type_name, thing_type_properties): + if thing_type_properties is None: + thing_type_properties = {} + thing_type = FakeThingType(thing_type_name, thing_type_properties, self.region_name) + self.thing_types[thing_type.arn] = thing_type + return thing_type.thing_type_name, thing_type.arn + + def list_thing_types(self, thing_type_name=None): + if thing_type_name: + # It's wierd but thing_type_name is filterd by forward match, not complete match + return [_ for _ in self.thing_types.values() if _.thing_type_name.startswith(thing_type_name)] + thing_types = self.thing_types.values() + return thing_types + + def list_things(self, attribute_name, attribute_value, thing_type_name): + # TODO: filter by attributess or thing_type + things = self.things.values() + return things + + def describe_thing(self, thing_name): + things = [_ for _ in self.things.values() if _.thing_name == thing_name] + if len(things) == 0: + raise ResourceNotFoundException() + return things[0] + + def describe_thing_type(self, thing_type_name): + thing_types = [_ for _ in self.thing_types.values() if _.thing_type_name == thing_type_name] + if len(thing_types) == 0: + raise ResourceNotFoundException() + return thing_types[0] + + def delete_thing(self, thing_name, expected_version): + # TODO: handle expected_version + + # can raise ResourceNotFoundError + thing = self.describe_thing(thing_name) + del self.things[thing.arn] + + def delete_thing_type(self, thing_type_name): + # can raise ResourceNotFoundError + thing_type = self.describe_thing_type(thing_type_name) + del self.thing_types[thing_type.arn] + + def update_thing(self, thing_name, thing_type_name, attribute_payload, expected_version, remove_thing_type): + # if attributes payload = {}, nothing + thing = self.describe_thing(thing_name) + thing_type = None + + if remove_thing_type and thing_type_name: + raise InvalidRequestException() + + # thing_type + if thing_type_name: + thing_types = self.list_thing_types() + filtered_thing_types = [_ for _ in thing_types if _.thing_type_name == thing_type_name] + if len(filtered_thing_types) == 0: + raise ResourceNotFoundException() + thing_type = filtered_thing_types[0] + thing.thing_type = thing_type + + if remove_thing_type: + thing.thing_type = None + + # attribute + if attribute_payload is not None and 'attributes' in attribute_payload: + do_merge = attribute_payload.get('merge', False) + attributes = attribute_payload['attributes'] + if not do_merge: + thing.attributes = attributes + else: + thing.attributes.update(attributes) + + def _random_string(self): + n = 20 + random_str = ''.join([random.choice(string.ascii_letters + string.digits) for i in range(n)]) + return random_str + + def create_keys_and_certificate(self, set_as_active): + # implement here + # caCertificate can be blank + key_pair = { + 'PublicKey': self._random_string(), + 'PrivateKey': self._random_string() + } + certificate_pem = self._random_string() + status = 'ACTIVE' if set_as_active else 'INACTIVE' + certificate = FakeCertificate(certificate_pem, status, self.region_name) + self.certificates[certificate.certificate_id] = certificate + return certificate, key_pair + + def 
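update_thing above distinguishes between replacing and merging attributes via the 'merge' flag of attributePayload, and refuses removeThingType combined with a new thingTypeName. A sketch of the merge path (thing name and attribute values are made up):

    import boto3
    from moto import mock_iot

    @mock_iot
    def test_update_thing_merges_attributes():
        client = boto3.client('iot', region_name='us-east-1')
        client.create_thing(thingName='device-1',
                            attributePayload={'attributes': {'colour': 'blue'}})
        # merge=True keeps existing attributes and overlays the new ones
        client.update_thing(thingName='device-1',
                            attributePayload={'attributes': {'firmware': '1.2.0'},
                                              'merge': True})
        thing = client.describe_thing(thingName='device-1')
        assert thing['attributes'] == {'colour': 'blue', 'firmware': '1.2.0'}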
delete_certificate(self, certificate_id): + self.describe_certificate(certificate_id) + del self.certificates[certificate_id] + + def describe_certificate(self, certificate_id): + certs = [_ for _ in self.certificates.values() if _.certificate_id == certificate_id] + if len(certs) == 0: + raise ResourceNotFoundException() + return certs[0] + + def list_certificates(self): + return self.certificates.values() + + def update_certificate(self, certificate_id, new_status): + cert = self.describe_certificate(certificate_id) + # TODO: validate new_status + cert.status = new_status + + def create_policy(self, policy_name, policy_document): + policy = FakePolicy(policy_name, policy_document, self.region_name) + self.policies[policy.name] = policy + return policy + + def list_policies(self): + policies = self.policies.values() + return policies + + def get_policy(self, policy_name): + policies = [_ for _ in self.policies.values() if _.name == policy_name] + if len(policies) == 0: + raise ResourceNotFoundException() + return policies[0] + + def delete_policy(self, policy_name): + policy = self.get_policy(policy_name) + del self.policies[policy.name] + + def _get_principal(self, principal_arn): + """ + raise ResourceNotFoundException + """ + if ':cert/' in principal_arn: + certs = [_ for _ in self.certificates.values() if _.arn == principal_arn] + if len(certs) == 0: + raise ResourceNotFoundException() + principal = certs[0] + return principal + else: + # TODO: search for cognito_ids + pass + raise ResourceNotFoundException() + + def attach_principal_policy(self, policy_name, principal_arn): + principal = self._get_principal(principal_arn) + policy = self.get_policy(policy_name) + k = (principal_arn, policy_name) + if k in self.principal_policies: + return + self.principal_policies[k] = (principal, policy) + + def detach_principal_policy(self, policy_name, principal_arn): + # this may raises ResourceNotFoundException + self._get_principal(principal_arn) + self.get_policy(policy_name) + + k = (principal_arn, policy_name) + if k not in self.principal_policies: + raise ResourceNotFoundException() + del self.principal_policies[k] + + def list_principal_policies(self, principal_arn): + policies = [v[1] for k, v in self.principal_policies.items() if k[0] == principal_arn] + return policies + + def list_policy_principals(self, policy_name): + principals = [k[0] for k, v in self.principal_policies.items() if k[1] == policy_name] + return principals + + def attach_thing_principal(self, thing_name, principal_arn): + principal = self._get_principal(principal_arn) + thing = self.describe_thing(thing_name) + k = (principal_arn, thing_name) + if k in self.principal_things: + return + self.principal_things[k] = (principal, thing) + + def detach_thing_principal(self, thing_name, principal_arn): + # this may raises ResourceNotFoundException + self._get_principal(principal_arn) + self.describe_thing(thing_name) + + k = (principal_arn, thing_name) + if k not in self.principal_things: + raise ResourceNotFoundException() + del self.principal_things[k] + + def list_principal_things(self, principal_arn): + thing_names = [k[0] for k, v in self.principal_things.items() if k[0] == principal_arn] + return thing_names + + def list_thing_principals(self, thing_name): + principals = [k[0] for k, v in self.principal_things.items() if k[1] == thing_name] + return principals + + +available_regions = boto3.session.Session().get_available_regions("iot") +iot_backends = {region: IoTBackend(region) for region in available_regions} diff 
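The certificate, policy and principal-attachment tables above cover the usual device provisioning flow. A sketch of how it could be driven through boto3 (the policy document and names are illustrative):

    import json
    import boto3
    from moto import mock_iot

    @mock_iot
    def test_certificate_policy_attachment():
        client = boto3.client('iot', region_name='us-east-1')
        cert = client.create_keys_and_certificate(setAsActive=True)

        policy_doc = {'Version': '2012-10-17',
                      'Statement': [{'Effect': 'Allow', 'Action': 'iot:*', 'Resource': '*'}]}
        client.create_policy(policyName='allow-all',
                             policyDocument=json.dumps(policy_doc))
        # the certificate ARN is the principal in both attachment tables
        client.attach_principal_policy(policyName='allow-all',
                                       principal=cert['certificateArn'])
        policies = client.list_principal_policies(principal=cert['certificateArn'])['policies']
        assert [p['policyName'] for p in policies] == ['allow-all']

        client.create_thing(thingName='device-1')
        client.attach_thing_principal(thingName='device-1',
                                      principal=cert['certificateArn'])
        principals = client.list_thing_principals(thingName='device-1')['principals']
        assert principals == [cert['certificateArn']]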
--git a/moto/iot/responses.py b/moto/iot/responses.py new file mode 100644 index 000000000..bbe2bb016 --- /dev/null +++ b/moto/iot/responses.py @@ -0,0 +1,258 @@ +from __future__ import unicode_literals +from moto.core.responses import BaseResponse +from .models import iot_backends +import json + + +class IoTResponse(BaseResponse): + SERVICE_NAME = 'iot' + + @property + def iot_backend(self): + return iot_backends[self.region] + + def create_thing(self): + thing_name = self._get_param("thingName") + thing_type_name = self._get_param("thingTypeName") + attribute_payload = self._get_param("attributePayload") + thing_name, thing_arn = self.iot_backend.create_thing( + thing_name=thing_name, + thing_type_name=thing_type_name, + attribute_payload=attribute_payload, + ) + return json.dumps(dict(thingName=thing_name, thingArn=thing_arn)) + + def create_thing_type(self): + thing_type_name = self._get_param("thingTypeName") + thing_type_properties = self._get_param("thingTypeProperties") + thing_type_name, thing_type_arn = self.iot_backend.create_thing_type( + thing_type_name=thing_type_name, + thing_type_properties=thing_type_properties, + ) + return json.dumps(dict(thingTypeName=thing_type_name, thingTypeArn=thing_type_arn)) + + def list_thing_types(self): + # previous_next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + thing_type_name = self._get_param("thingTypeName") + thing_types = self.iot_backend.list_thing_types( + thing_type_name=thing_type_name + ) + + # TODO: support next_token and max_results + next_token = None + return json.dumps(dict(thingTypes=[_.to_dict() for _ in thing_types], nextToken=next_token)) + + def list_things(self): + # previous_next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + attribute_name = self._get_param("attributeName") + attribute_value = self._get_param("attributeValue") + thing_type_name = self._get_param("thingTypeName") + things = self.iot_backend.list_things( + attribute_name=attribute_name, + attribute_value=attribute_value, + thing_type_name=thing_type_name, + ) + # TODO: support next_token and max_results + next_token = None + return json.dumps(dict(things=[_.to_dict() for _ in things], nextToken=next_token)) + + def describe_thing(self): + thing_name = self._get_param("thingName") + thing = self.iot_backend.describe_thing( + thing_name=thing_name, + ) + print(thing.to_dict(include_default_client_id=True)) + return json.dumps(thing.to_dict(include_default_client_id=True)) + + def describe_thing_type(self): + thing_type_name = self._get_param("thingTypeName") + thing_type = self.iot_backend.describe_thing_type( + thing_type_name=thing_type_name, + ) + return json.dumps(thing_type.to_dict()) + + def delete_thing(self): + thing_name = self._get_param("thingName") + expected_version = self._get_param("expectedVersion") + self.iot_backend.delete_thing( + thing_name=thing_name, + expected_version=expected_version, + ) + return json.dumps(dict()) + + def delete_thing_type(self): + thing_type_name = self._get_param("thingTypeName") + self.iot_backend.delete_thing_type( + thing_type_name=thing_type_name, + ) + return json.dumps(dict()) + + def update_thing(self): + thing_name = self._get_param("thingName") + thing_type_name = self._get_param("thingTypeName") + attribute_payload = self._get_param("attributePayload") + expected_version = self._get_param("expectedVersion") + remove_thing_type = self._get_param("removeThingType") + self.iot_backend.update_thing( + 
thing_name=thing_name, + thing_type_name=thing_type_name, + attribute_payload=attribute_payload, + expected_version=expected_version, + remove_thing_type=remove_thing_type, + ) + return json.dumps(dict()) + + def create_keys_and_certificate(self): + set_as_active = self._get_param("setAsActive") + cert, key_pair = self.iot_backend.create_keys_and_certificate( + set_as_active=set_as_active, + ) + return json.dumps(dict( + certificateArn=cert.arn, + certificateId=cert.certificate_id, + certificatePem=cert.certificate_pem, + keyPair=key_pair + )) + + def delete_certificate(self): + certificate_id = self._get_param("certificateId") + self.iot_backend.delete_certificate( + certificate_id=certificate_id, + ) + return json.dumps(dict()) + + def describe_certificate(self): + certificate_id = self._get_param("certificateId") + certificate = self.iot_backend.describe_certificate( + certificate_id=certificate_id, + ) + return json.dumps(dict(certificateDescription=certificate.to_description_dict())) + + def list_certificates(self): + # page_size = self._get_int_param("pageSize") + # marker = self._get_param("marker") + # ascending_order = self._get_param("ascendingOrder") + certificates = self.iot_backend.list_certificates() + # TODO: handle pagination + return json.dumps(dict(certificates=[_.to_dict() for _ in certificates])) + + def update_certificate(self): + certificate_id = self._get_param("certificateId") + new_status = self._get_param("newStatus") + self.iot_backend.update_certificate( + certificate_id=certificate_id, + new_status=new_status, + ) + return json.dumps(dict()) + + def create_policy(self): + policy_name = self._get_param("policyName") + policy_document = self._get_param("policyDocument") + policy = self.iot_backend.create_policy( + policy_name=policy_name, + policy_document=policy_document, + ) + return json.dumps(policy.to_dict_at_creation()) + + def list_policies(self): + # marker = self._get_param("marker") + # page_size = self._get_int_param("pageSize") + # ascending_order = self._get_param("ascendingOrder") + policies = self.iot_backend.list_policies() + + # TODO: handle pagination + return json.dumps(dict(policies=[_.to_dict() for _ in policies])) + + def get_policy(self): + policy_name = self._get_param("policyName") + policy = self.iot_backend.get_policy( + policy_name=policy_name, + ) + return json.dumps(policy.to_get_dict()) + + def delete_policy(self): + policy_name = self._get_param("policyName") + self.iot_backend.delete_policy( + policy_name=policy_name, + ) + return json.dumps(dict()) + + def attach_principal_policy(self): + policy_name = self._get_param("policyName") + principal = self.headers.get('x-amzn-iot-principal') + self.iot_backend.attach_principal_policy( + policy_name=policy_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def detach_principal_policy(self): + policy_name = self._get_param("policyName") + principal = self.headers.get('x-amzn-iot-principal') + self.iot_backend.detach_principal_policy( + policy_name=policy_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def list_principal_policies(self): + principal = self.headers.get('x-amzn-iot-principal') + # marker = self._get_param("marker") + # page_size = self._get_int_param("pageSize") + # ascending_order = self._get_param("ascendingOrder") + policies = self.iot_backend.list_principal_policies( + principal_arn=principal + ) + # TODO: handle pagination + next_marker = None + return json.dumps(dict(policies=[_.to_dict() for _ in policies], 
nextMarker=next_marker)) + + def list_policy_principals(self): + policy_name = self.headers.get('x-amzn-iot-policy') + # marker = self._get_param("marker") + # page_size = self._get_int_param("pageSize") + # ascending_order = self._get_param("ascendingOrder") + principals = self.iot_backend.list_policy_principals( + policy_name=policy_name, + ) + # TODO: handle pagination + next_marker = None + return json.dumps(dict(principals=principals, nextMarker=next_marker)) + + def attach_thing_principal(self): + thing_name = self._get_param("thingName") + principal = self.headers.get('x-amzn-principal') + self.iot_backend.attach_thing_principal( + thing_name=thing_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def detach_thing_principal(self): + thing_name = self._get_param("thingName") + principal = self.headers.get('x-amzn-principal') + self.iot_backend.detach_thing_principal( + thing_name=thing_name, + principal_arn=principal, + ) + return json.dumps(dict()) + + def list_principal_things(self): + next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + principal = self.headers.get('x-amzn-principal') + things = self.iot_backend.list_principal_things( + principal_arn=principal, + ) + # TODO: handle pagination + next_token = None + return json.dumps(dict(things=things, nextToken=next_token)) + + def list_thing_principals(self): + thing_name = self._get_param("thingName") + principals = self.iot_backend.list_thing_principals( + thing_name=thing_name, + ) + return json.dumps(dict(principals=principals)) diff --git a/moto/iot/urls.py b/moto/iot/urls.py new file mode 100644 index 000000000..6d11c15a5 --- /dev/null +++ b/moto/iot/urls.py @@ -0,0 +1,14 @@ +from __future__ import unicode_literals +from .responses import IoTResponse + +url_bases = [ + "https?://iot.(.+).amazonaws.com", +] + + +response = IoTResponse() + + +url_paths = { + '{0}/.*$': response.dispatch, +} diff --git a/moto/iotdata/__init__.py b/moto/iotdata/__init__.py new file mode 100644 index 000000000..214f2e575 --- /dev/null +++ b/moto/iotdata/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import iotdata_backends +from ..core.models import base_decorator + +iotdata_backend = iotdata_backends['us-east-1'] +mock_iotdata = base_decorator(iotdata_backends) diff --git a/moto/iotdata/exceptions.py b/moto/iotdata/exceptions.py new file mode 100644 index 000000000..ddc6b37fd --- /dev/null +++ b/moto/iotdata/exceptions.py @@ -0,0 +1,23 @@ +from __future__ import unicode_literals +from moto.core.exceptions import JsonRESTError + + +class IoTDataPlaneClientError(JsonRESTError): + code = 400 + + +class ResourceNotFoundException(IoTDataPlaneClientError): + def __init__(self): + self.code = 404 + super(ResourceNotFoundException, self).__init__( + "ResourceNotFoundException", + "The specified resource does not exist" + ) + + +class InvalidRequestException(IoTDataPlaneClientError): + def __init__(self, message): + self.code = 400 + super(InvalidRequestException, self).__init__( + "InvalidRequestException", message + ) diff --git a/moto/iotdata/models.py b/moto/iotdata/models.py new file mode 100644 index 000000000..ad4caa89e --- /dev/null +++ b/moto/iotdata/models.py @@ -0,0 +1,193 @@ +from __future__ import unicode_literals +import json +import time +import boto3 +import jsondiff +from moto.core import BaseBackend, BaseModel +from moto.iot import iot_backends +from .exceptions import ( + ResourceNotFoundException, + InvalidRequestException +) + + +class 
FakeShadow(BaseModel): + """See the specification: + http://docs.aws.amazon.com/iot/latest/developerguide/thing-shadow-document-syntax.html + """ + def __init__(self, desired, reported, requested_payload, version, deleted=False): + self.desired = desired + self.reported = reported + self.requested_payload = requested_payload + self.version = version + self.timestamp = int(time.time()) + self.deleted = deleted + + self.metadata_desired = self._create_metadata_from_state(self.desired, self.timestamp) + self.metadata_reported = self._create_metadata_from_state(self.reported, self.timestamp) + + @classmethod + def create_from_previous_version(cls, previous_shadow, payload): + """ + set None to payload when you want to delete shadow + """ + version, previous_payload = (previous_shadow.version + 1, previous_shadow.to_dict(include_delta=False)) if previous_shadow else (1, {}) + + if payload is None: + # if given payload is None, delete existing payload + # this means the request was delete_thing_shadow + shadow = FakeShadow(None, None, None, version, deleted=True) + return shadow + + # we can make sure that payload has 'state' key + desired = payload['state'].get( + 'desired', + previous_payload.get('state', {}).get('desired', None) + ) + reported = payload['state'].get( + 'reported', + previous_payload.get('state', {}).get('reported', None) + ) + shadow = FakeShadow(desired, reported, payload, version) + return shadow + + @classmethod + def parse_payload(cls, desired, reported): + if desired is None: + delta = reported + elif reported is None: + delta = desired + else: + delta = jsondiff.diff(desired, reported) + return delta + + def _create_metadata_from_state(self, state, ts): + """ + state must be disired or reported stype dict object + replces primitive type with {"timestamp": ts} in dict + """ + if state is None: + return None + + def _f(elem, ts): + if isinstance(elem, dict): + return {_: _f(elem[_], ts) for _ in elem.keys()} + if isinstance(elem, list): + return [_f(_, ts) for _ in elem] + return {"timestamp": ts} + return _f(state, ts) + + def to_response_dict(self): + desired = self.requested_payload['state'].get('desired', None) + reported = self.requested_payload['state'].get('reported', None) + + payload = {} + if desired is not None: + payload['desired'] = desired + if reported is not None: + payload['reported'] = reported + + metadata = {} + if desired is not None: + metadata['desired'] = self._create_metadata_from_state(desired, self.timestamp) + if reported is not None: + metadata['reported'] = self._create_metadata_from_state(reported, self.timestamp) + return { + 'state': payload, + 'metadata': metadata, + 'timestamp': self.timestamp, + 'version': self.version + } + + def to_dict(self, include_delta=True): + """returning nothing except for just top-level keys for now. 
+ """ + if self.deleted: + return { + 'timestamp': self.timestamp, + 'version': self.version + } + delta = self.parse_payload(self.desired, self.reported) + payload = {} + if self.desired is not None: + payload['desired'] = self.desired + if self.reported is not None: + payload['reported'] = self.reported + if include_delta and (delta is not None and len(delta.keys()) != 0): + payload['delta'] = delta + + metadata = {} + if self.metadata_desired is not None: + metadata['desired'] = self.metadata_desired + if self.metadata_reported is not None: + metadata['reported'] = self.metadata_reported + + return { + 'state': payload, + 'metadata': metadata, + 'timestamp': self.timestamp, + 'version': self.version + } + + +class IoTDataPlaneBackend(BaseBackend): + def __init__(self, region_name=None): + super(IoTDataPlaneBackend, self).__init__() + self.region_name = region_name + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + def update_thing_shadow(self, thing_name, payload): + """ + spec of payload: + - need node `state` + - state node must be an Object + - State contains an invalid node: 'foo' + """ + thing = iot_backends[self.region_name].describe_thing(thing_name) + + # validate + try: + payload = json.loads(payload) + except ValueError: + raise InvalidRequestException('invalid json') + if 'state' not in payload: + raise InvalidRequestException('need node `state`') + if not isinstance(payload['state'], dict): + raise InvalidRequestException('state node must be an Object') + if any(_ for _ in payload['state'].keys() if _ not in ['desired', 'reported']): + raise InvalidRequestException('State contains an invalid node') + + new_shadow = FakeShadow.create_from_previous_version(thing.thing_shadow, payload) + thing.thing_shadow = new_shadow + return thing.thing_shadow + + def get_thing_shadow(self, thing_name): + thing = iot_backends[self.region_name].describe_thing(thing_name) + + if thing.thing_shadow is None or thing.thing_shadow.deleted: + raise ResourceNotFoundException() + return thing.thing_shadow + + def delete_thing_shadow(self, thing_name): + """after deleting, get_thing_shadow will raise ResourceNotFound. + But version of the shadow keep increasing... 
+ """ + thing = iot_backends[self.region_name].describe_thing(thing_name) + if thing.thing_shadow is None: + raise ResourceNotFoundException() + payload = None + new_shadow = FakeShadow.create_from_previous_version(thing.thing_shadow, payload) + thing.thing_shadow = new_shadow + return thing.thing_shadow + + def publish(self, topic, qos, payload): + # do nothing because client won't know about the result + return None + + +available_regions = boto3.session.Session().get_available_regions("iot-data") +iotdata_backends = {region: IoTDataPlaneBackend(region) for region in available_regions} diff --git a/moto/iotdata/responses.py b/moto/iotdata/responses.py new file mode 100644 index 000000000..8ab724ed1 --- /dev/null +++ b/moto/iotdata/responses.py @@ -0,0 +1,46 @@ +from __future__ import unicode_literals +from moto.core.responses import BaseResponse +from .models import iotdata_backends +import json + + +class IoTDataPlaneResponse(BaseResponse): + SERVICE_NAME = 'iot-data' + + @property + def iotdata_backend(self): + return iotdata_backends[self.region] + + def update_thing_shadow(self): + thing_name = self._get_param("thingName") + payload = self.body + payload = self.iotdata_backend.update_thing_shadow( + thing_name=thing_name, + payload=payload, + ) + return json.dumps(payload.to_response_dict()) + + def get_thing_shadow(self): + thing_name = self._get_param("thingName") + payload = self.iotdata_backend.get_thing_shadow( + thing_name=thing_name, + ) + return json.dumps(payload.to_dict()) + + def delete_thing_shadow(self): + thing_name = self._get_param("thingName") + payload = self.iotdata_backend.delete_thing_shadow( + thing_name=thing_name, + ) + return json.dumps(payload.to_dict()) + + def publish(self): + topic = self._get_param("topic") + qos = self._get_int_param("qos") + payload = self._get_param("payload") + self.iotdata_backend.publish( + topic=topic, + qos=qos, + payload=payload, + ) + return json.dumps(dict()) diff --git a/moto/iotdata/urls.py b/moto/iotdata/urls.py new file mode 100644 index 000000000..a3bcb0a52 --- /dev/null +++ b/moto/iotdata/urls.py @@ -0,0 +1,14 @@ +from __future__ import unicode_literals +from .responses import IoTDataPlaneResponse + +url_bases = [ + "https?://data.iot.(.+).amazonaws.com", +] + + +response = IoTDataPlaneResponse() + + +url_paths = { + '{0}/.*$': response.dispatch, +} diff --git a/moto/kms/models.py b/moto/kms/models.py index be8c52162..ca27f030a 100644 --- a/moto/kms/models.py +++ b/moto/kms/models.py @@ -103,8 +103,10 @@ class KmsBackend(BaseBackend): self.key_to_aliases[target_key_id].add(alias_name) def delete_alias(self, alias_name): + """Delete the alias.""" for aliases in self.key_to_aliases.values(): - aliases.remove(alias_name) + if alias_name in aliases: + aliases.remove(alias_name) def get_all_aliases(self): return self.key_to_aliases diff --git a/moto/logs/models.py b/moto/logs/models.py index 14f511932..09dcb3645 100644 --- a/moto/logs/models.py +++ b/moto/logs/models.py @@ -22,6 +22,13 @@ class LogEvent: "timestamp": self.timestamp } + def to_response_dict(self): + return { + "ingestionTime": self.ingestionTime, + "message": self.message, + "timestamp": self.timestamp + } + class LogStream: _log_ids = 0 @@ -41,7 +48,14 @@ class LogStream: self.__class__._log_ids += 1 + def _update(self): + self.firstEventTimestamp = min([x.timestamp for x in self.events]) + self.lastEventTimestamp = max([x.timestamp for x in self.events]) + def to_describe_dict(self): + # Compute start and end times + self._update() + return { "arn": 
self.arn, "creationTime": self.creationTime, @@ -79,7 +93,7 @@ class LogStream: if next_token is None: next_token = 0 - events_page = events[next_token: next_token + limit] + events_page = [event.to_response_dict() for event in events[next_token: next_token + limit]] next_token += limit if next_token >= len(self.events): next_token = None @@ -120,17 +134,17 @@ class LogGroup: del self.streams[log_stream_name] def describe_log_streams(self, descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by): - log_streams = [stream.to_describe_dict() for name, stream in self.streams.items() if name.startswith(log_stream_name_prefix)] + log_streams = [(name, stream.to_describe_dict()) for name, stream in self.streams.items() if name.startswith(log_stream_name_prefix)] - def sorter(stream): - return stream.name if order_by == 'logStreamName' else stream.lastEventTimestamp + def sorter(item): + return item[0] if order_by == 'logStreamName' else item[1]['lastEventTimestamp'] if next_token is None: next_token = 0 log_streams = sorted(log_streams, key=sorter, reverse=descending) new_token = next_token + limit - log_streams_page = log_streams[next_token: new_token] + log_streams_page = [x[1] for x in log_streams[next_token: new_token]] if new_token >= len(log_streams): new_token = None diff --git a/moto/logs/responses.py b/moto/logs/responses.py index 4cb9caa6a..e0a17f5f8 100644 --- a/moto/logs/responses.py +++ b/moto/logs/responses.py @@ -47,7 +47,7 @@ class LogsResponse(BaseResponse): def describe_log_streams(self): log_group_name = self._get_param('logGroupName') - log_stream_name_prefix = self._get_param('logStreamNamePrefix') + log_stream_name_prefix = self._get_param('logStreamNamePrefix', '') descending = self._get_param('descending', False) limit = self._get_param('limit', 50) assert limit <= 50 @@ -83,13 +83,13 @@ class LogsResponse(BaseResponse): limit = self._get_param('limit', 10000) assert limit <= 10000 next_token = self._get_param('nextToken') - start_from_head = self._get_param('startFromHead') + start_from_head = self._get_param('startFromHead', False) events, next_backward_token, next_foward_token = \ self.logs_backend.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head) return json.dumps({ - "events": events, + "events": [ob.__dict__ for ob in events], "nextBackwardToken": next_backward_token, "nextForwardToken": next_foward_token }) diff --git a/moto/rds/responses.py b/moto/rds/responses.py index 0895a8bf2..987a6f21a 100644 --- a/moto/rds/responses.py +++ b/moto/rds/responses.py @@ -107,6 +107,9 @@ class RDSResponse(BaseResponse): def modify_db_instance(self): db_instance_identifier = self._get_param('DBInstanceIdentifier') db_kwargs = self._get_db_kwargs() + new_db_instance_identifier = self._get_param('NewDBInstanceIdentifier') + if new_db_instance_identifier: + db_kwargs['new_db_instance_identifier'] = new_db_instance_identifier database = self.backend.modify_database( db_instance_identifier, db_kwargs) template = self.response_template(MODIFY_DATABASE_TEMPLATE) diff --git a/moto/rds2/models.py b/moto/rds2/models.py index bb66ead57..268ae5af2 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -704,7 +704,8 @@ class RDS2Backend(BaseBackend): if self.arn_regex.match(source_database_id): db_kwargs['region'] = self.region - replica = copy.deepcopy(primary) + # Shouldn't really copy here as the instance is duplicated. RDS replicas have different instances. 
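The NewDBInstanceIdentifier plumbing above, together with the matching rds2 backend change a few hunks below, lets a mocked instance be renamed through ModifyDBInstance. A minimal sketch of that round trip, assuming moto's mock_rds2 decorator and a boto3 RDS client; every identifier and parameter value here is illustrative rather than taken from the patch:

```python
import boto3
from moto import mock_rds2


@mock_rds2
def test_modify_db_instance_rename():
    client = boto3.client("rds", region_name="us-west-2")
    client.create_db_instance(
        DBInstanceIdentifier="db-original",
        DBInstanceClass="db.m1.small",
        Engine="postgres",
        MasterUsername="admin",
        MasterUserPassword="password123",
        AllocatedStorage=10,
    )
    # NewDBInstanceIdentifier is now forwarded to the backend, which re-keys
    # the database under the new identifier and drops the old entry.
    client.modify_db_instance(
        DBInstanceIdentifier="db-original",
        NewDBInstanceIdentifier="db-renamed",
        ApplyImmediately=True,
    )
    identifiers = [
        db["DBInstanceIdentifier"]
        for db in client.describe_db_instances()["DBInstances"]
    ]
    assert identifiers == ["db-renamed"]
```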
+ replica = copy.copy(primary) replica.update(db_kwargs) replica.set_as_replica() self.databases[database_id] = replica @@ -735,6 +736,10 @@ class RDS2Backend(BaseBackend): def modify_database(self, db_instance_identifier, db_kwargs): database = self.describe_databases(db_instance_identifier)[0] + if 'new_db_instance_identifier' in db_kwargs: + del self.databases[db_instance_identifier] + db_instance_identifier = db_kwargs['db_instance_identifier'] = db_kwargs.pop('new_db_instance_identifier') + self.databases[db_instance_identifier] = database database.update(db_kwargs) return database @@ -752,13 +757,13 @@ class RDS2Backend(BaseBackend): raise InvalidDBInstanceStateError(db_instance_identifier, 'stop') if db_snapshot_identifier: self.create_snapshot(db_instance_identifier, db_snapshot_identifier) - database.status = 'shutdown' + database.status = 'stopped' return database def start_database(self, db_instance_identifier): database = self.describe_databases(db_instance_identifier)[0] # todo: bunch of different error messages to be generated from this api call - if database.status != 'shutdown': + if database.status != 'stopped': raise InvalidDBInstanceStateError(db_instance_identifier, 'start') database.status = 'available' return database diff --git a/moto/rds2/responses.py b/moto/rds2/responses.py index bf76660aa..3e093221d 100644 --- a/moto/rds2/responses.py +++ b/moto/rds2/responses.py @@ -135,6 +135,9 @@ class RDS2Response(BaseResponse): def modify_db_instance(self): db_instance_identifier = self._get_param('DBInstanceIdentifier') db_kwargs = self._get_db_kwargs() + new_db_instance_identifier = self._get_param('NewDBInstanceIdentifier') + if new_db_instance_identifier: + db_kwargs['new_db_instance_identifier'] = new_db_instance_identifier database = self.backend.modify_database( db_instance_identifier, db_kwargs) template = self.response_template(MODIFY_DATABASE_TEMPLATE) diff --git a/moto/resourcegroupstaggingapi/__init__.py b/moto/resourcegroupstaggingapi/__init__.py new file mode 100644 index 000000000..bd0c4a7df --- /dev/null +++ b/moto/resourcegroupstaggingapi/__init__.py @@ -0,0 +1,6 @@ +from __future__ import unicode_literals +from .models import resourcegroupstaggingapi_backends +from ..core.models import base_decorator + +resourcegroupstaggingapi_backend = resourcegroupstaggingapi_backends['us-east-1'] +mock_resourcegroupstaggingapi = base_decorator(resourcegroupstaggingapi_backends) diff --git a/moto/resourcegroupstaggingapi/models.py b/moto/resourcegroupstaggingapi/models.py new file mode 100644 index 000000000..fbc54454b --- /dev/null +++ b/moto/resourcegroupstaggingapi/models.py @@ -0,0 +1,511 @@ +from __future__ import unicode_literals +import uuid +import boto3 +import six +from moto.core import BaseBackend +from moto.core.exceptions import RESTError + +from moto.s3 import s3_backends +from moto.ec2 import ec2_backends +from moto.elb import elb_backends +from moto.elbv2 import elbv2_backends +from moto.kinesis import kinesis_backends +from moto.rds2 import rds2_backends +from moto.glacier import glacier_backends +from moto.redshift import redshift_backends +from moto.emr import emr_backends + +# Left: EC2 ElastiCache RDS ELB CloudFront WorkSpaces Lambda EMR Glacier Kinesis Redshift Route53 +# StorageGateway DynamoDB MachineLearning ACM DirectConnect DirectoryService CloudHSM +# Inspector Elasticsearch + + +class ResourceGroupsTaggingAPIBackend(BaseBackend): + def __init__(self, region_name=None): + super(ResourceGroupsTaggingAPIBackend, self).__init__() + 
self.region_name = region_name + + self._pages = {} + # Like 'someuuid': {'gen': , 'misc': None} + # Misc is there for peeking from a generator and it cant + # fit in the current request. As we only store generators + # theres not really any point to clean up + + def reset(self): + region_name = self.region_name + self.__dict__ = {} + self.__init__(region_name) + + @property + def s3_backend(self): + """ + :rtype: moto.s3.models.S3Backend + """ + return s3_backends['global'] + + @property + def ec2_backend(self): + """ + :rtype: moto.ec2.models.EC2Backend + """ + return ec2_backends[self.region_name] + + @property + def elb_backend(self): + """ + :rtype: moto.elb.models.ELBBackend + """ + return elb_backends[self.region_name] + + @property + def elbv2_backend(self): + """ + :rtype: moto.elbv2.models.ELBv2Backend + """ + return elbv2_backends[self.region_name] + + @property + def kinesis_backend(self): + """ + :rtype: moto.kinesis.models.KinesisBackend + """ + return kinesis_backends[self.region_name] + + @property + def rds_backend(self): + """ + :rtype: moto.rds2.models.RDS2Backend + """ + return rds2_backends[self.region_name] + + @property + def glacier_backend(self): + """ + :rtype: moto.glacier.models.GlacierBackend + """ + return glacier_backends[self.region_name] + + @property + def emr_backend(self): + """ + :rtype: moto.emr.models.ElasticMapReduceBackend + """ + return emr_backends[self.region_name] + + @property + def redshift_backend(self): + """ + :rtype: moto.redshift.models.RedshiftBackend + """ + return redshift_backends[self.region_name] + + def _get_resources_generator(self, tag_filters=None, resource_type_filters=None): + # Look at + # https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + + # TODO move these to their respective backends + filters = [lambda t, v: True] + for tag_filter_dict in tag_filters: + values = tag_filter_dict.get('Values', []) + if len(values) == 0: + # Check key matches + filters.append(lambda t, v: t == tag_filter_dict['Key']) + elif len(values) == 1: + # Check its exactly the same as key, value + filters.append(lambda t, v: t == tag_filter_dict['Key'] and v == values[0]) + else: + # Check key matches and value is one of the provided values + filters.append(lambda t, v: t == tag_filter_dict['Key'] and v in values) + + def tag_filter(tag_list): + result = [] + + for tag in tag_list: + temp_result = [] + for f in filters: + f_result = f(tag['Key'], tag['Value']) + temp_result.append(f_result) + result.append(all(temp_result)) + + return any(result) + + # Do S3, resource type s3 + if not resource_type_filters or 's3' in resource_type_filters: + for bucket in self.s3_backend.buckets.values(): + tags = [] + for tag in bucket.tags.tag_set.tags: + tags.append({'Key': tag.key, 'Value': tag.value}) + + if not tags or not tag_filter(tags): # Skip if no tags, or invalid filter + continue + yield {'ResourceARN': 'arn:aws:s3:::' + bucket.name, 'Tags': tags} + + # EC2 tags + def get_ec2_tags(res_id): + result = [] + for key, value in self.ec2_backend.tags.get(res_id, {}).items(): + result.append({'Key': key, 'Value': value}) + return result + + # EC2 AMI, resource type ec2:image + if not resource_type_filters or 'ec2' in resource_type_filters or 'ec2:image' in resource_type_filters: + for ami in self.ec2_backend.amis.values(): + tags = get_ec2_tags(ami.id) + + if not tags or not tag_filter(tags): # Skip if no tags, or invalid filter + continue + yield {'ResourceARN': 'arn:aws:ec2:{0}::image/{1}'.format(self.region_name, ami.id), 'Tags': 
tags} + + # EC2 Instance, resource type ec2:instance + if not resource_type_filters or 'ec2' in resource_type_filters or 'ec2:instance' in resource_type_filters: + for reservation in self.ec2_backend.reservations.values(): + for instance in reservation.instances: + tags = get_ec2_tags(instance.id) + + if not tags or not tag_filter(tags): # Skip if no tags, or invalid filter + continue + yield {'ResourceARN': 'arn:aws:ec2:{0}::instance/{1}'.format(self.region_name, instance.id), 'Tags': tags} + + # EC2 NetworkInterface, resource type ec2:network-interface + if not resource_type_filters or 'ec2' in resource_type_filters or 'ec2:network-interface' in resource_type_filters: + for eni in self.ec2_backend.enis.values(): + tags = get_ec2_tags(eni.id) + + if not tags or not tag_filter(tags): # Skip if no tags, or invalid filter + continue + yield {'ResourceARN': 'arn:aws:ec2:{0}::network-interface/{1}'.format(self.region_name, eni.id), 'Tags': tags} + + # TODO EC2 ReservedInstance + + # EC2 SecurityGroup, resource type ec2:security-group + if not resource_type_filters or 'ec2' in resource_type_filters or 'ec2:security-group' in resource_type_filters: + for vpc in self.ec2_backend.groups.values(): + for sg in vpc.values(): + tags = get_ec2_tags(sg.id) + + if not tags or not tag_filter(tags): # Skip if no tags, or invalid filter + continue + yield {'ResourceARN': 'arn:aws:ec2:{0}::security-group/{1}'.format(self.region_name, sg.id), 'Tags': tags} + + # EC2 Snapshot, resource type ec2:snapshot + if not resource_type_filters or 'ec2' in resource_type_filters or 'ec2:snapshot' in resource_type_filters: + for snapshot in self.ec2_backend.snapshots.values(): + tags = get_ec2_tags(snapshot.id) + + if not tags or not tag_filter(tags): # Skip if no tags, or invalid filter + continue + yield {'ResourceARN': 'arn:aws:ec2:{0}::snapshot/{1}'.format(self.region_name, snapshot.id), 'Tags': tags} + + # TODO EC2 SpotInstanceRequest + + # EC2 Volume, resource type ec2:volume + if not resource_type_filters or 'ec2' in resource_type_filters or 'ec2:volume' in resource_type_filters: + for volume in self.ec2_backend.volumes.values(): + tags = get_ec2_tags(volume.id) + + if not tags or not tag_filter(tags): # Skip if no tags, or invalid filter + continue + yield {'ResourceARN': 'arn:aws:ec2:{0}::volume/{1}'.format(self.region_name, volume.id), 'Tags': tags} + + # TODO add these to the keys and values functions / combine functions + # ELB + + # EMR Cluster + + # Glacier Vault + + # Kinesis + + # RDS Instance + # RDS Reserved Database Instance + # RDS Option Group + # RDS Parameter Group + # RDS Security Group + # RDS Snapshot + # RDS Subnet Group + # RDS Event Subscription + + # RedShift Cluster + # RedShift Hardware security module (HSM) client certificate + # RedShift HSM connection + # RedShift Parameter group + # RedShift Snapshot + # RedShift Subnet group + + # VPC + # VPC Customer Gateway + # VPC DHCP Option Set + # VPC Internet Gateway + # VPC Network ACL + # VPC Route Table + # VPC Subnet + # VPC Virtual Private Gateway + # VPC VPN Connection + + def _get_tag_keys_generator(self): + # Look at + # https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + + # Do S3, resource type s3 + for bucket in self.s3_backend.buckets.values(): + for tag in bucket.tags.tag_set.tags: + yield tag.key + + # EC2 tags + def get_ec2_keys(res_id): + result = [] + for key in self.ec2_backend.tags.get(res_id, {}): + result.append(key) + return result + + # EC2 AMI, resource type ec2:image + for ami in 
self.ec2_backend.amis.values(): + for key in get_ec2_keys(ami.id): + yield key + + # EC2 Instance, resource type ec2:instance + for reservation in self.ec2_backend.reservations.values(): + for instance in reservation.instances: + for key in get_ec2_keys(instance.id): + yield key + + # EC2 NetworkInterface, resource type ec2:network-interface + for eni in self.ec2_backend.enis.values(): + for key in get_ec2_keys(eni.id): + yield key + + # TODO EC2 ReservedInstance + + # EC2 SecurityGroup, resource type ec2:security-group + for vpc in self.ec2_backend.groups.values(): + for sg in vpc.values(): + for key in get_ec2_keys(sg.id): + yield key + + # EC2 Snapshot, resource type ec2:snapshot + for snapshot in self.ec2_backend.snapshots.values(): + for key in get_ec2_keys(snapshot.id): + yield key + + # TODO EC2 SpotInstanceRequest + + # EC2 Volume, resource type ec2:volume + for volume in self.ec2_backend.volumes.values(): + for key in get_ec2_keys(volume.id): + yield key + + def _get_tag_values_generator(self, tag_key): + # Look at + # https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + + # Do S3, resource type s3 + for bucket in self.s3_backend.buckets.values(): + for tag in bucket.tags.tag_set.tags: + if tag.key == tag_key: + yield tag.value + + # EC2 tags + def get_ec2_values(res_id): + result = [] + for key, value in self.ec2_backend.tags.get(res_id, {}).items(): + if key == tag_key: + result.append(value) + return result + + # EC2 AMI, resource type ec2:image + for ami in self.ec2_backend.amis.values(): + for value in get_ec2_values(ami.id): + yield value + + # EC2 Instance, resource type ec2:instance + for reservation in self.ec2_backend.reservations.values(): + for instance in reservation.instances: + for value in get_ec2_values(instance.id): + yield value + + # EC2 NetworkInterface, resource type ec2:network-interface + for eni in self.ec2_backend.enis.values(): + for value in get_ec2_values(eni.id): + yield value + + # TODO EC2 ReservedInstance + + # EC2 SecurityGroup, resource type ec2:security-group + for vpc in self.ec2_backend.groups.values(): + for sg in vpc.values(): + for value in get_ec2_values(sg.id): + yield value + + # EC2 Snapshot, resource type ec2:snapshot + for snapshot in self.ec2_backend.snapshots.values(): + for value in get_ec2_values(snapshot.id): + yield value + + # TODO EC2 SpotInstanceRequest + + # EC2 Volume, resource type ec2:volume + for volume in self.ec2_backend.volumes.values(): + for value in get_ec2_values(volume.id): + yield value + + def get_resources(self, pagination_token=None, + resources_per_page=50, tags_per_page=100, + tag_filters=None, resource_type_filters=None): + # Simple range checning + if 100 >= tags_per_page >= 500: + raise RESTError('InvalidParameterException', 'TagsPerPage must be between 100 and 500') + if 1 >= resources_per_page >= 50: + raise RESTError('InvalidParameterException', 'ResourcesPerPage must be between 1 and 50') + + # If we have a token, go and find the respective generator, or error + if pagination_token: + if pagination_token not in self._pages: + raise RESTError('PaginationTokenExpiredException', 'Token does not exist') + + generator = self._pages[pagination_token]['gen'] + left_over = self._pages[pagination_token]['misc'] + else: + generator = self._get_resources_generator(tag_filters=tag_filters, + resource_type_filters=resource_type_filters) + left_over = None + + result = [] + current_tags = 0 + current_resources = 0 + if left_over: + result.append(left_over) + current_resources += 1 + 
current_tags += len(left_over['Tags']) + + try: + while True: + # Generator format: [{'ResourceARN': str, 'Tags': [{'Key': str, 'Value': str]}, ...] + next_item = six.next(generator) + resource_tags = len(next_item['Tags']) + + if current_resources >= resources_per_page: + break + if current_tags + resource_tags >= tags_per_page: + break + + current_resources += 1 + current_tags += resource_tags + + result.append(next_item) + + except StopIteration: + # Finished generator before invalidating page limiting constraints + return None, result + + # Didn't hit StopIteration so there's stuff left in generator + new_token = str(uuid.uuid4()) + self._pages[new_token] = {'gen': generator, 'misc': next_item} + + # Token used up, might as well bin now, if you call it again your an idiot + if pagination_token: + del self._pages[pagination_token] + + return new_token, result + + def get_tag_keys(self, pagination_token=None): + + if pagination_token: + if pagination_token not in self._pages: + raise RESTError('PaginationTokenExpiredException', 'Token does not exist') + + generator = self._pages[pagination_token]['gen'] + left_over = self._pages[pagination_token]['misc'] + else: + generator = self._get_tag_keys_generator() + left_over = None + + result = [] + current_tags = 0 + if left_over: + result.append(left_over) + current_tags += 1 + + try: + while True: + # Generator format: ['tag', 'tag', 'tag', ...] + next_item = six.next(generator) + + if current_tags + 1 >= 128: + break + + current_tags += 1 + + result.append(next_item) + + except StopIteration: + # Finished generator before invalidating page limiting constraints + return None, result + + # Didn't hit StopIteration so there's stuff left in generator + new_token = str(uuid.uuid4()) + self._pages[new_token] = {'gen': generator, 'misc': next_item} + + # Token used up, might as well bin now, if you call it again your an idiot + if pagination_token: + del self._pages[pagination_token] + + return new_token, result + + def get_tag_values(self, pagination_token, key): + + if pagination_token: + if pagination_token not in self._pages: + raise RESTError('PaginationTokenExpiredException', 'Token does not exist') + + generator = self._pages[pagination_token]['gen'] + left_over = self._pages[pagination_token]['misc'] + else: + generator = self._get_tag_values_generator(key) + left_over = None + + result = [] + current_tags = 0 + if left_over: + result.append(left_over) + current_tags += 1 + + try: + while True: + # Generator format: ['value', 'value', 'value', ...] + next_item = six.next(generator) + + if current_tags + 1 >= 128: + break + + current_tags += 1 + + result.append(next_item) + + except StopIteration: + # Finished generator before invalidating page limiting constraints + return None, result + + # Didn't hit StopIteration so there's stuff left in generator + new_token = str(uuid.uuid4()) + self._pages[new_token] = {'gen': generator, 'misc': next_item} + + # Token used up, might as well bin now, if you call it again your an idiot + if pagination_token: + del self._pages[pagination_token] + + return new_token, result + + # These methods will be called from responses.py. 
+ # They should call a tag function inside of the moto module + # that governs the resource, that way if the target module + # changes how tags are delt with theres less to change + + # def tag_resources(self, resource_arn_list, tags): + # return failed_resources_map + # + # def untag_resources(self, resource_arn_list, tag_keys): + # return failed_resources_map + + +available_regions = boto3.session.Session().get_available_regions("resourcegroupstaggingapi") +resourcegroupstaggingapi_backends = {region: ResourceGroupsTaggingAPIBackend(region) for region in available_regions} diff --git a/moto/resourcegroupstaggingapi/responses.py b/moto/resourcegroupstaggingapi/responses.py new file mode 100644 index 000000000..966778f29 --- /dev/null +++ b/moto/resourcegroupstaggingapi/responses.py @@ -0,0 +1,97 @@ +from __future__ import unicode_literals +from moto.core.responses import BaseResponse +from .models import resourcegroupstaggingapi_backends +import json + + +class ResourceGroupsTaggingAPIResponse(BaseResponse): + SERVICE_NAME = 'resourcegroupstaggingapi' + + @property + def backend(self): + """ + Backend + :returns: Resource tagging api backend + :rtype: moto.resourcegroupstaggingapi.models.ResourceGroupsTaggingAPIBackend + """ + return resourcegroupstaggingapi_backends[self.region] + + def get_resources(self): + pagination_token = self._get_param("PaginationToken") + tag_filters = self._get_param("TagFilters", []) + resources_per_page = self._get_int_param("ResourcesPerPage", 50) + tags_per_page = self._get_int_param("TagsPerPage", 100) + resource_type_filters = self._get_param("ResourceTypeFilters", []) + + pagination_token, resource_tag_mapping_list = self.backend.get_resources( + pagination_token=pagination_token, + tag_filters=tag_filters, + resources_per_page=resources_per_page, + tags_per_page=tags_per_page, + resource_type_filters=resource_type_filters, + ) + + # Format tag response + response = { + 'ResourceTagMappingList': resource_tag_mapping_list + } + if pagination_token: + response['PaginationToken'] = pagination_token + + return json.dumps(response) + + def get_tag_keys(self): + pagination_token = self._get_param("PaginationToken") + pagination_token, tag_keys = self.backend.get_tag_keys( + pagination_token=pagination_token, + ) + + response = { + 'TagKeys': tag_keys + } + if pagination_token: + response['PaginationToken'] = pagination_token + + return json.dumps(response) + + def get_tag_values(self): + pagination_token = self._get_param("PaginationToken") + key = self._get_param("Key") + pagination_token, tag_values = self.backend.get_tag_values( + pagination_token=pagination_token, + key=key, + ) + + response = { + 'TagValues': tag_values + } + if pagination_token: + response['PaginationToken'] = pagination_token + + return json.dumps(response) + + # These methods are all thats left to be implemented + # the response is already set up, all thats needed is + # the respective model function to be implemented. 
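The get_resources, get_tag_keys, and get_tag_values paths above keep one generator per pagination token and filter each backend's tag store lazily. A hedged sketch of how the new mock is expected to be exercised end to end, assuming the mock_resourcegroupstaggingapi decorator defined earlier in this patch plus moto's existing EC2 mock; the AMI id, region, and tag values are illustrative:

```python
import boto3
from moto import mock_ec2, mock_resourcegroupstaggingapi


@mock_ec2
@mock_resourcegroupstaggingapi
def test_get_resources_filters_by_tag():
    ec2 = boto3.client("ec2", region_name="eu-west-1")
    reservation = ec2.run_instances(ImageId="ami-12345678", MinCount=1, MaxCount=1)
    instance_id = reservation["Instances"][0]["InstanceId"]
    ec2.create_tags(Resources=[instance_id], Tags=[{"Key": "team", "Value": "data"}])

    rtapi = boto3.client("resourcegroupstaggingapi", region_name="eu-west-1")
    mapping = rtapi.get_resources(
        TagFilters=[{"Key": "team", "Values": ["data"]}],
        ResourceTypeFilters=["ec2:instance"],
    )["ResourceTagMappingList"]

    # The backend yields one {'ResourceARN': ..., 'Tags': [...]} dict per match.
    assert any(instance_id in resource["ResourceARN"] for resource in mapping)
```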
+ # + # def tag_resources(self): + # resource_arn_list = self._get_list_prefix("ResourceARNList.member") + # tags = self._get_param("Tags") + # failed_resources_map = self.backend.tag_resources( + # resource_arn_list=resource_arn_list, + # tags=tags, + # ) + # + # # failed_resources_map should be {'resource': {'ErrorCode': str, 'ErrorMessage': str, 'StatusCode': int}} + # return json.dumps({'FailedResourcesMap': failed_resources_map}) + # + # def untag_resources(self): + # resource_arn_list = self._get_list_prefix("ResourceARNList.member") + # tag_keys = self._get_list_prefix("TagKeys.member") + # failed_resources_map = self.backend.untag_resources( + # resource_arn_list=resource_arn_list, + # tag_keys=tag_keys, + # ) + # + # # failed_resources_map should be {'resource': {'ErrorCode': str, 'ErrorMessage': str, 'StatusCode': int}} + # return json.dumps({'FailedResourcesMap': failed_resources_map}) diff --git a/moto/resourcegroupstaggingapi/urls.py b/moto/resourcegroupstaggingapi/urls.py new file mode 100644 index 000000000..a972df276 --- /dev/null +++ b/moto/resourcegroupstaggingapi/urls.py @@ -0,0 +1,10 @@ +from __future__ import unicode_literals +from .responses import ResourceGroupsTaggingAPIResponse + +url_bases = [ + "https?://tagging.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': ResourceGroupsTaggingAPIResponse.dispatch, +} diff --git a/moto/route53/models.py b/moto/route53/models.py index d12f4ee7a..af8bb690a 100644 --- a/moto/route53/models.py +++ b/moto/route53/models.py @@ -196,20 +196,20 @@ class FakeZone(BaseModel): self.rrsets = [ record_set for record_set in self.rrsets if record_set.set_identifier != set_identifier] - def get_record_sets(self, type_filter, name_filter): + def get_record_sets(self, start_type, start_name): record_sets = list(self.rrsets) # Copy the list - if type_filter: + if start_type: record_sets = [ - record_set for record_set in record_sets if record_set._type == type_filter] - if name_filter: + record_set for record_set in record_sets if record_set._type >= start_type] + if start_name: record_sets = [ - record_set for record_set in record_sets if record_set.name == name_filter] + record_set for record_set in record_sets if record_set.name >= start_name] return record_sets @property def physical_resource_id(self): - return self.name + return self.id @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): diff --git a/moto/route53/responses.py b/moto/route53/responses.py index 2419f896d..00e5c60a5 100644 --- a/moto/route53/responses.py +++ b/moto/route53/responses.py @@ -151,9 +151,9 @@ class Route53(BaseResponse): elif method == "GET": querystring = parse_qs(parsed_url.query) template = Template(LIST_RRSET_REPONSE) - type_filter = querystring.get("type", [None])[0] - name_filter = querystring.get("name", [None])[0] - record_sets = the_zone.get_record_sets(type_filter, name_filter) + start_type = querystring.get("type", [None])[0] + start_name = querystring.get("name", [None])[0] + record_sets = the_zone.get_record_sets(start_type, start_name) return 200, headers, template.render(record_sets=record_sets) def health_check_response(self, request, full_url, headers): diff --git a/moto/s3/models.py b/moto/s3/models.py index ae05292f2..91d3c1e2d 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -81,6 +81,9 @@ class FakeKey(BaseModel): def restore(self, days): self._expiry = datetime.datetime.utcnow() + datetime.timedelta(days) + def increment_version(self): + self._version_id += 1 + @property def 
etag(self): if self._etag is None: @@ -323,19 +326,10 @@ class CorsRule(BaseModel): def __init__(self, allowed_methods, allowed_origins, allowed_headers=None, expose_headers=None, max_age_seconds=None): - # Python 2 and 3 have different string types for handling unicodes. Python 2 wants `basestring`, - # whereas Python 3 is OK with str. This causes issues with the XML parser, which returns - # unicode strings in Python 2. So, need to do this to make it work in both Python 2 and 3: - import sys - if sys.version_info >= (3, 0): - str_type = str - else: - str_type = basestring # noqa - - self.allowed_methods = [allowed_methods] if isinstance(allowed_methods, str_type) else allowed_methods - self.allowed_origins = [allowed_origins] if isinstance(allowed_origins, str_type) else allowed_origins - self.allowed_headers = [allowed_headers] if isinstance(allowed_headers, str_type) else allowed_headers - self.exposed_headers = [expose_headers] if isinstance(expose_headers, str_type) else expose_headers + self.allowed_methods = [allowed_methods] if isinstance(allowed_methods, six.string_types) else allowed_methods + self.allowed_origins = [allowed_origins] if isinstance(allowed_origins, six.string_types) else allowed_origins + self.allowed_headers = [allowed_headers] if isinstance(allowed_headers, six.string_types) else allowed_headers + self.exposed_headers = [expose_headers] if isinstance(expose_headers, six.string_types) else expose_headers self.max_age_seconds = max_age_seconds @@ -389,25 +383,16 @@ class FakeBucket(BaseModel): if len(rules) > 100: raise MalformedXML() - # Python 2 and 3 have different string types for handling unicodes. Python 2 wants `basestring`, - # whereas Python 3 is OK with str. This causes issues with the XML parser, which returns - # unicode strings in Python 2. 
So, need to do this to make it work in both Python 2 and 3: - import sys - if sys.version_info >= (3, 0): - str_type = str - else: - str_type = basestring # noqa - for rule in rules: - assert isinstance(rule["AllowedMethod"], list) or isinstance(rule["AllowedMethod"], str_type) - assert isinstance(rule["AllowedOrigin"], list) or isinstance(rule["AllowedOrigin"], str_type) + assert isinstance(rule["AllowedMethod"], list) or isinstance(rule["AllowedMethod"], six.string_types) + assert isinstance(rule["AllowedOrigin"], list) or isinstance(rule["AllowedOrigin"], six.string_types) assert isinstance(rule.get("AllowedHeader", []), list) or isinstance(rule.get("AllowedHeader", ""), - str_type) + six.string_types) assert isinstance(rule.get("ExposedHeader", []), list) or isinstance(rule.get("ExposedHeader", ""), - str_type) - assert isinstance(rule.get("MaxAgeSeconds", "0"), str_type) + six.string_types) + assert isinstance(rule.get("MaxAgeSeconds", "0"), six.string_types) - if isinstance(rule["AllowedMethod"], str_type): + if isinstance(rule["AllowedMethod"], six.string_types): methods = [rule["AllowedMethod"]] else: methods = rule["AllowedMethod"] @@ -745,6 +730,10 @@ class S3Backend(BaseBackend): if dest_key_name != src_key_name: key = key.copy(dest_key_name) dest_bucket.keys[dest_key_name] = key + + # By this point, the destination key must exist, or KeyError + if dest_bucket.is_versioned: + dest_bucket.keys[dest_key_name].increment_version() if storage is not None: key.set_storage_class(storage) if acl is not None: diff --git a/moto/s3/responses.py b/moto/s3/responses.py index b04cb9496..6abb4f2d1 100755 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -8,6 +8,7 @@ from six.moves.urllib.parse import parse_qs, urlparse import xmltodict +from moto.packages.httpretty.core import HTTPrettyRequest from moto.core.responses import _TemplateEnvironmentMixin from moto.s3bucket_path.utils import bucket_name_from_url as bucketpath_bucket_name_from_url, parse_key_name as bucketpath_parse_key_name, is_delete_keys as bucketpath_is_delete_keys @@ -54,8 +55,10 @@ class ResponseObject(_TemplateEnvironmentMixin): if not host: host = urlparse(request.url).netloc - if not host or host.startswith("localhost") or re.match(r"^[^.]+$", host): - # For localhost or local domain names, default to path-based buckets + if (not host or host.startswith('localhost') or + re.match(r'^[^.]+$', host) or re.match(r'^.*\.svc\.cluster\.local$', host)): + # Default to path-based buckets for (1) localhost, (2) local host names that do not + # contain a "." 
(e.g., Docker container host names), or (3) kubernetes host names return False match = re.match(r'^([^\[\]:]+)(:\d+)?$', host) @@ -113,7 +116,10 @@ class ResponseObject(_TemplateEnvironmentMixin): return 200, {}, response.encode("utf-8") else: status_code, headers, response_content = response - return status_code, headers, response_content.encode("utf-8") + if not isinstance(response_content, six.binary_type): + response_content = response_content.encode("utf-8") + + return status_code, headers, response_content def _bucket_response(self, request, full_url, headers): parsed_url = urlparse(full_url) @@ -139,6 +145,7 @@ class ResponseObject(_TemplateEnvironmentMixin): body = b'' if isinstance(body, six.binary_type): body = body.decode('utf-8') + body = u'{0}'.format(body).encode('utf-8') if method == 'HEAD': return self._bucket_response_head(bucket_name, headers) @@ -209,7 +216,7 @@ class ResponseObject(_TemplateEnvironmentMixin): if not website_configuration: template = self.response_template(S3_NO_BUCKET_WEBSITE_CONFIG) return 404, {}, template.render(bucket_name=bucket_name) - return website_configuration + return 200, {}, website_configuration elif 'acl' in querystring: bucket = self.backend.get_bucket(bucket_name) template = self.response_template(S3_OBJECT_ACL_RESPONSE) @@ -355,7 +362,7 @@ class ResponseObject(_TemplateEnvironmentMixin): if not request.headers.get('Content-Length'): return 411, {}, "Content-Length required" if 'versioning' in querystring: - ver = re.search('([A-Za-z]+)', body) + ver = re.search('([A-Za-z]+)', body.decode()) if ver: self.backend.set_bucket_versioning(bucket_name, ver.group(1)) template = self.response_template(S3_BUCKET_VERSIONING) @@ -444,7 +451,12 @@ class ResponseObject(_TemplateEnvironmentMixin): def _bucket_response_post(self, request, body, bucket_name, headers): if not request.headers.get('Content-Length'): return 411, {}, "Content-Length required" - path = request.path if hasattr(request, 'path') else request.path_url + + if isinstance(request, HTTPrettyRequest): + path = request.path + else: + path = request.full_path if hasattr(request, 'full_path') else request.path_url + if self.is_delete_keys(request, path, bucket_name): return self._bucket_response_delete_keys(request, body, bucket_name, headers) @@ -454,6 +466,8 @@ class ResponseObject(_TemplateEnvironmentMixin): form = request.form else: # HTTPretty, build new form object + body = body.decode() + form = {} for kv in body.split('&'): k, v = kv.split('=') @@ -764,7 +778,7 @@ class ResponseObject(_TemplateEnvironmentMixin): return FakeTagging() def _tagging_from_xml(self, xml): - parsed_xml = xmltodict.parse(xml) + parsed_xml = xmltodict.parse(xml, force_list={'Tag': True}) tags = [] for tag in parsed_xml['Tagging']['TagSet']['Tag']: diff --git a/moto/sns/exceptions.py b/moto/sns/exceptions.py index 95b91acca..0e7a0bdcf 100644 --- a/moto/sns/exceptions.py +++ b/moto/sns/exceptions.py @@ -32,3 +32,11 @@ class SNSInvalidParameter(RESTError): def __init__(self, message): super(SNSInvalidParameter, self).__init__( "InvalidParameter", message) + + +class InvalidParameterValue(RESTError): + code = 400 + + def __init__(self, message): + super(InvalidParameterValue, self).__init__( + "InvalidParameterValue", message) diff --git a/moto/sns/models.py b/moto/sns/models.py index 5b7277d22..80da5f92f 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -7,6 +7,7 @@ import json import boto.sns import requests import six +import re from moto.compat import OrderedDict from moto.core import 
BaseBackend, BaseModel @@ -15,7 +16,8 @@ from moto.sqs import sqs_backends from moto.awslambda import lambda_backends from .exceptions import ( - SNSNotFoundError, DuplicateSnsEndpointError, SnsEndpointDisabled, SNSInvalidParameter + SNSNotFoundError, DuplicateSnsEndpointError, SnsEndpointDisabled, SNSInvalidParameter, + InvalidParameterValue ) from .utils import make_arn_for_topic, make_arn_for_subscription @@ -146,7 +148,7 @@ class PlatformEndpoint(BaseModel): if 'Token' not in self.attributes: self.attributes['Token'] = self.token if 'Enabled' not in self.attributes: - self.attributes['Enabled'] = True + self.attributes['Enabled'] = 'True' @property def enabled(self): @@ -193,9 +195,15 @@ class SNSBackend(BaseBackend): self.sms_attributes.update(attrs) def create_topic(self, name): - topic = Topic(name, self) - self.topics[topic.arn] = topic - return topic + fails_constraints = not re.match(r'^[a-zA-Z0-9](?:[A-Za-z0-9_-]{0,253}[a-zA-Z0-9])?$', name) + if fails_constraints: + raise InvalidParameterValue("Topic names must be made up of only uppercase and lowercase ASCII letters, numbers, underscores, and hyphens, and must be between 1 and 256 characters long.") + candidate_topic = Topic(name, self) + if candidate_topic.arn in self.topics: + return self.topics[candidate_topic.arn] + else: + self.topics[candidate_topic.arn] = candidate_topic + return candidate_topic def _get_values_nexttoken(self, values_map, next_token=None): if next_token is None: @@ -256,7 +264,10 @@ class SNSBackend(BaseBackend): else: return self._get_values_nexttoken(self.subscriptions, next_token) - def publish(self, arn, message): + def publish(self, arn, message, subject=None): + if subject is not None and len(subject) >= 100: + raise ValueError('Subject must be less than 100 characters') + try: topic = self.get_topic(arn) message_id = topic.publish(message) diff --git a/moto/sns/responses.py b/moto/sns/responses.py index 85764aa58..3b4aade80 100644 --- a/moto/sns/responses.py +++ b/moto/sns/responses.py @@ -239,6 +239,8 @@ class SNSResponse(BaseResponse): target_arn = self._get_param('TargetArn') topic_arn = self._get_param('TopicArn') phone_number = self._get_param('PhoneNumber') + subject = self._get_param('Subject') + if phone_number is not None: # Check phone is correct syntax (e164) if not is_e164(phone_number): @@ -261,7 +263,12 @@ class SNSResponse(BaseResponse): arn = topic_arn message = self._get_param('Message') - message_id = self.backend.publish(arn, message) + + try: + message_id = self.backend.publish(arn, message, subject=subject) + except ValueError as err: + error_response = self._error('InvalidParameter', str(err)) + return error_response, dict(status=400) if self.request_json: return json.dumps({ diff --git a/moto/sqs/exceptions.py b/moto/sqs/exceptions.py index d72cfdffc..baf721b53 100644 --- a/moto/sqs/exceptions.py +++ b/moto/sqs/exceptions.py @@ -16,3 +16,8 @@ class MessageAttributesInvalid(Exception): def __init__(self, description): self.description = description + + +class QueueDoesNotExist(Exception): + status_code = 404 + description = "The specified queue does not exist for this wsdl version." 
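The SNS hunks above make create_topic idempotent, validate topic names against the documented character set, and enforce the 100-character subject limit on publish. A minimal sketch of that behaviour before the SQS changes continue below, assuming moto's mock_sns decorator and boto3; the topic name and subject are illustrative:

```python
import boto3
import pytest
from botocore.exceptions import ClientError
from moto import mock_sns


@mock_sns
def test_create_topic_and_publish_validation():
    client = boto3.client("sns", region_name="us-east-1")

    # Re-creating a topic with the same name now returns the existing topic.
    first_arn = client.create_topic(Name="alerts")["TopicArn"]
    second_arn = client.create_topic(Name="alerts")["TopicArn"]
    assert first_arn == second_arn

    # Names outside the allowed character set are rejected with InvalidParameterValue.
    with pytest.raises(ClientError):
        client.create_topic(Name="not a valid topic name!")

    # Subjects of 100 characters or more are rejected on publish.
    with pytest.raises(ClientError):
        client.publish(TopicArn=first_arn, Message="hello", Subject="x" * 100)
```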
diff --git a/moto/sqs/models.py b/moto/sqs/models.py index e9d889453..85b69ab0e 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -2,6 +2,7 @@ from __future__ import unicode_literals import base64 import hashlib +import json import re import six import struct @@ -9,10 +10,16 @@ from xml.sax.saxutils import escape import boto.sqs +from moto.core.exceptions import RESTError from moto.core import BaseBackend, BaseModel from moto.core.utils import camelcase_to_underscores, get_random_message_id, unix_time, unix_time_millis from .utils import generate_receipt_handle -from .exceptions import ReceiptHandleIsInvalid, MessageNotInflight, MessageAttributesInvalid +from .exceptions import ( + MessageAttributesInvalid, + MessageNotInflight, + QueueDoesNotExist, + ReceiptHandleIsInvalid, +) DEFAULT_ACCOUNT_ID = 123456789012 DEFAULT_SENDER_ID = "AIDAIT2UOQQY3AUEKVGXU" @@ -161,11 +168,14 @@ class Queue(BaseModel): 'ReceiveMessageWaitTimeSeconds', 'VisibilityTimeout', 'WaitTimeSeconds'] + ALLOWED_PERMISSIONS = ('*', 'ChangeMessageVisibility', 'DeleteMessage', 'GetQueueAttributes', + 'GetQueueUrl', 'ReceiveMessage', 'SendMessage') def __init__(self, name, region, **kwargs): self.name = name self.visibility_timeout = int(kwargs.get('VisibilityTimeout', 30)) self.region = region + self.tags = {} self._messages = [] @@ -184,14 +194,42 @@ class Queue(BaseModel): self.message_retention_period = int(kwargs.get('MessageRetentionPeriod', 86400 * 4)) # four days self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format(self.region, self.name) self.receive_message_wait_time_seconds = int(kwargs.get('ReceiveMessageWaitTimeSeconds', 0)) + self.permissions = {} # wait_time_seconds will be set to immediate return messages self.wait_time_seconds = int(kwargs.get('WaitTimeSeconds', 0)) + self.redrive_policy = {} + self.dead_letter_queue = None + + if 'RedrivePolicy' in kwargs: + self._setup_dlq(kwargs['RedrivePolicy']) + # Check some conditions if self.fifo_queue and not self.name.endswith('.fifo'): raise MessageAttributesInvalid('Queue name must end in .fifo for FIFO queues') + def _setup_dlq(self, policy_json): + try: + self.redrive_policy = json.loads(policy_json) + except ValueError: + raise RESTError('InvalidParameterValue', 'Redrive policy does not contain valid json') + + if 'deadLetterTargetArn' not in self.redrive_policy: + raise RESTError('InvalidParameterValue', 'Redrive policy does not contain deadLetterTargetArn') + if 'maxReceiveCount' not in self.redrive_policy: + raise RESTError('InvalidParameterValue', 'Redrive policy does not contain maxReceiveCount') + + for queue in sqs_backends[self.region].queues.values(): + if queue.queue_arn == self.redrive_policy['deadLetterTargetArn']: + self.dead_letter_queue = queue + + if self.fifo_queue and not queue.fifo_queue: + raise RESTError('InvalidParameterCombination', 'Fifo queues cannot use non fifo dead letter queues') + break + else: + raise RESTError('AWS.SimpleQueueService.NonExistentQueue', 'Could not find DLQ for {0}'.format(self.redrive_policy['deadLetterTargetArn'])) + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] @@ -304,7 +342,10 @@ class SQSBackend(BaseBackend): return qs def get_queue(self, queue_name): - return self.queues.get(queue_name, None) + queue = self.queues.get(queue_name) + if queue is None: + raise QueueDoesNotExist() + return queue def delete_queue(self, queue_name): if queue_name in self.queues: @@ -374,9 +415,14 @@ class 
SQSBackend(BaseBackend): time.sleep(0.001) continue + messages_to_dlq = [] for message in queue.messages: if not message.visible: continue + if queue.dead_letter_queue is not None and message.approximate_receive_count >= queue.redrive_policy['maxReceiveCount']: + messages_to_dlq.append(message) + continue + message.mark_received( visibility_timeout=visibility_timeout ) @@ -384,6 +430,10 @@ class SQSBackend(BaseBackend): if len(result) >= count: break + for message in messages_to_dlq: + queue._messages.remove(message) + queue.dead_letter_queue.add_message(message) + return result def delete_message(self, queue_name, receipt_handle): @@ -411,6 +461,49 @@ class SQSBackend(BaseBackend): queue = self.get_queue(queue_name) queue._messages = [] + def list_dead_letter_source_queues(self, queue_name): + dlq = self.get_queue(queue_name) + + queues = [] + for queue in self.queues.values(): + if queue.dead_letter_queue is dlq: + queues.append(queue) + + return queues + + def add_permission(self, queue_name, actions, account_ids, label): + queue = self.get_queue(queue_name) + + if actions is None or len(actions) == 0: + raise RESTError('InvalidParameterValue', 'Need at least one Action') + if account_ids is None or len(account_ids) == 0: + raise RESTError('InvalidParameterValue', 'Need at least one Account ID') + + if not all([item in Queue.ALLOWED_PERMISSIONS for item in actions]): + raise RESTError('InvalidParameterValue', 'Invalid permissions') + + queue.permissions[label] = (account_ids, actions) + + def remove_permission(self, queue_name, label): + queue = self.get_queue(queue_name) + + if label not in queue.permissions: + raise RESTError('InvalidParameterValue', 'Permission doesnt exist for the given label') + + del queue.permissions[label] + + def tag_queue(self, queue_name, tags): + queue = self.get_queue(queue_name) + queue.tags.update(tags) + + def untag_queue(self, queue_name, tag_keys): + queue = self.get_queue(queue_name) + for key in tag_keys: + try: + del queue.tags[key] + except KeyError: + pass + sqs_backends = {} for region in boto.sqs.regions(): diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index e0e493ad8..bb21c1e2a 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -2,13 +2,14 @@ from __future__ import unicode_literals from six.moves.urllib.parse import urlparse from moto.core.responses import BaseResponse -from moto.core.utils import camelcase_to_underscores +from moto.core.utils import camelcase_to_underscores, amz_crc32, amzn_request_id from .utils import parse_message_attributes from .models import sqs_backends from .exceptions import ( MessageAttributesInvalid, MessageNotInflight, - ReceiptHandleIsInvalid + QueueDoesNotExist, + ReceiptHandleIsInvalid, ) MAXIMUM_VISIBILTY_TIMEOUT = 43200 @@ -39,18 +40,23 @@ class SQSResponse(BaseResponse): queue_name = self.path.split("/")[-1] return queue_name - def _get_validated_visibility_timeout(self): + def _get_validated_visibility_timeout(self, timeout=None): """ :raises ValueError: If specified visibility timeout exceeds MAXIMUM_VISIBILTY_TIMEOUT :raises TypeError: If visibility timeout was not specified """ - visibility_timeout = int(self.querystring.get("VisibilityTimeout")[0]) + if timeout is not None: + visibility_timeout = int(timeout) + else: + visibility_timeout = int(self.querystring.get("VisibilityTimeout")[0]) if visibility_timeout > MAXIMUM_VISIBILTY_TIMEOUT: raise ValueError return visibility_timeout + @amz_crc32 # crc last as request_id can edit XML + @amzn_request_id def call_action(self): 
status_code, headers, body = super(SQSResponse, self).call_action() if status_code == 404: @@ -76,7 +82,12 @@ class SQSResponse(BaseResponse): def get_queue_url(self): request_url = urlparse(self.uri) queue_name = self._get_param("QueueName") - queue = self.sqs_backend.get_queue(queue_name) + + try: + queue = self.sqs_backend.get_queue(queue_name) + except QueueDoesNotExist as e: + return self._error('QueueDoesNotExist', e.description) + if queue: template = self.response_template(GET_QUEUE_URL_RESPONSE) return template.render(queue=queue, request_url=request_url) @@ -111,9 +122,56 @@ class SQSResponse(BaseResponse): template = self.response_template(CHANGE_MESSAGE_VISIBILITY_RESPONSE) return template.render() + def change_message_visibility_batch(self): + queue_name = self._get_queue_name() + entries = self._get_list_prefix('ChangeMessageVisibilityBatchRequestEntry') + + success = [] + error = [] + for entry in entries: + try: + visibility_timeout = self._get_validated_visibility_timeout(entry['visibility_timeout']) + except ValueError: + error.append({ + 'Id': entry['id'], + 'SenderFault': 'true', + 'Code': 'InvalidParameterValue', + 'Message': 'Visibility timeout invalid' + }) + continue + + try: + self.sqs_backend.change_message_visibility( + queue_name=queue_name, + receipt_handle=entry['receipt_handle'], + visibility_timeout=visibility_timeout + ) + success.append(entry['id']) + except ReceiptHandleIsInvalid as e: + error.append({ + 'Id': entry['id'], + 'SenderFault': 'true', + 'Code': 'ReceiptHandleIsInvalid', + 'Message': e.description + }) + except MessageNotInflight as e: + error.append({ + 'Id': entry['id'], + 'SenderFault': 'false', + 'Code': 'AWS.SimpleQueueService.MessageNotInflight', + 'Message': e.description + }) + + template = self.response_template(CHANGE_MESSAGE_VISIBILITY_BATCH_RESPONSE) + return template.render(success=success, errors=error) + def get_queue_attributes(self): queue_name = self._get_queue_name() - queue = self.sqs_backend.get_queue(queue_name) + try: + queue = self.sqs_backend.get_queue(queue_name) + except QueueDoesNotExist as e: + return self._error('QueueDoesNotExist', e.description) + template = self.response_template(GET_QUEUE_ATTRIBUTES_RESPONSE) return template.render(queue=queue) @@ -250,7 +308,11 @@ class SQSResponse(BaseResponse): def receive_message(self): queue_name = self._get_queue_name() - queue = self.sqs_backend.get_queue(queue_name) + + try: + queue = self.sqs_backend.get_queue(queue_name) + except QueueDoesNotExist as e: + return self._error('QueueDoesNotExist', e.description) try: message_count = int(self.querystring.get("MaxNumberOfMessages")[0]) @@ -272,8 +334,62 @@ class SQSResponse(BaseResponse): messages = self.sqs_backend.receive_messages( queue_name, message_count, wait_time, visibility_timeout) template = self.response_template(RECEIVE_MESSAGE_RESPONSE) - output = template.render(messages=messages) - return output + return template.render(messages=messages) + + def list_dead_letter_source_queues(self): + request_url = urlparse(self.uri) + queue_name = self._get_queue_name() + + source_queue_urls = self.sqs_backend.list_dead_letter_source_queues(queue_name) + + template = self.response_template(LIST_DEAD_LETTER_SOURCE_QUEUES_RESPONSE) + return template.render(queues=source_queue_urls, request_url=request_url) + + def add_permission(self): + queue_name = self._get_queue_name() + actions = self._get_multi_param('ActionName') + account_ids = self._get_multi_param('AWSAccountId') + label = self._get_param('Label') + + 
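+        # Validation happens in the backend: add_permission in models.py (above) raises InvalidParameterValue for missing or unknown actions and for empty account id lists.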
self.sqs_backend.add_permission(queue_name, actions, account_ids, label) + + template = self.response_template(ADD_PERMISSION_RESPONSE) + return template.render() + + def remove_permission(self): + queue_name = self._get_queue_name() + label = self._get_param('Label') + + self.sqs_backend.remove_permission(queue_name, label) + + template = self.response_template(REMOVE_PERMISSION_RESPONSE) + return template.render() + + def tag_queue(self): + queue_name = self._get_queue_name() + tags = self._get_map_prefix('Tag', key_end='.Key', value_end='.Value') + + self.sqs_backend.tag_queue(queue_name, tags) + + template = self.response_template(TAG_QUEUE_RESPONSE) + return template.render() + + def untag_queue(self): + queue_name = self._get_queue_name() + tag_keys = self._get_multi_param('TagKey') + + self.sqs_backend.untag_queue(queue_name, tag_keys) + + template = self.response_template(UNTAG_QUEUE_RESPONSE) + return template.render() + + def list_queue_tags(self): + queue_name = self._get_queue_name() + + queue = self.sqs_backend.get_queue(queue_name) + + template = self.response_template(LIST_QUEUE_TAGS_RESPONSE) + return template.render(tags=queue.tags) CREATE_QUEUE_RESPONSE = """ @@ -282,7 +398,7 @@ CREATE_QUEUE_RESPONSE = """ {{ queue.visibility_timeout }} - 7a62c49f-347e-4fc4-9331-6e8e7a96aa73 + {{ requestid }} """ @@ -291,7 +407,7 @@ GET_QUEUE_URL_RESPONSE = """ {{ queue.url(request_url) }} - 470a6f13-2ed9-4181-ad8a-2fdea142988e + {{ requestid }} """ @@ -302,13 +418,13 @@ LIST_QUEUES_RESPONSE = """ {% endfor %} - 725275ae-0b9b-4762-b238-436d7c65a1ac + {{ requestid }} """ DELETE_QUEUE_RESPONSE = """ - 6fde8d1e-52cd-4581-8cd9-c512f4c64223 + {{ requestid }} """ @@ -322,13 +438,13 @@ GET_QUEUE_ATTRIBUTES_RESPONSE = """ {% endfor %} - 1ea71be5-b5a2-4f9d-b85a-945d8d08cd0b + {{ requestid }} """ SET_QUEUE_ATTRIBUTE_RESPONSE = """ - e5cca473-4fc0-4198-a451-8abb94d02c75 + {{ requestid }} """ @@ -345,7 +461,7 @@ SEND_MESSAGE_RESPONSE = """ - 27daac76-34dd-47df-bd01-1f6e873584a0 + {{ requestid }} """ @@ -393,7 +509,7 @@ RECEIVE_MESSAGE_RESPONSE = """ {% endfor %} - b6633655-283d-45b4-aee4-4e84e0ae6afa + {{ requestid }} """ @@ -411,13 +527,13 @@ SEND_MESSAGE_BATCH_RESPONSE = """ {% endfor %} - ca1ad5d0-8271-408b-8d0f-1351bf547e74 + {{ requestid }} """ DELETE_MESSAGE_RESPONSE = """ - b5293cb5-d306-4a17-9048-b263635abe42 + {{ requestid }} """ @@ -430,22 +546,92 @@ DELETE_MESSAGE_BATCH_RESPONSE = """ {% endfor %} - d6f86b7a-74d1-4439-b43f-196a1e29cd85 + {{ requestid }} """ CHANGE_MESSAGE_VISIBILITY_RESPONSE = """ - 6a7a282a-d013-4a59-aba9-335b0fa48bed + {{ requestid }} """ +CHANGE_MESSAGE_VISIBILITY_BATCH_RESPONSE = """ + + {% for success_id in success %} + + {{ success_id }} + + {% endfor %} + {% for error_dict in errors %} + + {{ error_dict['Id'] }} + {{ error_dict['Code'] }} + {{ error_dict['Message'] }} + {{ error_dict['SenderFault'] }} + + {% endfor %} + + + {{ request_id }} + +""" + PURGE_QUEUE_RESPONSE = """ - 6fde8d1e-52cd-4581-8cd9-c512f4c64223 + {{ requestid }} """ +LIST_DEAD_LETTER_SOURCE_QUEUES_RESPONSE = """ + + {% for queue in queues %} + {{ queue.url(request_url) }} + {% endfor %} + + + 8ffb921f-b85e-53d9-abcf-d8d0057f38fc + +""" + +ADD_PERMISSION_RESPONSE = """ + + {{ request_id }} + +""" + +REMOVE_PERMISSION_RESPONSE = """ + + {{ request_id }} + +""" + +TAG_QUEUE_RESPONSE = """ + + {{ request_id }} + +""" + +UNTAG_QUEUE_RESPONSE = """ + + {{ request_id }} + +""" + +LIST_QUEUE_TAGS_RESPONSE = """ + + {% for key, value in tags.items() %} + + {{ key }} + {{ value }} + + {% endfor %} + + + {{ 
request_id }} + +""" + ERROR_TOO_LONG_RESPONSE = """ Sender diff --git a/moto/ssm/models.py b/moto/ssm/models.py index a0e4a2155..d8dc10a4b 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -5,13 +5,17 @@ from collections import defaultdict from moto.core import BaseBackend, BaseModel from moto.ec2 import ec2_backends +import time + class Parameter(BaseModel): - def __init__(self, name, value, type, description, keyid): + def __init__(self, name, value, type, description, keyid, last_modified_date, version): self.name = name self.type = type self.description = description self.keyid = keyid + self.last_modified_date = last_modified_date + self.version = version if self.type == 'SecureString': self.value = self.encrypt(value) @@ -33,8 +37,20 @@ class Parameter(BaseModel): r = { 'Name': self.name, 'Type': self.type, - 'Value': self.decrypt(self.value) if decrypt else self.value + 'Value': self.decrypt(self.value) if decrypt else self.value, + 'Version': self.version, } + + return r + + def describe_response_object(self, decrypt=False): + r = self.response_object(decrypt) + r['LastModifiedDate'] = int(self.last_modified_date) + r['LastModifiedUser'] = 'N/A' + + if self.description: + r['Description'] = self.description + if self.keyid: r['KeyId'] = self.keyid return r @@ -75,16 +91,39 @@ class SimpleSystemManagerBackend(BaseBackend): result.append(self._parameters[name]) return result + def get_parameters_by_path(self, path, with_decryption, recursive): + """Implement the get-parameters-by-path-API in the backend.""" + result = [] + # path could be with or without a trailing /. we handle this + # difference here. + path = path.rstrip('/') + '/' + for param in self._parameters: + if not param.startswith(path): + continue + if '/' in param[len(path) + 1:] and not recursive: + continue + result.append(self._parameters[param]) + + return result + def get_parameter(self, name, with_decryption): if name in self._parameters: return self._parameters[name] return None def put_parameter(self, name, description, value, type, keyid, overwrite): - if not overwrite and name in self._parameters: - return + previous_parameter = self._parameters.get(name) + version = 1 + + if previous_parameter: + version = previous_parameter.version + 1 + + if not overwrite: + return + + last_modified_date = time.time() self._parameters[name] = Parameter( - name, value, type, description, keyid) + name, value, type, description, keyid, last_modified_date, version) def add_tags_to_resource(self, resource_type, resource_id, tags): for key, value in tags.items(): diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index 3b75ada09..0b4ca3b65 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -81,6 +81,25 @@ class SimpleSystemManagerResponse(BaseResponse): response['InvalidParameters'].append(name) return json.dumps(response) + def get_parameters_by_path(self): + path = self._get_param('Path') + with_decryption = self._get_param('WithDecryption') + recursive = self._get_param('Recursive', False) + + result = self.ssm_backend.get_parameters_by_path( + path, with_decryption, recursive + ) + + response = { + 'Parameters': [], + } + + for parameter in result: + param_data = parameter.response_object(with_decryption) + response['Parameters'].append(param_data) + + return json.dumps(response) + def describe_parameters(self): page_size = 10 filters = self._get_param('Filters') @@ -98,7 +117,7 @@ class SimpleSystemManagerResponse(BaseResponse): end = token + page_size for parameter in result[token:]: - 
param_data = parameter.response_object(False) + param_data = parameter.describe_response_object(False) add = False if filters: diff --git a/moto/xray/__init__.py b/moto/xray/__init__.py index 7b32ca0b0..41f00af58 100644 --- a/moto/xray/__init__.py +++ b/moto/xray/__init__.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals from .models import xray_backends from ..core.models import base_decorator +from .mock_client import mock_xray_client, XRaySegment # noqa xray_backend = xray_backends['us-east-1'] mock_xray = base_decorator(xray_backends) diff --git a/moto/xray/mock_client.py b/moto/xray/mock_client.py new file mode 100644 index 000000000..6e2164d63 --- /dev/null +++ b/moto/xray/mock_client.py @@ -0,0 +1,83 @@ +from functools import wraps +import os +from moto.xray import xray_backends +import aws_xray_sdk.core +from aws_xray_sdk.core.context import Context as AWSContext +from aws_xray_sdk.core.emitters.udp_emitter import UDPEmitter + + +class MockEmitter(UDPEmitter): + """ + Replaces the code that sends UDP to the local X-Ray daemon + """ + def __init__(self, daemon_address='127.0.0.1:2000'): + address = os.getenv('AWS_XRAY_DAEMON_ADDRESS_YEAH_NOT_TODAY_MATE', daemon_address) + self._ip, self._port = self._parse_address(address) + + def _xray_backend(self, region): + return xray_backends[region] + + def send_entity(self, entity): + # Hack to get region + # region = entity.subsegments[0].aws['region'] + # xray = self._xray_backend(region) + + # TODO store X-Ray data, pretty sure X-Ray needs refactor for this + pass + + def _send_data(self, data): + raise RuntimeError('Should not be running this') + + +def mock_xray_client(f): + """ + Mocks the X-Ray SDK by pwning its evil singleton with our methods + + The X-Ray SDK has normally been imported and `patched()` called long before we start mocking. + This means the Context() will be very unhappy if an env var isn't present, so we set that, save + the old context, then supply our new context. + We also patch the Emitter by subclassing the UDPEmitter class, replacing its methods and pushing + that into the recorder instance. + """ + @wraps(f) + def _wrapped(*args, **kwargs): + print("Starting X-Ray Patch") + + old_xray_context_var = os.environ.get('AWS_XRAY_CONTEXT_MISSING') + os.environ['AWS_XRAY_CONTEXT_MISSING'] = 'LOG_ERROR' + old_xray_context = aws_xray_sdk.core.xray_recorder._context + old_xray_emitter = aws_xray_sdk.core.xray_recorder._emitter + aws_xray_sdk.core.xray_recorder._context = AWSContext() + aws_xray_sdk.core.xray_recorder._emitter = MockEmitter() + + try: + f(*args, **kwargs) + finally: + + if old_xray_context_var is None: + del os.environ['AWS_XRAY_CONTEXT_MISSING'] + else: + os.environ['AWS_XRAY_CONTEXT_MISSING'] = old_xray_context_var + + aws_xray_sdk.core.xray_recorder._emitter = old_xray_emitter + aws_xray_sdk.core.xray_recorder._context = old_xray_context + + return _wrapped + + +class XRaySegment(object): + """ + XRay is request oriented; when a request comes in, normally middleware like Django (or automatically in lambda) will mark + the start of a segment. This stays open during the lifetime of the request. During that time subsegments may be generated + by calling other SDK aware services or using some boto functions. Once the request is finished, middleware will also stop + the segment, thus causing it to be emitted via UDP. + + During testing we're going to have to control the start and end of a segment via context managers.
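+ + A rough usage sketch (illustrative only; the test name and the boto3 call are made up, and it assumes the X-Ray SDK has already patched boto): + + @mock_xray_client + def test_something(): + with XRaySegment(): + # instrumented calls made here go to the mocked emitter instead of a real X-Ray daemon + boto3.client('dynamodb', region_name='us-east-1').list_tables()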
+ """ + def __enter__(self): + aws_xray_sdk.core.xray_recorder.begin_segment(name='moto_mock', traceid=None, parent_id=None, sampling=1) + + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + aws_xray_sdk.core.xray_recorder.end_segment() diff --git a/requirements-dev.txt b/requirements-dev.txt index 1c001305e..cdd88ab2f 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -3,7 +3,7 @@ mock nose sure==1.2.24 coverage -flake8 +flake8==3.4.1 freezegun flask boto>=2.45.0 diff --git a/scripts/get_amis.py b/scripts/get_amis.py new file mode 100644 index 000000000..81f69c5dd --- /dev/null +++ b/scripts/get_amis.py @@ -0,0 +1,40 @@ +import boto3 +import json + +# Taken from free tear list when creating an instance +instances = [ + 'ami-760aaa0f', 'ami-bb9a6bc2', 'ami-35e92e4c', 'ami-785db401', 'ami-b7e93bce', 'ami-dca37ea5', 'ami-999844e0', + 'ami-9b32e8e2', 'ami-f8e54081', 'ami-bceb39c5', 'ami-03cf127a', 'ami-1ecc1e67', 'ami-c2ff2dbb', 'ami-12c6146b', + 'ami-d1cb19a8', 'ami-61db0918', 'ami-56ec3e2f', 'ami-84ee3cfd', 'ami-86ee3cff', 'ami-f0e83a89', 'ami-1f12c066', + 'ami-afee3cd6', 'ami-1812c061', 'ami-77ed3f0e', 'ami-3bf32142', 'ami-6ef02217', 'ami-f4cf1d8d', 'ami-3df32144', + 'ami-c6f321bf', 'ami-24f3215d', 'ami-fa7cdd89', 'ami-1e749f67', 'ami-a9cc1ed0', 'ami-8104a4f8' +] + +client = boto3.client('ec2', region_name='eu-west-1') + +test = client.describe_images(ImageIds=instances) + +result = [] +for image in test['Images']: + try: + tmp = { + 'ami_id': image['ImageId'], + 'name': image['Name'], + 'description': image['Description'], + 'owner_id': image['OwnerId'], + 'public': image['Public'], + 'virtualization_type': image['VirtualizationType'], + 'architecture': image['Architecture'], + 'state': image['State'], + 'platform': image.get('Platform'), + 'image_type': image['ImageType'], + 'hypervisor': image['Hypervisor'], + 'root_device_name': image['RootDeviceName'], + 'root_device_type': image['RootDeviceType'], + 'sriov': image.get('SriovNetSupport', 'simple') + } + result.append(tmp) + except Exception as err: + pass + +print(json.dumps(result, indent=2)) diff --git a/scripts/implementation_coverage.py b/scripts/implementation_coverage.py index f0d22fc95..13175e9d1 100755 --- a/scripts/implementation_coverage.py +++ b/scripts/implementation_coverage.py @@ -44,7 +44,7 @@ def calculate_implementation_coverage(): def print_implementation_coverage(): coverage = calculate_implementation_coverage() - for service_name in coverage: + for service_name in sorted(coverage): implemented = coverage.get(service_name)['implemented'] not_implemented = coverage.get(service_name)['not_implemented'] operations = sorted(implemented + not_implemented) @@ -56,14 +56,14 @@ def print_implementation_coverage(): else: percentage_implemented = 0 - print("-----------------------") - print("{} - {}% implemented".format(service_name, percentage_implemented)) - print("-----------------------") + print("") + print("## {} - {}% implemented".format(service_name, percentage_implemented)) for op in operations: if op in implemented: - print("[X] {}".format(op)) + print("- [X] {}".format(op)) else: - print("[ ] {}".format(op)) + print("- [ ] {}".format(op)) + if __name__ == '__main__': print_implementation_coverage() diff --git a/scripts/scaffold.py b/scripts/scaffold.py index b1c9f3a0f..6c83eeb50 100755 --- a/scripts/scaffold.py +++ b/scripts/scaffold.py @@ -81,12 +81,14 @@ def select_service_and_operation(): raise click.Abort() return service_name, operation_name +def get_escaped_service(service): + 
return service.replace('-', '') def get_lib_dir(service): - return os.path.join('moto', service) + return os.path.join('moto', get_escaped_service(service)) def get_test_dir(service): - return os.path.join('tests', 'test_{}'.format(service)) + return os.path.join('tests', 'test_{}'.format(get_escaped_service(service))) def render_template(tmpl_dir, tmpl_filename, context, service, alt_filename=None): @@ -117,7 +119,7 @@ def append_mock_to_init_py(service): filtered_lines = [_ for _ in lines if re.match('^from.*mock.*$', _)] last_import_line_index = lines.index(filtered_lines[-1]) - new_line = 'from .{} import mock_{} # flake8: noqa'.format(service, service) + new_line = 'from .{} import mock_{} # flake8: noqa'.format(get_escaped_service(service), get_escaped_service(service)) lines.insert(last_import_line_index + 1, new_line) body = '\n'.join(lines) + '\n' @@ -135,7 +137,7 @@ def append_mock_import_to_backends_py(service): filtered_lines = [_ for _ in lines if re.match('^from.*backends.*$', _)] last_import_line_index = lines.index(filtered_lines[-1]) - new_line = 'from moto.{} import {}_backends'.format(service, service) + new_line = 'from moto.{} import {}_backends'.format(get_escaped_service(service), get_escaped_service(service)) lines.insert(last_import_line_index + 1, new_line) body = '\n'.join(lines) + '\n' @@ -147,13 +149,12 @@ def append_mock_dict_to_backends_py(service): with open(path) as f: lines = [_.replace('\n', '') for _ in f.readlines()] - # 'xray': xray_backends if any(_ for _ in lines if re.match(".*'{}': {}_backends.*".format(service, service), _)): return filtered_lines = [_ for _ in lines if re.match(".*'.*':.*_backends.*", _)] last_elem_line_index = lines.index(filtered_lines[-1]) - new_line = " '{}': {}_backends,".format(service, service) + new_line = " '{}': {}_backends,".format(service, get_escaped_service(service)) prev_line = lines[last_elem_line_index] if not prev_line.endswith('{') and not prev_line.endswith(','): lines[last_elem_line_index] += ',' @@ -166,8 +167,8 @@ def append_mock_dict_to_backends_py(service): def initialize_service(service, operation, api_protocol): """create lib and test dirs if not exist """ - lib_dir = os.path.join('moto', service) - test_dir = os.path.join('tests', 'test_{}'.format(service)) + lib_dir = get_lib_dir(service) + test_dir = get_test_dir(service) print_progress('Initializing service', service, 'green') @@ -178,7 +179,9 @@ def initialize_service(service, operation, api_protocol): tmpl_context = { 'service': service, 'service_class': service_class, - 'endpoint_prefix': endpoint_prefix + 'endpoint_prefix': endpoint_prefix, + 'api_protocol': api_protocol, + 'escaped_service': get_escaped_service(service) } # initialize service directory @@ -202,7 +205,7 @@ def initialize_service(service, operation, api_protocol): os.makedirs(test_dir) tmpl_dir = os.path.join(TEMPLATE_DIR, 'test') for tmpl_filename in os.listdir(tmpl_dir): - alt_filename = 'test_{}.py'.format(service) if tmpl_filename == 'test_service.py.j2' else None + alt_filename = 'test_{}.py'.format(get_escaped_service(service)) if tmpl_filename == 'test_service.py.j2' else None render_template( tmpl_dir, tmpl_filename, tmpl_context, service, alt_filename ) @@ -212,9 +215,16 @@ def initialize_service(service, operation, api_protocol): append_mock_import_to_backends_py(service) append_mock_dict_to_backends_py(service) + def to_upper_camel_case(s): return ''.join([_.title() for _ in s.split('_')]) + +def to_lower_camel_case(s): + words = s.split('_') + return ''.join(words[:1] 
+ [_.title() for _ in words[1:]]) + + def to_snake_case(s): s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower() @@ -229,25 +239,28 @@ def get_function_in_responses(service, operation, protocol): aws_operation_name = to_upper_camel_case(operation) op_model = client._service_model.operation_model(aws_operation_name) - outputs = op_model.output_shape.members + if not hasattr(op_model.output_shape, 'members'): + outputs = {} + else: + outputs = op_model.output_shape.members inputs = op_model.input_shape.members input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND] output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND] - body = 'def {}(self):\n'.format(operation) + body = '\ndef {}(self):\n'.format(operation) for input_name, input_type in inputs.items(): type_name = input_type.type_name if type_name == 'integer': - arg_line_tmpl = ' {} = _get_int_param("{}")\n' + arg_line_tmpl = ' {} = self._get_int_param("{}")\n' elif type_name == 'list': arg_line_tmpl = ' {} = self._get_list_prefix("{}.member")\n' else: arg_line_tmpl = ' {} = self._get_param("{}")\n' body += arg_line_tmpl.format(to_snake_case(input_name), input_name) if output_names: - body += ' {} = self.{}_backend.{}(\n'.format(','.join(output_names), service, operation) + body += ' {} = self.{}_backend.{}(\n'.format(', '.join(output_names), get_escaped_service(service), operation) else: - body += ' self.{}_backend.{}(\n'.format(service, operation) + body += ' self.{}_backend.{}(\n'.format(get_escaped_service(service), operation) for input_name in input_names: body += ' {}={},\n'.format(input_name, input_name) @@ -255,11 +268,11 @@ def get_function_in_responses(service, operation, protocol): if protocol == 'query': body += ' template = self.response_template({}_TEMPLATE)\n'.format(operation.upper()) body += ' return template.render({})\n'.format( - ','.join(['{}={}'.format(_, _) for _ in output_names]) + ', '.join(['{}={}'.format(_, _) for _ in output_names]) ) - elif protocol == 'json': - body += ' # TODO: adjust reponse\n' - body += ' return json.dumps({})\n'.format(','.join(['{}={}'.format(_, _) for _ in output_names])) + elif protocol in ['json', 'rest-json']: + body += ' # TODO: adjust response\n' + body += ' return json.dumps(dict({}))\n'.format(', '.join(['{}={}'.format(to_lower_camel_case(_), _) for _ in output_names])) return body @@ -272,7 +285,10 @@ def get_function_in_models(service, operation): aws_operation_name = to_upper_camel_case(operation) op_model = client._service_model.operation_model(aws_operation_name) inputs = op_model.input_shape.members - outputs = op_model.output_shape.members + if not hasattr(op_model.output_shape, 'members'): + outputs = {} + else: + outputs = op_model.output_shape.members input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND] output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND] if input_names: @@ -280,7 +296,7 @@ def get_function_in_models(service, operation): else: body = 'def {}(self)\n' body += ' # implement here\n' - body += ' return {}\n'.format(', '.join(output_names)) + body += ' return {}\n\n'.format(', '.join(output_names)) return body @@ -388,13 +404,13 @@ def insert_code_to_class(path, base_class, new_code): f.write(body) -def insert_url(service, operation): +def insert_url(service, operation, api_protocol): client = boto3.client(service) service_class = 
client.__class__.__name__ aws_operation_name = to_upper_camel_case(operation) uri = client._service_model.operation_model(aws_operation_name).http['requestUri'] - path = os.path.join(os.path.dirname(__file__), '..', 'moto', service, 'urls.py') + path = os.path.join(os.path.dirname(__file__), '..', 'moto', get_escaped_service(service), 'urls.py') with open(path) as f: lines = [_.replace('\n', '') for _ in f.readlines()] @@ -413,81 +429,55 @@ def insert_url(service, operation): if not prev_line.endswith('{') and not prev_line.endswith(','): lines[last_elem_line_index] += ',' - new_line = " '{0}%s$': %sResponse.dispatch," % ( - uri, service_class - ) + # generate url pattern + if api_protocol == 'rest-json': + new_line = " '{0}/.*$': response.dispatch," + else: + new_line = " '{0}%s$': %sResponse.dispatch," % ( + uri, service_class + ) + if new_line in lines: + return lines.insert(last_elem_line_index + 1, new_line) body = '\n'.join(lines) + '\n' with open(path, 'w') as f: f.write(body) - -def insert_query_codes(service, operation): - func_in_responses = get_function_in_responses(service, operation, 'query') +def insert_codes(service, operation, api_protocol): + func_in_responses = get_function_in_responses(service, operation, api_protocol) func_in_models = get_function_in_models(service, operation) - template = get_response_query_template(service, operation) - # edit responses.py - responses_path = 'moto/{}/responses.py'.format(service) + responses_path = 'moto/{}/responses.py'.format(get_escaped_service(service)) print_progress('inserting code', responses_path, 'green') insert_code_to_class(responses_path, BaseResponse, func_in_responses) # insert template - with open(responses_path) as f: - lines = [_[:-1] for _ in f.readlines()] - lines += template.splitlines() - with open(responses_path, 'w') as f: - f.write('\n'.join(lines)) + if api_protocol == 'query': + template = get_response_query_template(service, operation) + with open(responses_path) as f: + lines = [_[:-1] for _ in f.readlines()] + lines += template.splitlines() + with open(responses_path, 'w') as f: + f.write('\n'.join(lines)) # edit models.py - models_path = 'moto/{}/models.py'.format(service) + models_path = 'moto/{}/models.py'.format(get_escaped_service(service)) print_progress('inserting code', models_path, 'green') insert_code_to_class(models_path, BaseBackend, func_in_models) # edit urls.py - insert_url(service, operation) + insert_url(service, operation, api_protocol) -def insert_json_codes(service, operation): - func_in_responses = get_function_in_responses(service, operation, 'json') - func_in_models = get_function_in_models(service, operation) - - # edit responses.py - responses_path = 'moto/{}/responses.py'.format(service) - print_progress('inserting code', responses_path, 'green') - insert_code_to_class(responses_path, BaseResponse, func_in_responses) - - # edit models.py - models_path = 'moto/{}/models.py'.format(service) - print_progress('inserting code', models_path, 'green') - insert_code_to_class(models_path, BaseBackend, func_in_models) - - # edit urls.py - insert_url(service, operation) - -def insert_restjson_codes(service, operation): - func_in_models = get_function_in_models(service, operation) - - print_progress('skipping inserting code to responses.py', "dont't know how to implement", 'yellow') - # edit models.py - models_path = 'moto/{}/models.py'.format(service) - print_progress('inserting code', models_path, 'green') - insert_code_to_class(models_path, BaseBackend, func_in_models) - - # edit urls.py - 
insert_url(service, operation) @click.command() def main(): service, operation = select_service_and_operation() api_protocol = boto3.client(service)._service_model.metadata['protocol'] initialize_service(service, operation, api_protocol) - if api_protocol == 'query': - insert_query_codes(service, operation) - elif api_protocol == 'json': - insert_json_codes(service, operation) - elif api_protocol == 'rest-json': - insert_restjson_codes(service, operation) + + if api_protocol in ['query', 'json', 'rest-json']: + insert_codes(service, operation, api_protocol) else: print_progress('skip inserting code', 'api protocol "{}" is not supported'.format(api_protocol), 'yellow') diff --git a/scripts/template/lib/__init__.py.j2 b/scripts/template/lib/__init__.py.j2 index 8e5bf50c7..5aade5706 100644 --- a/scripts/template/lib/__init__.py.j2 +++ b/scripts/template/lib/__init__.py.j2 @@ -1,7 +1,7 @@ from __future__ import unicode_literals -from .models import {{ service }}_backends +from .models import {{ escaped_service }}_backends from ..core.models import base_decorator -{{ service }}_backend = {{ service }}_backends['us-east-1'] -mock_{{ service }} = base_decorator({{ service }}_backends) +{{ escaped_service }}_backend = {{ escaped_service }}_backends['us-east-1'] +mock_{{ escaped_service }} = base_decorator({{ escaped_service }}_backends) diff --git a/scripts/template/lib/models.py.j2 b/scripts/template/lib/models.py.j2 index 2a0097c1d..28fa4a4e1 100644 --- a/scripts/template/lib/models.py.j2 +++ b/scripts/template/lib/models.py.j2 @@ -17,4 +17,4 @@ class {{ service_class }}Backend(BaseBackend): available_regions = boto3.session.Session().get_available_regions("{{ service }}") -{{ service }}_backends = {region: {{ service_class }}Backend for region in available_regions} +{{ escaped_service }}_backends = {region: {{ service_class }}Backend(region) for region in available_regions} diff --git a/scripts/template/lib/responses.py.j2 b/scripts/template/lib/responses.py.j2 index 85827e651..60d0048e3 100644 --- a/scripts/template/lib/responses.py.j2 +++ b/scripts/template/lib/responses.py.j2 @@ -1,12 +1,14 @@ from __future__ import unicode_literals from moto.core.responses import BaseResponse -from .models import {{ service }}_backends +from .models import {{ escaped_service }}_backends +import json class {{ service_class }}Response(BaseResponse): + SERVICE_NAME = '{{ service }}' @property - def {{ service }}_backend(self): - return {{ service }}_backends[self.region] + def {{ escaped_service }}_backend(self): + return {{ escaped_service }}_backends[self.region] # add methods from here diff --git a/scripts/template/lib/urls.py.j2 b/scripts/template/lib/urls.py.j2 index 53cc03c0e..47ae52f2d 100644 --- a/scripts/template/lib/urls.py.j2 +++ b/scripts/template/lib/urls.py.j2 @@ -5,5 +5,9 @@ url_bases = [ "https?://{{ endpoint_prefix }}.(.+).amazonaws.com", ] +{% if api_protocol == 'rest-json' %} +response = {{ service_class }}Response() +{% endif %} + url_paths = { } diff --git a/scripts/template/test/test_server.py.j2 b/scripts/template/test/test_server.py.j2 index f3963a743..c85dbf01c 100644 --- a/scripts/template/test/test_server.py.j2 +++ b/scripts/template/test/test_server.py.j2 @@ -3,14 +3,14 @@ from __future__ import unicode_literals import sure # noqa import moto.server as server -from moto import mock_{{ service }} +from moto import mock_{{ escaped_service }} ''' Test the different server responses ''' -@mock_{{ service }} -def test_{{ service }}_list(): +@mock_{{ escaped_service }} +def test_{{ 
escaped_service }}_list(): backend = server.create_backend_app("{{ service }}") test_client = backend.test_client() # do test diff --git a/scripts/template/test/test_service.py.j2 b/scripts/template/test/test_service.py.j2 index 076f92e27..799f6079f 100644 --- a/scripts/template/test/test_service.py.j2 +++ b/scripts/template/test/test_service.py.j2 @@ -2,10 +2,10 @@ from __future__ import unicode_literals import boto3 import sure # noqa -from moto import mock_{{ service }} +from moto import mock_{{ escaped_service }} -@mock_{{ service }} +@mock_{{ escaped_service }} def test_list(): # do test pass diff --git a/setup.py b/setup.py index 3f6804ce0..201622627 100755 --- a/setup.py +++ b/setup.py @@ -9,6 +9,7 @@ install_requires = [ "Jinja2>=2.8", "boto>=2.36.0", "boto3>=1.2.1", + "botocore>=1.7.12", "cookies", "cryptography>=2.0.0", "requests>=2.5", @@ -19,7 +20,9 @@ install_requires = [ "pytz", "python-dateutil<3.0.0,>=2.1", "mock", - "docker>=2.5.1" + "docker>=2.5.1", + "jsondiff==1.1.1", + "aws-xray-sdk>=0.93", ] extras_require = { @@ -36,7 +39,7 @@ else: setup( name='moto', - version='1.1.21', + version='1.1.25', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', diff --git a/tests/test_acm/test_acm.py b/tests/test_acm/test_acm.py index 96e362d1e..ccac48181 100644 --- a/tests/test_acm/test_acm.py +++ b/tests/test_acm/test_acm.py @@ -4,6 +4,7 @@ import os import boto3 from freezegun import freeze_time import sure # noqa +import uuid from botocore.exceptions import ClientError @@ -281,12 +282,37 @@ def test_resend_validation_email_invalid(): def test_request_certificate(): client = boto3.client('acm', region_name='eu-central-1') + token = str(uuid.uuid4()) + resp = client.request_certificate( DomainName='google.com', + IdempotencyToken=token, SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], ) resp.should.contain('CertificateArn') + arn = resp['CertificateArn'] + resp = client.request_certificate( + DomainName='google.com', + IdempotencyToken=token, + SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], + ) + resp['CertificateArn'].should.equal(arn) + + +@mock_acm +def test_request_certificate_no_san(): + client = boto3.client('acm', region_name='eu-central-1') + + resp = client.request_certificate( + DomainName='google.com' + ) + resp.should.contain('CertificateArn') + + resp2 = client.describe_certificate( + CertificateArn=resp['CertificateArn'] + ) + resp2.should.contain('Certificate') # # Also tests the SAN code # # requires Pull: https://github.com/spulec/freezegun/pull/210 diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index b919eb71c..5ed6c3aa5 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -8,7 +8,7 @@ from boto.ec2.autoscale import Tag import boto.ec2.elb import sure # noqa -from moto import mock_autoscaling, mock_ec2_deprecated, mock_elb_deprecated, mock_autoscaling_deprecated, mock_ec2 +from moto import mock_autoscaling, mock_ec2_deprecated, mock_elb_deprecated, mock_elb, mock_autoscaling_deprecated, mock_ec2 from tests.helpers import requires_boto_gte @@ -311,6 +311,7 @@ def test_autoscaling_group_describe_instances(): instances = list(conn.get_all_autoscaling_instances()) instances.should.have.length_of(2) instances[0].launch_config_name.should.equal('tester') + instances[0].health_status.should.equal('Healthy') autoscale_instance_ids = 
[instance.instance_id for instance in instances] ec2_conn = boto.connect_ec2() @@ -484,6 +485,173 @@ Boto3 ''' +@mock_autoscaling +@mock_elb +def test_describe_load_balancers(): + INSTANCE_COUNT = 2 + + elb_client = boto3.client('elb', region_name='us-east-1') + elb_client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + client = boto3.client('autoscaling', region_name='us-east-1') + client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + LoadBalancerNames=['my-lb'], + MinSize=0, + MaxSize=INSTANCE_COUNT, + DesiredCapacity=INSTANCE_COUNT, + Tags=[{ + "ResourceId": 'test_asg', + "Key": 'test_key', + "Value": 'test_value', + "PropagateAtLaunch": True + }] + ) + + response = client.describe_load_balancers(AutoScalingGroupName='test_asg') + list(response['LoadBalancers']).should.have.length_of(1) + response['LoadBalancers'][0]['LoadBalancerName'].should.equal('my-lb') + +@mock_autoscaling +@mock_elb +def test_create_elb_and_autoscaling_group_no_relationship(): + INSTANCE_COUNT = 2 + ELB_NAME = 'my-elb' + + elb_client = boto3.client('elb', region_name='us-east-1') + elb_client.create_load_balancer( + LoadBalancerName=ELB_NAME, + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + client = boto3.client('autoscaling', region_name='us-east-1') + client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=INSTANCE_COUNT, + DesiredCapacity=INSTANCE_COUNT, + ) + + # autoscaling group and elb should have no relationship + response = client.describe_load_balancers( + AutoScalingGroupName='test_asg' + ) + list(response['LoadBalancers']).should.have.length_of(0) + response = elb_client.describe_load_balancers( + LoadBalancerNames=[ELB_NAME] + ) + list(response['LoadBalancerDescriptions'][0]['Instances']).should.have.length_of(0) + + +@mock_autoscaling +@mock_elb +def test_attach_load_balancer(): + INSTANCE_COUNT = 2 + + elb_client = boto3.client('elb', region_name='us-east-1') + elb_client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + client = boto3.client('autoscaling', region_name='us-east-1') + client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=INSTANCE_COUNT, + DesiredCapacity=INSTANCE_COUNT, + Tags=[{ + "ResourceId": 'test_asg', + "Key": 'test_key', + "Value": 'test_value', + "PropagateAtLaunch": True + }] + ) + + response = client.attach_load_balancers( + AutoScalingGroupName='test_asg', + LoadBalancerNames=['my-lb']) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + response = elb_client.describe_load_balancers( + LoadBalancerNames=['my-lb'] + ) + list(response['LoadBalancerDescriptions'][0]['Instances']).should.have.length_of(INSTANCE_COUNT) + + response = 
client.describe_auto_scaling_groups( + AutoScalingGroupNames=["test_asg"] + ) + list(response['AutoScalingGroups'][0]['LoadBalancerNames']).should.have.length_of(1) + + +@mock_autoscaling +@mock_elb +def test_detach_load_balancer(): + INSTANCE_COUNT = 2 + + elb_client = boto3.client('elb', region_name='us-east-1') + elb_client.create_load_balancer( + LoadBalancerName='my-lb', + Listeners=[ + {'Protocol': 'tcp', 'LoadBalancerPort': 80, 'InstancePort': 8080}], + AvailabilityZones=['us-east-1a', 'us-east-1b'] + ) + + client = boto3.client('autoscaling', region_name='us-east-1') + client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + LoadBalancerNames=['my-lb'], + MinSize=0, + MaxSize=INSTANCE_COUNT, + DesiredCapacity=INSTANCE_COUNT, + Tags=[{ + "ResourceId": 'test_asg', + "Key": 'test_key', + "Value": 'test_value', + "PropagateAtLaunch": True + }] + ) + + response = client.detach_load_balancers( + AutoScalingGroupName='test_asg', + LoadBalancerNames=['my-lb']) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + response = elb_client.describe_load_balancers( + LoadBalancerNames=['my-lb'] + ) + list(response['LoadBalancerDescriptions'][0]['Instances']).should.have.length_of(0) + + response = client.describe_load_balancers(AutoScalingGroupName='test_asg') + list(response['LoadBalancers']).should.have.length_of(0) + + @mock_autoscaling def test_create_autoscaling_group_boto3(): client = boto3.client('autoscaling', region_name='us-east-1') @@ -653,3 +821,200 @@ def test_autoscaling_describe_policies_boto3(): response['ScalingPolicies'].should.have.length_of(1) response['ScalingPolicies'][0][ 'PolicyName'].should.equal('test_policy_down') + +@mock_autoscaling +@mock_ec2 +def test_detach_one_instance_decrement(): + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=2, + DesiredCapacity=2, + Tags=[ + {'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'propogated-tag-key', + 'Value': 'propogate-tag-value', + 'PropagateAtLaunch': True + }] + ) + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + instance_to_detach = response['AutoScalingGroups'][0]['Instances'][0]['InstanceId'] + instance_to_keep = response['AutoScalingGroups'][0]['Instances'][1]['InstanceId'] + + ec2_client = boto3.client('ec2', region_name='us-east-1') + + response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) + + response = client.detach_instances( + AutoScalingGroupName='test_asg', + InstanceIds=[instance_to_detach], + ShouldDecrementDesiredCapacity=True + ) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + response['AutoScalingGroups'][0]['Instances'].should.have.length_of(1) + + # test to ensure tag has been removed + response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) + tags = response['Reservations'][0]['Instances'][0]['Tags'] + tags.should.have.length_of(1) + + # test to ensure tag is present on other instance + response = 
ec2_client.describe_instances(InstanceIds=[instance_to_keep]) + tags = response['Reservations'][0]['Instances'][0]['Tags'] + tags.should.have.length_of(2) + +@mock_autoscaling +@mock_ec2 +def test_detach_one_instance(): + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=2, + DesiredCapacity=2, + Tags=[ + {'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'propogated-tag-key', + 'Value': 'propogate-tag-value', + 'PropagateAtLaunch': True + }] + ) + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + instance_to_detach = response['AutoScalingGroups'][0]['Instances'][0]['InstanceId'] + instance_to_keep = response['AutoScalingGroups'][0]['Instances'][1]['InstanceId'] + + ec2_client = boto3.client('ec2', region_name='us-east-1') + + response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) + + response = client.detach_instances( + AutoScalingGroupName='test_asg', + InstanceIds=[instance_to_detach], + ShouldDecrementDesiredCapacity=False + ) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + # test to ensure instance was replaced + response['AutoScalingGroups'][0]['Instances'].should.have.length_of(2) + + response = ec2_client.describe_instances(InstanceIds=[instance_to_detach]) + tags = response['Reservations'][0]['Instances'][0]['Tags'] + tags.should.have.length_of(1) + + response = ec2_client.describe_instances(InstanceIds=[instance_to_keep]) + tags = response['Reservations'][0]['Instances'][0]['Tags'] + tags.should.have.length_of(2) + +@mock_autoscaling +@mock_ec2 +def test_attach_one_instance(): + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=4, + DesiredCapacity=2, + Tags=[ + {'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'propogated-tag-key', + 'Value': 'propogate-tag-value', + 'PropagateAtLaunch': True + }] + ) + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + + ec2 = boto3.resource('ec2', 'us-east-1') + instances_to_add = [x.id for x in ec2.create_instances(ImageId='', MinCount=1, MaxCount=1)] + + response = client.attach_instances( + AutoScalingGroupName='test_asg', + InstanceIds=instances_to_add + ) + response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + response['AutoScalingGroups'][0]['Instances'].should.have.length_of(3) + +@mock_autoscaling +@mock_ec2 +def test_describe_instance_health(): + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=2, + MaxSize=4, + DesiredCapacity=2, + ) + + response = client.describe_auto_scaling_groups( + 
AutoScalingGroupNames=['test_asg'] + ) + + instance1 = response['AutoScalingGroups'][0]['Instances'][0] + instance1['HealthStatus'].should.equal('Healthy') + +@mock_autoscaling +@mock_ec2 +def test_set_instance_health(): + client = boto3.client('autoscaling', region_name='us-east-1') + _ = client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration' + ) + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=2, + MaxSize=4, + DesiredCapacity=2, + ) + + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + + instance1 = response['AutoScalingGroups'][0]['Instances'][0] + instance1['HealthStatus'].should.equal('Healthy') + + client.set_instance_health(InstanceId=instance1['InstanceId'], HealthStatus='Unhealthy') + + response = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test_asg'] + ) + + instance1 = response['AutoScalingGroups'][0]['Instances'][0] + instance1['HealthStatus'].should.equal('Unhealthy') diff --git a/tests/test_autoscaling/test_elbv2.py b/tests/test_autoscaling/test_elbv2.py new file mode 100644 index 000000000..89ec4a399 --- /dev/null +++ b/tests/test_autoscaling/test_elbv2.py @@ -0,0 +1,131 @@ +from __future__ import unicode_literals +import boto3 + +from moto import mock_autoscaling, mock_ec2, mock_elbv2 + +@mock_elbv2 +@mock_ec2 +@mock_autoscaling +def test_attach_detach_target_groups(): + INSTANCE_COUNT = 2 + client = boto3.client('autoscaling', region_name='us-east-1') + elbv2_client = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + + response = elbv2_client.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group_arn = response['TargetGroups'][0]['TargetGroupArn'] + + client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration') + + # create asg, attach to target group on create + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=INSTANCE_COUNT, + DesiredCapacity=INSTANCE_COUNT, + TargetGroupARNs=[target_group_arn], + VPCZoneIdentifier=vpc.id) + # create asg without attaching to target group + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg2', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=INSTANCE_COUNT, + DesiredCapacity=INSTANCE_COUNT, + VPCZoneIdentifier=vpc.id) + + response = client.describe_load_balancer_target_groups( + AutoScalingGroupName='test_asg') + list(response['LoadBalancerTargetGroups']).should.have.length_of(1) + + response = elbv2_client.describe_target_health( + TargetGroupArn=target_group_arn) + list(response['TargetHealthDescriptions']).should.have.length_of(INSTANCE_COUNT) + + client.attach_load_balancer_target_groups( + AutoScalingGroupName='test_asg2', + TargetGroupARNs=[target_group_arn]) + + response = elbv2_client.describe_target_health( + TargetGroupArn=target_group_arn) + list(response['TargetHealthDescriptions']).should.have.length_of(INSTANCE_COUNT * 2) + + response = 
client.detach_load_balancer_target_groups( + AutoScalingGroupName='test_asg2', + TargetGroupARNs=[target_group_arn]) + response = elbv2_client.describe_target_health( + TargetGroupArn=target_group_arn) + list(response['TargetHealthDescriptions']).should.have.length_of(INSTANCE_COUNT) + +@mock_elbv2 +@mock_ec2 +@mock_autoscaling +def test_detach_all_target_groups(): + INSTANCE_COUNT = 2 + client = boto3.client('autoscaling', region_name='us-east-1') + elbv2_client = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + + response = elbv2_client.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group_arn = response['TargetGroups'][0]['TargetGroupArn'] + + client.create_launch_configuration( + LaunchConfigurationName='test_launch_configuration') + + client.create_auto_scaling_group( + AutoScalingGroupName='test_asg', + LaunchConfigurationName='test_launch_configuration', + MinSize=0, + MaxSize=INSTANCE_COUNT, + DesiredCapacity=INSTANCE_COUNT, + TargetGroupARNs=[target_group_arn], + VPCZoneIdentifier=vpc.id) + + response = client.describe_load_balancer_target_groups( + AutoScalingGroupName='test_asg') + list(response['LoadBalancerTargetGroups']).should.have.length_of(1) + + response = elbv2_client.describe_target_health( + TargetGroupArn=target_group_arn) + list(response['TargetHealthDescriptions']).should.have.length_of(INSTANCE_COUNT) + + response = client.detach_load_balancer_target_groups( + AutoScalingGroupName='test_asg', + TargetGroupARNs=[target_group_arn]) + + response = elbv2_client.describe_target_health( + TargetGroupArn=target_group_arn) + list(response['TargetHealthDescriptions']).should.have.length_of(0) + response = client.describe_load_balancer_target_groups( + AutoScalingGroupName='test_asg') + list(response['LoadBalancerTargetGroups']).should.have.length_of(0) diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index 317e9f4a2..e7a9f9174 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -12,7 +12,7 @@ import sure # noqa from freezegun import freeze_time from moto import mock_lambda, mock_s3, mock_ec2, settings -_lambda_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2' +_lambda_region = 'us-west-2' def _process_lambda(func_str): @@ -220,7 +220,7 @@ def test_create_function_from_aws_bucket(): result.pop('LastModified') result.should.equal({ 'FunctionName': 'testFunction', - 'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), + 'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), 'Runtime': 'python2.7', 'Role': 'test-iam-role', 'Handler': 'lambda_function.lambda_handler', @@ -265,7 +265,7 @@ def test_create_function_from_zipfile(): result.should.equal({ 'FunctionName': 'testFunction', - 'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), + 'FunctionArn': 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), 'Runtime': 'python2.7', 'Role': 'test-iam-role', 'Handler': 'lambda_function.lambda_handler', @@ -317,30 +317,25 @@ 
def test_get_function(): result['ResponseMetadata'].pop('RetryAttempts', None) result['Configuration'].pop('LastModified') - result.should.equal({ - "Code": { - "Location": "s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/test.zip".format(_lambda_region), - "RepositoryType": "S3" - }, - "Configuration": { - "CodeSha256": hashlib.sha256(zip_content).hexdigest(), - "CodeSize": len(zip_content), - "Description": "test lambda function", - "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), - "FunctionName": "testFunction", - "Handler": "lambda_function.lambda_handler", - "MemorySize": 128, - "Role": "test-iam-role", - "Runtime": "python2.7", - "Timeout": 3, - "Version": '$LATEST', - "VpcConfig": { - "SecurityGroupIds": [], - "SubnetIds": [], - } - }, - 'ResponseMetadata': {'HTTPStatusCode': 200}, - }) + result['Code']['Location'].should.equal('s3://awslambda-{0}-tasks.s3-{0}.amazonaws.com/test.zip'.format(_lambda_region)) + result['Code']['RepositoryType'].should.equal('S3') + + result['Configuration']['CodeSha256'].should.equal(hashlib.sha256(zip_content).hexdigest()) + result['Configuration']['CodeSize'].should.equal(len(zip_content)) + result['Configuration']['Description'].should.equal('test lambda function') + result['Configuration'].should.contain('FunctionArn') + result['Configuration']['FunctionName'].should.equal('testFunction') + result['Configuration']['Handler'].should.equal('lambda_function.lambda_handler') + result['Configuration']['MemorySize'].should.equal(128) + result['Configuration']['Role'].should.equal('test-iam-role') + result['Configuration']['Runtime'].should.equal('python2.7') + result['Configuration']['Timeout'].should.equal(3) + result['Configuration']['Version'].should.equal('$LATEST') + result['Configuration'].should.contain('VpcConfig') + + # Test get function with + result = conn.get_function(FunctionName='testFunction', Qualifier='$LATEST') + result['Configuration']['Version'].should.equal('$LATEST') @mock_lambda @@ -380,6 +375,52 @@ def test_delete_function(): FunctionName='testFunctionThatDoesntExist').should.throw(botocore.client.ClientError) +@mock_lambda +@mock_s3 +def test_publish(): + s3_conn = boto3.client('s3', 'us-west-2') + s3_conn.create_bucket(Bucket='test-bucket') + + zip_content = get_test_zip_file2() + s3_conn.put_object(Bucket='test-bucket', Key='test.zip', Body=zip_content) + conn = boto3.client('lambda', 'us-west-2') + + conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'S3Bucket': 'test-bucket', + 'S3Key': 'test.zip', + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + function_list = conn.list_functions() + function_list['Functions'].should.have.length_of(1) + latest_arn = function_list['Functions'][0]['FunctionArn'] + + conn.publish_version(FunctionName='testFunction') + + function_list = conn.list_functions() + function_list['Functions'].should.have.length_of(2) + + # #SetComprehension ;-) + published_arn = list({f['FunctionArn'] for f in function_list['Functions']} - {latest_arn})[0] + published_arn.should.contain('testFunction:1') + + conn.delete_function(FunctionName='testFunction', Qualifier='1') + + function_list = conn.list_functions() + function_list['Functions'].should.have.length_of(1) + function_list['Functions'][0]['FunctionArn'].should.contain('testFunction:$LATEST') + + + @mock_lambda @mock_s3 @freeze_time('2015-01-01 00:00:00') @@ -420,7 
+461,7 @@ def test_list_create_list_get_delete_list(): "CodeSha256": hashlib.sha256(zip_content).hexdigest(), "CodeSize": len(zip_content), "Description": "test lambda function", - "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), + "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), "FunctionName": "testFunction", "Handler": "lambda_function.lambda_handler", "MemorySize": 128, @@ -488,6 +529,7 @@ def lambda_handler(event, context): assert 'FunctionError' in result assert result['FunctionError'] == 'Handled' + @mock_lambda @mock_s3 def test_tags(): @@ -554,6 +596,7 @@ def test_tags(): TagKeys=['spam'] )['ResponseMetadata']['HTTPStatusCode'].should.equal(204) + @mock_lambda def test_tags_not_found(): """ @@ -574,6 +617,7 @@ def test_tags_not_found(): TagKeys=['spam'] ).should.throw(botocore.client.ClientError) + @mock_lambda def test_invoke_async_function(): conn = boto3.client('lambda', 'us-west-2') @@ -581,10 +625,8 @@ def test_invoke_async_function(): FunctionName='testFunction', Runtime='python2.7', Role='test-iam-role', - Handler='lambda_function.handler', - Code={ - 'ZipFile': get_test_zip_file1(), - }, + Handler='lambda_function.lambda_handler', + Code={'ZipFile': get_test_zip_file1()}, Description='test lambda function', Timeout=3, MemorySize=128, @@ -593,11 +635,12 @@ def test_invoke_async_function(): success_result = conn.invoke_async( FunctionName='testFunction', - InvokeArgs=json.dumps({ 'test': 'event' }) + InvokeArgs=json.dumps({'test': 'event'}) ) success_result['Status'].should.equal(202) + @mock_lambda @freeze_time('2015-01-01 00:00:00') def test_get_function_created_with_zipfile(): @@ -631,7 +674,7 @@ def test_get_function_created_with_zipfile(): "CodeSha256": hashlib.sha256(zip_content).hexdigest(), "CodeSize": len(zip_content), "Description": "test lambda function", - "FunctionArn":'arn:aws:lambda:{}:123456789012:function:testFunction'.format(_lambda_region), + "FunctionArn":'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), "FunctionName": "testFunction", "Handler": "lambda_function.handler", "MemorySize": 128, @@ -646,6 +689,7 @@ def test_get_function_created_with_zipfile(): }, ) + @mock_lambda def add_function_permission(): conn = boto3.client('lambda', 'us-west-2') diff --git a/tests/test_batch/test_batch.py b/tests/test_batch/test_batch.py new file mode 100644 index 000000000..ec24cd911 --- /dev/null +++ b/tests/test_batch/test_batch.py @@ -0,0 +1,809 @@ +from __future__ import unicode_literals + +import time +import datetime +import boto3 +from botocore.exceptions import ClientError +import sure # noqa +from moto import mock_batch, mock_iam, mock_ec2, mock_ecs, mock_logs +import functools +import nose + + +def expected_failure(test): + @functools.wraps(test) + def inner(*args, **kwargs): + try: + test(*args, **kwargs) + except Exception as err: + raise nose.SkipTest + return inner + +DEFAULT_REGION = 'eu-central-1' + + +def _get_clients(): + return boto3.client('ec2', region_name=DEFAULT_REGION), \ + boto3.client('iam', region_name=DEFAULT_REGION), \ + boto3.client('ecs', region_name=DEFAULT_REGION), \ + boto3.client('logs', region_name=DEFAULT_REGION), \ + boto3.client('batch', region_name=DEFAULT_REGION) + + +def _setup(ec2_client, iam_client): + """ + Do prerequisite setup + :return: VPC ID, Subnet ID, Security group ID, IAM Role ARN + :rtype: tuple + """ + resp = ec2_client.create_vpc(CidrBlock='172.30.0.0/24') + vpc_id = 
resp['Vpc']['VpcId'] + resp = ec2_client.create_subnet( + AvailabilityZone='eu-central-1a', + CidrBlock='172.30.0.0/25', + VpcId=vpc_id + ) + subnet_id = resp['Subnet']['SubnetId'] + resp = ec2_client.create_security_group( + Description='test_sg_desc', + GroupName='test_sg', + VpcId=vpc_id + ) + sg_id = resp['GroupId'] + + resp = iam_client.create_role( + RoleName='TestRole', + AssumeRolePolicyDocument='some_policy' + ) + iam_arn = resp['Role']['Arn'] + + return vpc_id, subnet_id, sg_id, iam_arn + + +# Yes, yes it talks to all the things +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_create_managed_compute_environment(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + resp = batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='MANAGED', + state='ENABLED', + computeResources={ + 'type': 'EC2', + 'minvCpus': 5, + 'maxvCpus': 10, + 'desiredvCpus': 5, + 'instanceTypes': [ + 't2.small', + 't2.medium' + ], + 'imageId': 'some_image_id', + 'subnets': [ + subnet_id, + ], + 'securityGroupIds': [ + sg_id, + ], + 'ec2KeyPair': 'string', + 'instanceRole': iam_arn, + 'tags': { + 'string': 'string' + }, + 'bidPercentage': 123, + 'spotIamFleetRole': 'string' + }, + serviceRole=iam_arn + ) + resp.should.contain('computeEnvironmentArn') + resp['computeEnvironmentName'].should.equal(compute_name) + + # Given a t2.medium is 2 vcpu and t2.small is 1, therefore 2 mediums and 1 small should be created + resp = ec2_client.describe_instances() + resp.should.contain('Reservations') + len(resp['Reservations']).should.equal(3) + + # Should have created 1 ECS cluster + resp = ecs_client.list_clusters() + resp.should.contain('clusterArns') + len(resp['clusterArns']).should.equal(1) + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_create_unmanaged_compute_environment(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + resp = batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + resp.should.contain('computeEnvironmentArn') + resp['computeEnvironmentName'].should.equal(compute_name) + + # Its unmanaged so no instances should be created + resp = ec2_client.describe_instances() + resp.should.contain('Reservations') + len(resp['Reservations']).should.equal(0) + + # Should have created 1 ECS cluster + resp = ecs_client.list_clusters() + resp.should.contain('clusterArns') + len(resp['clusterArns']).should.equal(1) + +# TODO create 1000s of tests to test complex option combinations of create environment + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_describe_compute_environment(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + + resp = batch_client.describe_compute_environments() + len(resp['computeEnvironments']).should.equal(1) + resp['computeEnvironments'][0]['computeEnvironmentName'].should.equal(compute_name) + + # Test filtering + resp = batch_client.describe_compute_environments( + 
computeEnvironments=['test1'] + ) + len(resp['computeEnvironments']).should.equal(0) + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_delete_unmanaged_compute_environment(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + + batch_client.delete_compute_environment( + computeEnvironment=compute_name, + ) + + resp = batch_client.describe_compute_environments() + len(resp['computeEnvironments']).should.equal(0) + + resp = ecs_client.list_clusters() + len(resp.get('clusterArns', [])).should.equal(0) + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_delete_managed_compute_environment(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='MANAGED', + state='ENABLED', + computeResources={ + 'type': 'EC2', + 'minvCpus': 5, + 'maxvCpus': 10, + 'desiredvCpus': 5, + 'instanceTypes': [ + 't2.small', + 't2.medium' + ], + 'imageId': 'some_image_id', + 'subnets': [ + subnet_id, + ], + 'securityGroupIds': [ + sg_id, + ], + 'ec2KeyPair': 'string', + 'instanceRole': iam_arn, + 'tags': { + 'string': 'string' + }, + 'bidPercentage': 123, + 'spotIamFleetRole': 'string' + }, + serviceRole=iam_arn + ) + + batch_client.delete_compute_environment( + computeEnvironment=compute_name, + ) + + resp = batch_client.describe_compute_environments() + len(resp['computeEnvironments']).should.equal(0) + + resp = ec2_client.describe_instances() + resp.should.contain('Reservations') + len(resp['Reservations']).should.equal(3) + for reservation in resp['Reservations']: + reservation['Instances'][0]['State']['Name'].should.equal('terminated') + + resp = ecs_client.list_clusters() + len(resp.get('clusterArns', [])).should.equal(0) + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_update_unmanaged_compute_environment_state(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + + batch_client.update_compute_environment( + computeEnvironment=compute_name, + state='DISABLED' + ) + + resp = batch_client.describe_compute_environments() + len(resp['computeEnvironments']).should.equal(1) + resp['computeEnvironments'][0]['state'].should.equal('DISABLED') + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_create_job_queue(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + resp = batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + arn = resp['computeEnvironmentArn'] + + resp = batch_client.create_job_queue( + jobQueueName='test_job_queue', + state='ENABLED', + priority=123, + computeEnvironmentOrder=[ + { + 'order': 123, + 'computeEnvironment': arn + }, + ] + ) + 
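Stepping back from the individual assertions: every job-queue test here repeats the same fixture dance. A minimal, self-contained sketch of that pattern follows; the role, environment and queue names are arbitrary, and the region simply matches DEFAULT_REGION above.

```python
import boto3
from moto import mock_batch, mock_ec2, mock_ecs, mock_iam


@mock_ec2
@mock_ecs
@mock_iam
@mock_batch
def example_unmanaged_queue():
    iam = boto3.client('iam', region_name='eu-central-1')
    batch = boto3.client('batch', region_name='eu-central-1')

    # The mocked IAM backend does not validate the assume-role policy document.
    role_arn = iam.create_role(
        RoleName='ExampleRole',
        AssumeRolePolicyDocument='example_policy'
    )['Role']['Arn']

    env_arn = batch.create_compute_environment(
        computeEnvironmentName='example_env',
        type='UNMANAGED',
        state='ENABLED',
        serviceRole=role_arn
    )['computeEnvironmentArn']

    queue_arn = batch.create_job_queue(
        jobQueueName='example_queue',
        state='ENABLED',
        priority=1,
        computeEnvironmentOrder=[{'order': 1, 'computeEnvironment': env_arn}]
    )['jobQueueArn']
    return queue_arn
```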
resp.should.contain('jobQueueArn') + resp.should.contain('jobQueueName') + queue_arn = resp['jobQueueArn'] + + resp = batch_client.describe_job_queues() + resp.should.contain('jobQueues') + len(resp['jobQueues']).should.equal(1) + resp['jobQueues'][0]['jobQueueArn'].should.equal(queue_arn) + + resp = batch_client.describe_job_queues(jobQueues=['test_invalid_queue']) + resp.should.contain('jobQueues') + len(resp['jobQueues']).should.equal(0) + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_job_queue_bad_arn(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + resp = batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + arn = resp['computeEnvironmentArn'] + + try: + batch_client.create_job_queue( + jobQueueName='test_job_queue', + state='ENABLED', + priority=123, + computeEnvironmentOrder=[ + { + 'order': 123, + 'computeEnvironment': arn + 'LALALA' + }, + ] + ) + except ClientError as err: + err.response['Error']['Code'].should.equal('ClientException') + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_update_job_queue(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + resp = batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + arn = resp['computeEnvironmentArn'] + + resp = batch_client.create_job_queue( + jobQueueName='test_job_queue', + state='ENABLED', + priority=123, + computeEnvironmentOrder=[ + { + 'order': 123, + 'computeEnvironment': arn + }, + ] + ) + queue_arn = resp['jobQueueArn'] + + batch_client.update_job_queue( + jobQueue=queue_arn, + priority=5 + ) + + resp = batch_client.describe_job_queues() + resp.should.contain('jobQueues') + len(resp['jobQueues']).should.equal(1) + resp['jobQueues'][0]['priority'].should.equal(5) + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_update_job_queue(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + resp = batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + arn = resp['computeEnvironmentArn'] + + resp = batch_client.create_job_queue( + jobQueueName='test_job_queue', + state='ENABLED', + priority=123, + computeEnvironmentOrder=[ + { + 'order': 123, + 'computeEnvironment': arn + }, + ] + ) + queue_arn = resp['jobQueueArn'] + + batch_client.delete_job_queue( + jobQueue=queue_arn + ) + + resp = batch_client.describe_job_queues() + resp.should.contain('jobQueues') + len(resp['jobQueues']).should.equal(0) + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_register_task_definition(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + resp = batch_client.register_job_definition( + jobDefinitionName='sleep10', + type='container', + containerProperties={ + 'image': 'busybox', + 'vcpus': 1, + 'memory': 128, + 'command': ['sleep', '10'] + } + ) + + resp.should.contain('jobDefinitionArn') + 
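The revision behaviour that test_reregister_task_definition spells out in full boils down to a few lines. This is a sketch only, reusing the same 'sleep10'/busybox definition the tests use.

```python
import boto3
from moto import mock_batch


@mock_batch
def example_job_definition_revisions():
    batch = boto3.client('batch', region_name='eu-central-1')
    props = {'image': 'busybox', 'vcpus': 1, 'memory': 128, 'command': ['sleep', '10']}

    first = batch.register_job_definition(
        jobDefinitionName='sleep10', type='container', containerProperties=props)
    second = batch.register_job_definition(
        jobDefinitionName='sleep10', type='container', containerProperties=props)

    # Re-registering the same name bumps the revision and yields a new ARN:
    # ...:job-definition/sleep10:1 for the first call, ...:sleep10:2 for the second.
    assert first['revision'] == 1
    assert second['revision'] == 2
    assert first['jobDefinitionArn'] != second['jobDefinitionArn']
```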
resp.should.contain('jobDefinitionName') + resp.should.contain('revision') + + assert resp['jobDefinitionArn'].endswith('{0}:{1}'.format(resp['jobDefinitionName'], resp['revision'])) + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_reregister_task_definition(): + # Reregistering task with the same name bumps the revision number + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + resp1 = batch_client.register_job_definition( + jobDefinitionName='sleep10', + type='container', + containerProperties={ + 'image': 'busybox', + 'vcpus': 1, + 'memory': 128, + 'command': ['sleep', '10'] + } + ) + + resp1.should.contain('jobDefinitionArn') + resp1.should.contain('jobDefinitionName') + resp1.should.contain('revision') + + assert resp1['jobDefinitionArn'].endswith('{0}:{1}'.format(resp1['jobDefinitionName'], resp1['revision'])) + resp1['revision'].should.equal(1) + + resp2 = batch_client.register_job_definition( + jobDefinitionName='sleep10', + type='container', + containerProperties={ + 'image': 'busybox', + 'vcpus': 1, + 'memory': 68, + 'command': ['sleep', '10'] + } + ) + resp2['revision'].should.equal(2) + + resp2['jobDefinitionArn'].should_not.equal(resp1['jobDefinitionArn']) + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_delete_task_definition(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + resp = batch_client.register_job_definition( + jobDefinitionName='sleep10', + type='container', + containerProperties={ + 'image': 'busybox', + 'vcpus': 1, + 'memory': 128, + 'command': ['sleep', '10'] + } + ) + + batch_client.deregister_job_definition(jobDefinition=resp['jobDefinitionArn']) + + resp = batch_client.describe_job_definitions() + len(resp['jobDefinitions']).should.equal(0) + + +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_describe_task_definition(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + batch_client.register_job_definition( + jobDefinitionName='sleep10', + type='container', + containerProperties={ + 'image': 'busybox', + 'vcpus': 1, + 'memory': 128, + 'command': ['sleep', '10'] + } + ) + batch_client.register_job_definition( + jobDefinitionName='sleep10', + type='container', + containerProperties={ + 'image': 'busybox', + 'vcpus': 1, + 'memory': 64, + 'command': ['sleep', '10'] + } + ) + batch_client.register_job_definition( + jobDefinitionName='test1', + type='container', + containerProperties={ + 'image': 'busybox', + 'vcpus': 1, + 'memory': 64, + 'command': ['sleep', '10'] + } + ) + + resp = batch_client.describe_job_definitions( + jobDefinitionName='sleep10' + ) + len(resp['jobDefinitions']).should.equal(2) + + resp = batch_client.describe_job_definitions() + len(resp['jobDefinitions']).should.equal(3) + + resp = batch_client.describe_job_definitions( + jobDefinitions=['sleep10', 'test1'] + ) + len(resp['jobDefinitions']).should.equal(3) + + +# SLOW TESTS +@expected_failure +@mock_logs +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_submit_job(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + resp = batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + 
type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + arn = resp['computeEnvironmentArn'] + + resp = batch_client.create_job_queue( + jobQueueName='test_job_queue', + state='ENABLED', + priority=123, + computeEnvironmentOrder=[ + { + 'order': 123, + 'computeEnvironment': arn + }, + ] + ) + queue_arn = resp['jobQueueArn'] + + resp = batch_client.register_job_definition( + jobDefinitionName='sleep10', + type='container', + containerProperties={ + 'image': 'busybox', + 'vcpus': 1, + 'memory': 128, + 'command': ['sleep', '10'] + } + ) + job_def_arn = resp['jobDefinitionArn'] + + resp = batch_client.submit_job( + jobName='test1', + jobQueue=queue_arn, + jobDefinition=job_def_arn + ) + job_id = resp['jobId'] + + future = datetime.datetime.now() + datetime.timedelta(seconds=30) + + while datetime.datetime.now() < future: + resp = batch_client.describe_jobs(jobs=[job_id]) + print("{0}:{1} {2}".format(resp['jobs'][0]['jobName'], resp['jobs'][0]['jobId'], resp['jobs'][0]['status'])) + + if resp['jobs'][0]['status'] == 'FAILED': + raise RuntimeError('Batch job failed') + if resp['jobs'][0]['status'] == 'SUCCEEDED': + break + time.sleep(0.5) + else: + raise RuntimeError('Batch job timed out') + + resp = logs_client.describe_log_streams(logGroupName='/aws/batch/job') + len(resp['logStreams']).should.equal(1) + ls_name = resp['logStreams'][0]['logStreamName'] + + resp = logs_client.get_log_events(logGroupName='/aws/batch/job', logStreamName=ls_name) + len(resp['events']).should.be.greater_than(5) + + +@expected_failure +@mock_logs +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_list_jobs(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + resp = batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + arn = resp['computeEnvironmentArn'] + + resp = batch_client.create_job_queue( + jobQueueName='test_job_queue', + state='ENABLED', + priority=123, + computeEnvironmentOrder=[ + { + 'order': 123, + 'computeEnvironment': arn + }, + ] + ) + queue_arn = resp['jobQueueArn'] + + resp = batch_client.register_job_definition( + jobDefinitionName='sleep10', + type='container', + containerProperties={ + 'image': 'busybox', + 'vcpus': 1, + 'memory': 128, + 'command': ['sleep', '10'] + } + ) + job_def_arn = resp['jobDefinitionArn'] + + resp = batch_client.submit_job( + jobName='test1', + jobQueue=queue_arn, + jobDefinition=job_def_arn + ) + job_id1 = resp['jobId'] + resp = batch_client.submit_job( + jobName='test2', + jobQueue=queue_arn, + jobDefinition=job_def_arn + ) + job_id2 = resp['jobId'] + + future = datetime.datetime.now() + datetime.timedelta(seconds=30) + + resp_finished_jobs = batch_client.list_jobs( + jobQueue=queue_arn, + jobStatus='SUCCEEDED' + ) + + # Wait only as long as it takes to run the jobs + while datetime.datetime.now() < future: + resp = batch_client.describe_jobs(jobs=[job_id1, job_id2]) + + any_failed_jobs = any([job['status'] == 'FAILED' for job in resp['jobs']]) + succeeded_jobs = all([job['status'] == 'SUCCEEDED' for job in resp['jobs']]) + + if any_failed_jobs: + raise RuntimeError('A Batch job failed') + if succeeded_jobs: + break + time.sleep(0.5) + else: + raise RuntimeError('Batch jobs timed out') + + resp_finished_jobs2 = batch_client.list_jobs( + jobQueue=queue_arn, + jobStatus='SUCCEEDED' + ) + + 
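Both of the slow tests here (test_submit_job and test_list_jobs) poll describe_jobs with the same inline loop. Factored out, the idea is simply the following; this helper is illustrative and not part of the patch.

```python
import datetime
import time


def wait_for_job(batch_client, job_id, timeout_seconds=30):
    """Poll describe_jobs until the job succeeds, fails, or the deadline passes."""
    deadline = datetime.datetime.now() + datetime.timedelta(seconds=timeout_seconds)
    while datetime.datetime.now() < deadline:
        job = batch_client.describe_jobs(jobs=[job_id])['jobs'][0]
        if job['status'] == 'FAILED':
            raise RuntimeError('Batch job failed: {0}'.format(job.get('statusReason', '')))
        if job['status'] == 'SUCCEEDED':
            return job
        time.sleep(0.5)
    raise RuntimeError('Batch job timed out')
```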
len(resp_finished_jobs['jobSummaryList']).should.equal(0) + len(resp_finished_jobs2['jobSummaryList']).should.equal(2) + + +@expected_failure +@mock_logs +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_terminate_job(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + compute_name = 'test_compute_env' + resp = batch_client.create_compute_environment( + computeEnvironmentName=compute_name, + type='UNMANAGED', + state='ENABLED', + serviceRole=iam_arn + ) + arn = resp['computeEnvironmentArn'] + + resp = batch_client.create_job_queue( + jobQueueName='test_job_queue', + state='ENABLED', + priority=123, + computeEnvironmentOrder=[ + { + 'order': 123, + 'computeEnvironment': arn + }, + ] + ) + queue_arn = resp['jobQueueArn'] + + resp = batch_client.register_job_definition( + jobDefinitionName='sleep10', + type='container', + containerProperties={ + 'image': 'busybox', + 'vcpus': 1, + 'memory': 128, + 'command': ['sleep', '10'] + } + ) + job_def_arn = resp['jobDefinitionArn'] + + resp = batch_client.submit_job( + jobName='test1', + jobQueue=queue_arn, + jobDefinition=job_def_arn + ) + job_id = resp['jobId'] + + time.sleep(2) + + batch_client.terminate_job(jobId=job_id, reason='test_terminate') + + time.sleep(1) + + resp = batch_client.describe_jobs(jobs=[job_id]) + resp['jobs'][0]['jobName'].should.equal('test1') + resp['jobs'][0]['status'].should.equal('FAILED') + resp['jobs'][0]['statusReason'].should.equal('test_terminate') + diff --git a/tests/test_batch/test_cloudformation.py b/tests/test_batch/test_cloudformation.py new file mode 100644 index 000000000..1e37aa3a6 --- /dev/null +++ b/tests/test_batch/test_cloudformation.py @@ -0,0 +1,247 @@ +from __future__ import unicode_literals + +import time +import datetime +import boto3 +from botocore.exceptions import ClientError +import sure # noqa +from moto import mock_batch, mock_iam, mock_ec2, mock_ecs, mock_logs, mock_cloudformation +import functools +import nose +import json + +DEFAULT_REGION = 'eu-central-1' + + +def _get_clients(): + return boto3.client('ec2', region_name=DEFAULT_REGION), \ + boto3.client('iam', region_name=DEFAULT_REGION), \ + boto3.client('ecs', region_name=DEFAULT_REGION), \ + boto3.client('logs', region_name=DEFAULT_REGION), \ + boto3.client('batch', region_name=DEFAULT_REGION) + + +def _setup(ec2_client, iam_client): + """ + Do prerequisite setup + :return: VPC ID, Subnet ID, Security group ID, IAM Role ARN + :rtype: tuple + """ + resp = ec2_client.create_vpc(CidrBlock='172.30.0.0/24') + vpc_id = resp['Vpc']['VpcId'] + resp = ec2_client.create_subnet( + AvailabilityZone='eu-central-1a', + CidrBlock='172.30.0.0/25', + VpcId=vpc_id + ) + subnet_id = resp['Subnet']['SubnetId'] + resp = ec2_client.create_security_group( + Description='test_sg_desc', + GroupName='test_sg', + VpcId=vpc_id + ) + sg_id = resp['GroupId'] + + resp = iam_client.create_role( + RoleName='TestRole', + AssumeRolePolicyDocument='some_policy' + ) + iam_arn = resp['Role']['Arn'] + + return vpc_id, subnet_id, sg_id, iam_arn + + +@mock_cloudformation() +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_create_env_cf(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + create_environment_template = { + 'Resources': { + "ComputeEnvironment": { + "Type": "AWS::Batch::ComputeEnvironment", + "Properties": { + "Type": "MANAGED", + 
"ComputeResources": { + "Type": "EC2", + "MinvCpus": 0, + "DesiredvCpus": 0, + "MaxvCpus": 64, + "InstanceTypes": [ + "optimal" + ], + "Subnets": [subnet_id], + "SecurityGroupIds": [sg_id], + "InstanceRole": iam_arn + }, + "ServiceRole": iam_arn + } + } + } + } + cf_json = json.dumps(create_environment_template) + + cf_conn = boto3.client('cloudformation', DEFAULT_REGION) + stack_id = cf_conn.create_stack( + StackName='test_stack', + TemplateBody=cf_json, + )['StackId'] + + stack_resources = cf_conn.list_stack_resources(StackName=stack_id) + + stack_resources['StackResourceSummaries'][0]['ResourceStatus'].should.equal('CREATE_COMPLETE') + # Spot checks on the ARN + stack_resources['StackResourceSummaries'][0]['PhysicalResourceId'].startswith('arn:aws:batch:') + stack_resources['StackResourceSummaries'][0]['PhysicalResourceId'].should.contain('test_stack') + + +@mock_cloudformation() +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_create_job_queue_cf(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + create_environment_template = { + 'Resources': { + "ComputeEnvironment": { + "Type": "AWS::Batch::ComputeEnvironment", + "Properties": { + "Type": "MANAGED", + "ComputeResources": { + "Type": "EC2", + "MinvCpus": 0, + "DesiredvCpus": 0, + "MaxvCpus": 64, + "InstanceTypes": [ + "optimal" + ], + "Subnets": [subnet_id], + "SecurityGroupIds": [sg_id], + "InstanceRole": iam_arn + }, + "ServiceRole": iam_arn + } + }, + + "JobQueue": { + "Type": "AWS::Batch::JobQueue", + "Properties": { + "Priority": 1, + "ComputeEnvironmentOrder": [ + { + "Order": 1, + "ComputeEnvironment": {"Ref": "ComputeEnvironment"} + } + ] + } + }, + } + } + cf_json = json.dumps(create_environment_template) + + cf_conn = boto3.client('cloudformation', DEFAULT_REGION) + stack_id = cf_conn.create_stack( + StackName='test_stack', + TemplateBody=cf_json, + )['StackId'] + + stack_resources = cf_conn.list_stack_resources(StackName=stack_id) + len(stack_resources['StackResourceSummaries']).should.equal(2) + + job_queue_resource = list(filter(lambda item: item['ResourceType'] == 'AWS::Batch::JobQueue', stack_resources['StackResourceSummaries']))[0] + + job_queue_resource['ResourceStatus'].should.equal('CREATE_COMPLETE') + # Spot checks on the ARN + job_queue_resource['PhysicalResourceId'].startswith('arn:aws:batch:') + job_queue_resource['PhysicalResourceId'].should.contain('test_stack') + job_queue_resource['PhysicalResourceId'].should.contain('job-queue/') + + +@mock_cloudformation() +@mock_ec2 +@mock_ecs +@mock_iam +@mock_batch +def test_create_job_def_cf(): + ec2_client, iam_client, ecs_client, logs_client, batch_client = _get_clients() + vpc_id, subnet_id, sg_id, iam_arn = _setup(ec2_client, iam_client) + + create_environment_template = { + 'Resources': { + "ComputeEnvironment": { + "Type": "AWS::Batch::ComputeEnvironment", + "Properties": { + "Type": "MANAGED", + "ComputeResources": { + "Type": "EC2", + "MinvCpus": 0, + "DesiredvCpus": 0, + "MaxvCpus": 64, + "InstanceTypes": [ + "optimal" + ], + "Subnets": [subnet_id], + "SecurityGroupIds": [sg_id], + "InstanceRole": iam_arn + }, + "ServiceRole": iam_arn + } + }, + + "JobQueue": { + "Type": "AWS::Batch::JobQueue", + "Properties": { + "Priority": 1, + "ComputeEnvironmentOrder": [ + { + "Order": 1, + "ComputeEnvironment": {"Ref": "ComputeEnvironment"} + } + ] + } + }, + + "JobDefinition": { + "Type": "AWS::Batch::JobDefinition", + "Properties": { + "Type": "container", + 
"ContainerProperties": { + "Image": { + "Fn::Join": ["", ["137112412989.dkr.ecr.", {"Ref": "AWS::Region"}, ".amazonaws.com/amazonlinux:latest"]] + }, + "Vcpus": 2, + "Memory": 2000, + "Command": ["echo", "Hello world"] + }, + "RetryStrategy": { + "Attempts": 1 + } + } + }, + } + } + cf_json = json.dumps(create_environment_template) + + cf_conn = boto3.client('cloudformation', DEFAULT_REGION) + stack_id = cf_conn.create_stack( + StackName='test_stack', + TemplateBody=cf_json, + )['StackId'] + + stack_resources = cf_conn.list_stack_resources(StackName=stack_id) + len(stack_resources['StackResourceSummaries']).should.equal(3) + + job_def_resource = list(filter(lambda item: item['ResourceType'] == 'AWS::Batch::JobDefinition', stack_resources['StackResourceSummaries']))[0] + + job_def_resource['ResourceStatus'].should.equal('CREATE_COMPLETE') + # Spot checks on the ARN + job_def_resource['PhysicalResourceId'].startswith('arn:aws:batch:') + job_def_resource['PhysicalResourceId'].should.contain('test_stack-JobDef') + job_def_resource['PhysicalResourceId'].should.contain('job-definition/') diff --git a/tests/test_batch/test_server.py b/tests/test_batch/test_server.py new file mode 100644 index 000000000..4a74260a8 --- /dev/null +++ b/tests/test_batch/test_server.py @@ -0,0 +1,19 @@ +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server +from moto import mock_batch + +''' +Test the different server responses +''' + + +@mock_batch +def test_batch_list(): + backend = server.create_backend_app("batch") + test_client = backend.test_client() + + res = test_client.get('/v1/describecomputeenvironments') + res.status_code.should.equal(200) diff --git a/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py b/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py index 5e66bbd86..43a11104b 100644 --- a/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py +++ b/tests/test_cloudformation/fixtures/route53_ec2_instance_with_public_ip.py @@ -1,6 +1,13 @@ from __future__ import unicode_literals template = { + "Parameters": { + "R53ZoneName": { + "Type": "String", + "Default": "my_zone" + } + }, + "Resources": { "Ec2Instance": { "Type": "AWS::EC2::Instance", @@ -13,20 +20,20 @@ template = { "HostedZone": { "Type": "AWS::Route53::HostedZone", "Properties": { - "Name": "my_zone" + "Name": {"Ref": "R53ZoneName"} } }, "myDNSRecord": { "Type": "AWS::Route53::RecordSet", "Properties": { - "HostedZoneName": {"Ref": "HostedZone"}, + "HostedZoneId": {"Ref": "HostedZone"}, "Comment": "DNS name for my instance.", "Name": { "Fn::Join": ["", [ {"Ref": "Ec2Instance"}, ".", {"Ref": "AWS::Region"}, ".", - {"Ref": "HostedZone"}, "." + {"Ref": "R53ZoneName"}, "." 
]] }, "Type": "A", diff --git a/tests/test_cloudformation/fixtures/route53_health_check.py b/tests/test_cloudformation/fixtures/route53_health_check.py index f6a2c9b8e..420cd38ba 100644 --- a/tests/test_cloudformation/fixtures/route53_health_check.py +++ b/tests/test_cloudformation/fixtures/route53_health_check.py @@ -26,7 +26,7 @@ template = { "myDNSRecord": { "Type": "AWS::Route53::RecordSet", "Properties": { - "HostedZoneName": {"Ref": "HostedZone"}, + "HostedZoneId": {"Ref": "HostedZone"}, "Comment": "DNS name for my instance.", "Name": "my_record_set", "Type": "A", diff --git a/tests/test_cloudformation/fixtures/route53_roundrobin.py b/tests/test_cloudformation/fixtures/route53_roundrobin.py index da4fecd4d..199e3e088 100644 --- a/tests/test_cloudformation/fixtures/route53_roundrobin.py +++ b/tests/test_cloudformation/fixtures/route53_roundrobin.py @@ -5,30 +5,37 @@ template = { "Description": "AWS CloudFormation Sample Template Route53_RoundRobin: Sample template showing how to use weighted round robin (WRR) DNS entried via Amazon Route 53. This contrived sample uses weighted CNAME records to illustrate that the weighting influences the return records. It assumes that you already have a Hosted Zone registered with Amazon Route 53. **WARNING** This template creates one or more AWS resources. You will be billed for the AWS resources used if you create a stack from this template.", + "Parameters": { + "R53ZoneName": { + "Type": "String", + "Default": "my_zone" + } + }, + "Resources": { "MyZone": { "Type": "AWS::Route53::HostedZone", "Properties": { - "Name": "my_zone" + "Name": {"Ref": "R53ZoneName"} } }, "MyDNSRecord": { "Type": "AWS::Route53::RecordSetGroup", "Properties": { - "HostedZoneName": {"Ref": "MyZone"}, + "HostedZoneId": {"Ref": "MyZone"}, "Comment": "Contrived example to redirect to aws.amazon.com 75% of the time and www.amazon.com 25% of the time.", "RecordSets": [{ "SetIdentifier": {"Fn::Join": [" ", [{"Ref": "AWS::StackName"}, "AWS"]]}, - "Name": {"Fn::Join": ["", [{"Ref": "AWS::StackName"}, ".", {"Ref": "AWS::Region"}, ".", {"Ref": "MyZone"}, "."]]}, + "Name": {"Fn::Join": ["", [{"Ref": "AWS::StackName"}, ".", {"Ref": "AWS::Region"}, ".", {"Ref": "R53ZoneName"}, "."]]}, "Type": "CNAME", "TTL": "900", "ResourceRecords": ["aws.amazon.com"], "Weight": "3" }, { "SetIdentifier": {"Fn::Join": [" ", [{"Ref": "AWS::StackName"}, "Amazon"]]}, - "Name": {"Fn::Join": ["", [{"Ref": "AWS::StackName"}, ".", {"Ref": "AWS::Region"}, ".", {"Ref": "MyZone"}, "."]]}, + "Name": {"Fn::Join": ["", [{"Ref": "AWS::StackName"}, ".", {"Ref": "AWS::Region"}, ".", {"Ref": "R53ZoneName"}, "."]]}, "Type": "CNAME", "TTL": "900", "ResourceRecords": ["www.amazon.com"], diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index df696d879..c4a138de7 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -38,7 +38,7 @@ from moto import ( mock_sns_deprecated, mock_sqs, mock_sqs_deprecated, -) + mock_elbv2) from .fixtures import ( ec2_classic_eip, @@ -2111,3 +2111,202 @@ def test_stack_spot_fleet(): launch_spec['SubnetId'].should.equal(subnet_id) launch_spec['SpotPrice'].should.equal("0.13") launch_spec['WeightedCapacity'].should.equal(2.0) + + +@mock_ec2 +@mock_elbv2 +@mock_cloudformation +def test_stack_elbv2_resources_integration(): + alb_template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Outputs": { + 
"albdns": { + "Description": "Load balanacer DNS", + "Value": {"Fn::GetAtt": ["alb", "DNSName"]}, + }, + "albname": { + "Description": "Load balancer name", + "Value": {"Fn::GetAtt": ["alb", "LoadBalancerName"]}, + }, + "canonicalhostedzoneid": { + "Description": "Load balancer canonical hosted zone ID", + "Value": {"Fn::GetAtt": ["alb", "CanonicalHostedZoneID"]}, + }, + }, + "Resources": { + "alb": { + "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", + "Properties": { + "Name": "myelbv2", + "Scheme": "internet-facing", + "Subnets": [{ + "Ref": "mysubnet", + }], + "SecurityGroups": [{ + "Ref": "mysg", + }], + "Type": "application", + "IpAddressType": "ipv4", + } + }, + "mytargetgroup1": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "HealthCheckIntervalSeconds": 30, + "HealthCheckPath": "/status", + "HealthCheckPort": 80, + "HealthCheckProtocol": "HTTP", + "HealthCheckTimeoutSeconds": 5, + "HealthyThresholdCount": 30, + "UnhealthyThresholdCount": 5, + "Matcher": { + "HttpCode": "200,201" + }, + "Name": "mytargetgroup1", + "Port": 80, + "Protocol": "HTTP", + "TargetType": "instance", + "Targets": [{ + "Id": { + "Ref": "ec2instance", + "Port": 80, + }, + }], + "VpcId": { + "Ref": "myvpc", + } + } + }, + "mytargetgroup2": { + "Type": "AWS::ElasticLoadBalancingV2::TargetGroup", + "Properties": { + "HealthCheckIntervalSeconds": 30, + "HealthCheckPath": "/status", + "HealthCheckPort": 8080, + "HealthCheckProtocol": "HTTP", + "HealthCheckTimeoutSeconds": 5, + "HealthyThresholdCount": 30, + "UnhealthyThresholdCount": 5, + "Name": "mytargetgroup2", + "Port": 8080, + "Protocol": "HTTP", + "TargetType": "instance", + "Targets": [{ + "Id": { + "Ref": "ec2instance", + "Port": 8080, + }, + }], + "VpcId": { + "Ref": "myvpc", + } + } + }, + "listener": { + "Type": "AWS::ElasticLoadBalancingV2::Listener", + "Properties": { + "DefaultActions": [{ + "Type": "forward", + "TargetGroupArn": {"Ref": "mytargetgroup1"} + }], + "LoadBalancerArn": {"Ref": "alb"}, + "Port": "80", + "Protocol": "HTTP" + } + }, + "myvpc": { + "Type": "AWS::EC2::VPC", + "Properties": { + "CidrBlock": "10.0.0.0/16", + } + }, + "mysubnet": { + "Type": "AWS::EC2::Subnet", + "Properties": { + "CidrBlock": "10.0.0.0/27", + "VpcId": {"Ref": "myvpc"}, + } + }, + "mysg": { + "Type": "AWS::EC2::SecurityGroup", + "Properties": { + "GroupName": "mysg", + "GroupDescription": "test security group", + "VpcId": {"Ref": "myvpc"} + } + }, + "ec2instance": { + "Type": "AWS::EC2::Instance", + "Properties": { + "ImageId": "ami-1234abcd", + "UserData": "some user data", + } + }, + }, + } + alb_template_json = json.dumps(alb_template) + + cfn_conn = boto3.client("cloudformation", "us-west-1") + cfn_conn.create_stack( + StackName="elb_stack", + TemplateBody=alb_template_json, + ) + + elbv2_conn = boto3.client("elbv2", "us-west-1") + + load_balancers = elbv2_conn.describe_load_balancers()['LoadBalancers'] + len(load_balancers).should.equal(1) + load_balancers[0]['LoadBalancerName'].should.equal('myelbv2') + load_balancers[0]['Scheme'].should.equal('internet-facing') + load_balancers[0]['Type'].should.equal('application') + load_balancers[0]['IpAddressType'].should.equal('ipv4') + + target_groups = sorted( + elbv2_conn.describe_target_groups()['TargetGroups'], + key=lambda tg: tg['TargetGroupName']) # sort to do comparison with indexes + len(target_groups).should.equal(2) + target_groups[0]['HealthCheckIntervalSeconds'].should.equal(30) + target_groups[0]['HealthCheckPath'].should.equal('/status') + 
target_groups[0]['HealthCheckPort'].should.equal('80') + target_groups[0]['HealthCheckProtocol'].should.equal('HTTP') + target_groups[0]['HealthCheckTimeoutSeconds'].should.equal(5) + target_groups[0]['HealthyThresholdCount'].should.equal(30) + target_groups[0]['UnhealthyThresholdCount'].should.equal(5) + target_groups[0]['Matcher'].should.equal({'HttpCode': '200,201'}) + target_groups[0]['TargetGroupName'].should.equal('mytargetgroup1') + target_groups[0]['Port'].should.equal(80) + target_groups[0]['Protocol'].should.equal('HTTP') + target_groups[0]['TargetType'].should.equal('instance') + + target_groups[1]['HealthCheckIntervalSeconds'].should.equal(30) + target_groups[1]['HealthCheckPath'].should.equal('/status') + target_groups[1]['HealthCheckPort'].should.equal('8080') + target_groups[1]['HealthCheckProtocol'].should.equal('HTTP') + target_groups[1]['HealthCheckTimeoutSeconds'].should.equal(5) + target_groups[1]['HealthyThresholdCount'].should.equal(30) + target_groups[1]['UnhealthyThresholdCount'].should.equal(5) + target_groups[1]['Matcher'].should.equal({'HttpCode': '200'}) + target_groups[1]['TargetGroupName'].should.equal('mytargetgroup2') + target_groups[1]['Port'].should.equal(8080) + target_groups[1]['Protocol'].should.equal('HTTP') + target_groups[1]['TargetType'].should.equal('instance') + + listeners = elbv2_conn.describe_listeners(LoadBalancerArn=load_balancers[0]['LoadBalancerArn'])['Listeners'] + len(listeners).should.equal(1) + listeners[0]['LoadBalancerArn'].should.equal(load_balancers[0]['LoadBalancerArn']) + listeners[0]['Port'].should.equal(80) + listeners[0]['Protocol'].should.equal('HTTP') + listeners[0]['DefaultActions'].should.equal([{ + "Type": "forward", + "TargetGroupArn": target_groups[0]['TargetGroupArn'] + }]) + + # test outputs + stacks = cfn_conn.describe_stacks(StackName='elb_stack')['Stacks'] + len(stacks).should.equal(1) + + dns = list(filter(lambda item: item['OutputKey'] == 'albdns', stacks[0]['Outputs']))[0] + name = list(filter(lambda item: item['OutputKey'] == 'albname', stacks[0]['Outputs']))[0] + + dns['OutputValue'].should.equal(load_balancers[0]['DNSName']) + name['OutputValue'].should.equal(load_balancers[0]['LoadBalancerName']) diff --git a/tests/test_cloudwatch/test_cloudwatch.py b/tests/test_cloudwatch/test_cloudwatch.py index 9b3f76c36..2f8528855 100644 --- a/tests/test_cloudwatch/test_cloudwatch.py +++ b/tests/test_cloudwatch/test_cloudwatch.py @@ -118,12 +118,3 @@ def test_describe_alarms(): alarms = conn.describe_alarms() alarms.should.have.length_of(0) - - -@mock_cloudwatch_deprecated -def test_describe_state_value_unimplemented(): - conn = boto.connect_cloudwatch() - - conn.describe_alarms() - conn.describe_alarms.when.called_with( - state_value="foo").should.throw(NotImplementedError) diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py index 923ba0b75..e621a642a 100644 --- a/tests/test_cloudwatch/test_cloudwatch_boto3.py +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -87,6 +87,54 @@ def test_get_dashboard_fail(): raise RuntimeError('Should of raised error') +@mock_cloudwatch +def test_alarm_state(): + client = boto3.client('cloudwatch', region_name='eu-central-1') + + client.put_metric_alarm( + AlarmName='testalarm1', + MetricName='cpu', + Namespace='blah', + Period=10, + EvaluationPeriods=5, + Statistic='Average', + Threshold=2, + ComparisonOperator='GreaterThanThreshold', + ) + client.put_metric_alarm( + AlarmName='testalarm2', + MetricName='cpu', + 
Namespace='blah', + Period=10, + EvaluationPeriods=5, + Statistic='Average', + Threshold=2, + ComparisonOperator='GreaterThanThreshold', + ) + + # This is tested implicitly as if it doesnt work the rest will die + client.set_alarm_state( + AlarmName='testalarm1', + StateValue='ALARM', + StateReason='testreason', + StateReasonData='{"some": "json_data"}' + ) + + resp = client.describe_alarms( + StateValue='ALARM' + ) + len(resp['MetricAlarms']).should.equal(1) + resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm1') + + resp = client.describe_alarms( + StateValue='OK' + ) + len(resp['MetricAlarms']).should.equal(1) + resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm2') + + # Just for sanity + resp = client.describe_alarms() + len(resp['MetricAlarms']).should.equal(2) diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 85d8feb34..05daf23aa 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -28,13 +28,13 @@ except ImportError: @mock_dynamodb2_deprecated def test_list_tables(): name = 'TestTable' - #{'schema': } + # Should make tables properly with boto dynamodb_backend2.create_table(name, schema=[ {u'KeyType': u'HASH', u'AttributeName': u'forum_name'}, {u'KeyType': u'RANGE', u'AttributeName': u'subject'} ]) conn = boto.dynamodb2.connect_to_region( - 'us-west-2', + 'us-east-1', aws_access_key_id="ak", aws_secret_access_key="sk") assert conn.list_tables()["TableNames"] == [name] @@ -43,6 +43,7 @@ def test_list_tables(): @requires_boto_gte("2.9") @mock_dynamodb2_deprecated def test_list_tables_layer_1(): + # Should make tables properly with boto dynamodb_backend2.create_table("test_1", schema=[ {u'KeyType': u'HASH', u'AttributeName': u'name'} ]) @@ -50,7 +51,7 @@ def test_list_tables_layer_1(): {u'KeyType': u'HASH', u'AttributeName': u'name'} ]) conn = boto.dynamodb2.connect_to_region( - 'us-west-2', + 'us-east-1', aws_access_key_id="ak", aws_secret_access_key="sk") @@ -88,12 +89,22 @@ def test_list_table_tags(): ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) table_description = conn.describe_table(TableName=name) arn = table_description['Table']['TableArn'] - tags = [{'Key':'TestTag', 'Value': 'TestValue'}] - conn.tag_resource(ResourceArn=arn, - Tags=tags) + + # Tag table + tags = [{'Key': 'TestTag', 'Value': 'TestValue'}, {'Key': 'TestTag2', 'Value': 'TestValue2'}] + conn.tag_resource(ResourceArn=arn, Tags=tags) + + # Check tags resp = conn.list_tags_of_resource(ResourceArn=arn) assert resp["Tags"] == tags + # Remove 1 tag + conn.untag_resource(ResourceArn=arn, TagKeys=['TestTag']) + + # Check tags + resp = conn.list_tags_of_resource(ResourceArn=arn) + assert resp["Tags"] == [{'Key': 'TestTag2', 'Value': 'TestValue2'}] + @requires_boto_gte("2.9") @mock_dynamodb2 @@ -356,10 +367,21 @@ def test_basic_projection_expressions(): ) assert 'body' in results['Items'][0] + assert 'subject' not in results['Items'][0] assert results['Items'][0]['body'] == 'some test message' assert 'body' in results['Items'][1] + assert 'subject' not in results['Items'][1] assert results['Items'][1]['body'] == 'yet another test message' + # The projection expression should not remove data from storage + results = table.query( + KeyConditionExpression=Key('forum_name').eq( + 'the-key'), + ) + assert 'subject' in results['Items'][0] + assert 'body' in results['Items'][1] + assert 'forum_name' in results['Items'][1] + @mock_dynamodb2 def 
test_basic_projection_expressions_with_attr_expression_names(): @@ -576,29 +598,36 @@ def test_get_item_returns_consumed_capacity(): def test_filter_expression(): - # TODO NOT not yet supported row1 = moto.dynamodb2.models.Item(None, None, None, None, {'Id': {'N': '8'}, 'Subs': {'N': '5'}, 'Desc': {'S': 'Some description'}, 'KV': {'SS': ['test1', 'test2']}}) row2 = moto.dynamodb2.models.Item(None, None, None, None, {'Id': {'N': '8'}, 'Subs': {'N': '10'}, 'Desc': {'S': 'A description'}, 'KV': {'SS': ['test3', 'test4']}}) + # NOT test 1 + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('NOT attribute_not_exists(Id)', {}, {}) + filter_expr.expr(row1).should.be(True) + + # NOT test 2 + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('NOT (Id = :v0)', {}, {':v0': {'N': 8}}) + filter_expr.expr(row1).should.be(False) # Id = 8 so should be false + # AND test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id > 5 AND Subs < 7', {}, {}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id > :v0 AND Subs < :v1', {}, {':v0': {'N': 5}, ':v1': {'N': 7}}) filter_expr.expr(row1).should.be(True) filter_expr.expr(row2).should.be(False) # OR test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = 5 OR Id=8', {}, {}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = :v0 OR Id=:v1', {}, {':v0': {'N': 5}, ':v1': {'N': 8}}) filter_expr.expr(row1).should.be(True) # BETWEEN test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id BETWEEN 5 AND 10', {}, {}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id BETWEEN :v0 AND :v1', {}, {':v0': {'N': 5}, ':v1': {'N': 10}}) filter_expr.expr(row1).should.be(True) # PAREN test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = 8 AND (Subs = 8 OR Subs = 5)', {}, {}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id = :v0 AND (Subs = :v0 OR Subs = :v1)', {}, {':v0': {'N': 8}, ':v1': {'N': 5}}) filter_expr.expr(row1).should.be(True) # IN test - filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id IN (7,8, 9)', {}, {}) + filter_expr = moto.dynamodb2.comparisons.get_filter_expression('Id IN :v0', {}, {':v0': {'NS': [7, 8, 9]}}) filter_expr.expr(row1).should.be(True) # attribute function tests @@ -622,6 +651,55 @@ def test_filter_expression(): filter_expr = moto.dynamodb2.comparisons.get_filter_expression('size(Desc) > size(KV)', {}, {}) filter_expr.expr(row1).should.be(True) + # Expression from @batkuip + filter_expr = moto.dynamodb2.comparisons.get_filter_expression( + '(#n0 < :v0 AND attribute_not_exists(#n1))', + {'#n0': 'Subs', '#n1': 'fanout_ts'}, + {':v0': {'N': '7'}} + ) + filter_expr.expr(row1).should.be(True) + + +@mock_dynamodb2 +def test_query_filter(): + client = boto3.client('dynamodb', region_name='us-east-1') + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+ client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app1'} + } + ) + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'S': 'app2'} + } + ) + + table = dynamodb.Table('test1') + response = table.query( + KeyConditionExpression=Key('client').eq('client1') + ) + assert response['Count'] == 2 + + response = table.query( + KeyConditionExpression=Key('client').eq('client1'), + FilterExpression=Attr('app').eq('app2') + ) + assert response['Count'] == 1 + assert response['Items'][0]['app'] == 'app2' + @mock_dynamodb2 def test_scan_filter(): @@ -655,6 +733,84 @@ def test_scan_filter(): assert response['Count'] == 1 +@mock_dynamodb2 +def test_scan_filter2(): + client = boto3.client('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'N': '1'} + } + ) + + response = client.scan( + TableName='test1', + Select='ALL_ATTRIBUTES', + FilterExpression='#tb >= :dt', + ExpressionAttributeNames={"#tb": "app"}, + ExpressionAttributeValues={":dt": {"N": str(1)}} + ) + assert response['Count'] == 1 + + +@mock_dynamodb2 +def test_scan_filter3(): + client = boto3.client('dynamodb', region_name='us-east-1') + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + client.put_item( + TableName='test1', + Item={ + 'client': {'S': 'client1'}, + 'app': {'N': '1'}, + 'active': {'BOOL': True} + } + ) + + table = dynamodb.Table('test1') + response = table.scan( + FilterExpression=Attr('active').eq(True) + ) + assert response['Count'] == 1 + + +@mock_dynamodb2 +def test_scan_filter4(): + client = boto3.client('dynamodb', region_name='us-east-1') + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
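A note on how the resource-layer filters in these scan tests relate to the low-level expression parser exercised earlier: composing conditions with Attr and `&` is serialized by boto3 into roughly the same placeholder form as the '@batkuip' expression above. A sketch of the correspondence, using the same attribute names as the tests:

```python
from boto3.dynamodb.conditions import Attr

# The condition that test_scan_filter4 passes to table.scan() ...
condition = Attr('epoch_ts').lt(7) & Attr('fanout_ts').not_exists()

# ... is turned by boto3 (its ConditionExpressionBuilder) into roughly the
# low-level form exercised against get_filter_expression above:
#   FilterExpression:          '(#n0 < :v0 AND attribute_not_exists(#n1))'
#   ExpressionAttributeNames:  {'#n0': 'epoch_ts', '#n1': 'fanout_ts'}
#   ExpressionAttributeValues: {':v0': {'N': '7'}}
```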
+ client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'N'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + + table = dynamodb.Table('test1') + response = table.scan( + FilterExpression=Attr('epoch_ts').lt(7) & Attr('fanout_ts').not_exists() + ) + # Just testing + assert response['Count'] == 0 + + @mock_dynamodb2 def test_bad_scan_filter(): client = boto3.client('dynamodb', region_name='us-east-1') @@ -680,7 +836,6 @@ def test_bad_scan_filter(): raise RuntimeError('Should of raised ResourceInUseException') - @mock_dynamodb2 def test_duplicate_create(): client = boto3.client('dynamodb', region_name='us-east-1') @@ -776,3 +931,139 @@ def test_delete_item(): response = table.scan() assert response['Count'] == 0 + + +@mock_dynamodb2 +def test_describe_limits(): + client = boto3.client('dynamodb', region_name='eu-central-1') + resp = client.describe_limits() + + resp['AccountMaxReadCapacityUnits'].should.equal(20000) + resp['AccountMaxWriteCapacityUnits'].should.equal(20000) + resp['TableMaxWriteCapacityUnits'].should.equal(10000) + resp['TableMaxReadCapacityUnits'].should.equal(10000) + + +@mock_dynamodb2 +def test_set_ttl(): + client = boto3.client('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + + client.update_time_to_live( + TableName='test1', + TimeToLiveSpecification={ + 'Enabled': True, + 'AttributeName': 'expire' + } + ) + + resp = client.describe_time_to_live(TableName='test1') + resp['TimeToLiveDescription']['TimeToLiveStatus'].should.equal('ENABLED') + resp['TimeToLiveDescription']['AttributeName'].should.equal('expire') + + client.update_time_to_live( + TableName='test1', + TimeToLiveSpecification={ + 'Enabled': False, + 'AttributeName': 'expire' + } + ) + + resp = client.describe_time_to_live(TableName='test1') + resp['TimeToLiveDescription']['TimeToLiveStatus'].should.equal('DISABLED') + + +# https://github.com/spulec/moto/issues/1043 +@mock_dynamodb2 +def test_query_missing_expr_names(): + client = boto3.client('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+ client.create_table( + TableName='test1', + AttributeDefinitions=[{'AttributeName': 'client', 'AttributeType': 'S'}, {'AttributeName': 'app', 'AttributeType': 'S'}], + KeySchema=[{'AttributeName': 'client', 'KeyType': 'HASH'}, {'AttributeName': 'app', 'KeyType': 'RANGE'}], + ProvisionedThroughput={'ReadCapacityUnits': 123, 'WriteCapacityUnits': 123} + ) + client.put_item(TableName='test1', Item={'client': {'S': 'test1'}, 'app': {'S': 'test1'}}) + client.put_item(TableName='test1', Item={'client': {'S': 'test2'}, 'app': {'S': 'test2'}}) + + resp = client.query(TableName='test1', KeyConditionExpression='client=:client', + ExpressionAttributeValues={':client': {'S': 'test1'}}) + + resp['Count'].should.equal(1) + resp['Items'][0]['client']['S'].should.equal('test1') + + resp = client.query(TableName='test1', KeyConditionExpression=':name=test2', + ExpressionAttributeNames={':name': 'client'}) + + resp['Count'].should.equal(1) + resp['Items'][0]['client']['S'].should.equal('test2') + + +# https://github.com/spulec/moto/issues/1342 +@mock_dynamodb2 +def test_update_item_on_map(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. + dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123', + 'body': {'nested': {'data': 'test'}}, + }) + + resp = table.scan() + resp['Items'][0]['body'].should.equal({'nested': {'data': 'test'}}) + + table.update_item(Key={ + 'forum_name': 'the-key', + 'subject': '123' + }, + UpdateExpression='SET body.#nested.#data = :tb', + ExpressionAttributeNames={ + '#nested': 'nested', + '#data': 'data' + }, + ExpressionAttributeValues={ + ':tb': 'new_value' + }) + + resp = table.scan() + resp['Items'][0]['body'].should.equal({'nested': {'data': 'new_value'}}) diff --git a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py index 0e1099559..5e635d5ef 100644 --- a/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py +++ b/tests/test_dynamodb2/test_dynamodb_table_without_range_key.py @@ -54,7 +54,7 @@ def test_create_table(): } } conn = boto.dynamodb2.connect_to_region( - 'us-west-2', + 'us-east-1', aws_access_key_id="ak", aws_secret_access_key="sk" ) @@ -425,7 +425,7 @@ def test_get_special_item(): @mock_dynamodb2_deprecated def test_update_item_remove(): - conn = boto.dynamodb2.connect_to_region("us-west-2") + conn = boto.dynamodb2.connect_to_region("us-east-1") table = Table.create('messages', schema=[ HashKey('username') ]) @@ -452,7 +452,7 @@ def test_update_item_remove(): @mock_dynamodb2_deprecated def test_update_item_set(): - conn = boto.dynamodb2.connect_to_region("us-west-2") + conn = boto.dynamodb2.connect_to_region("us-east-1") table = Table.create('messages', schema=[ HashKey('username') ]) diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py index cf9f73f0e..1029ba39e 100755 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -666,10 +666,6 @@ def test_ami_attribute_error_cases(): cm.exception.request_id.should_not.be.none -""" 
-Boto3 -""" - @mock_ec2 def test_ami_filter_wildcard(): ec2 = boto3.resource('ec2', region_name='us-west-1') @@ -678,3 +674,20 @@ def test_ami_filter_wildcard(): filter_result = list(ec2.images.filter(Owners=['111122223333'], Filters=[{'Name':'name', 'Values':['test*']}])) assert filter_result == [image] + +@mock_ec2 +def test_ami_filter_by_owner_id(): + client = boto3.client('ec2', region_name='us-east-1') + + ubuntu_id = '099720109477' + + ubuntu_images = client.describe_images(Owners=[ubuntu_id]) + all_images = client.describe_images() + + ubuntu_ids = [ami['OwnerId'] for ami in ubuntu_images['Images']] + all_ids = [ami['OwnerId'] for ami in all_images['Images']] + + # Assert all ubuntu_ids are the same and one equals ubuntu_id + assert all(ubuntu_ids) and ubuntu_ids[0] == ubuntu_id + # Check we actually have a subset of images + assert len(ubuntu_ids) < len(all_ids) diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 4427d4843..9c07f38d6 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -17,12 +17,14 @@ def test_create_and_delete_volume(): volume = conn.create_volume(80, "us-east-1a") all_volumes = conn.get_all_volumes() - all_volumes.should.have.length_of(1) - all_volumes[0].size.should.equal(80) - all_volumes[0].zone.should.equal("us-east-1a") - all_volumes[0].encrypted.should.be(False) - volume = all_volumes[0] + current_volume = [item for item in all_volumes if item.id == volume.id] + current_volume.should.have.length_of(1) + current_volume[0].size.should.equal(80) + current_volume[0].zone.should.equal("us-east-1a") + current_volume[0].encrypted.should.be(False) + + volume = current_volume[0] with assert_raises(EC2ResponseError) as ex: volume.delete(dry_run=True) @@ -33,7 +35,9 @@ def test_create_and_delete_volume(): volume.delete() - conn.get_all_volumes().should.have.length_of(0) + all_volumes = conn.get_all_volumes() + my_volume = [item for item in all_volumes if item.id == volume.id] + my_volume.should.have.length_of(0) # Deleting something that was already deleted should throw an error with assert_raises(EC2ResponseError) as cm: @@ -57,7 +61,7 @@ def test_create_encrypted_volume_dryrun(): @mock_ec2_deprecated def test_create_encrypted_volume(): conn = boto.connect_ec2('the_key', 'the_secret') - conn.create_volume(80, "us-east-1a", encrypted=True) + volume = conn.create_volume(80, "us-east-1a", encrypted=True) with assert_raises(EC2ResponseError) as ex: conn.create_volume(80, "us-east-1a", encrypted=True, dry_run=True) @@ -66,7 +70,7 @@ def test_create_encrypted_volume(): ex.exception.message.should.equal( 'An error occurred (DryRunOperation) when calling the CreateVolume operation: Request would have succeeded, but DryRun flag is set') - all_volumes = conn.get_all_volumes() + all_volumes = [vol for vol in conn.get_all_volumes() if vol.id == volume.id] all_volumes[0].encrypted.should.be(True) @@ -116,67 +120,69 @@ def test_volume_filters(): block_mapping = instance.block_device_mapping['/dev/sda1'] + volume_ids = (volume1.id, volume2.id, volume3.id, volume4.id, block_mapping.volume_id) + volumes_by_attach_time = conn.get_all_volumes( filters={'attachment.attach-time': block_mapping.attach_time}) set([vol.id for vol in volumes_by_attach_time] - ).should.equal(set([block_mapping.volume_id])) + ).should.equal({block_mapping.volume_id}) volumes_by_attach_device = conn.get_all_volumes( filters={'attachment.device': '/dev/sda1'}) set([vol.id for vol in 
volumes_by_attach_device] - ).should.equal(set([block_mapping.volume_id])) + ).should.equal({block_mapping.volume_id}) volumes_by_attach_instance_id = conn.get_all_volumes( filters={'attachment.instance-id': instance.id}) set([vol.id for vol in volumes_by_attach_instance_id] - ).should.equal(set([block_mapping.volume_id])) + ).should.equal({block_mapping.volume_id}) volumes_by_attach_status = conn.get_all_volumes( filters={'attachment.status': 'attached'}) set([vol.id for vol in volumes_by_attach_status] - ).should.equal(set([block_mapping.volume_id])) + ).should.equal({block_mapping.volume_id}) volumes_by_create_time = conn.get_all_volumes( filters={'create-time': volume4.create_time}) set([vol.create_time for vol in volumes_by_create_time] - ).should.equal(set([volume4.create_time])) + ).should.equal({volume4.create_time}) volumes_by_size = conn.get_all_volumes(filters={'size': volume2.size}) - set([vol.id for vol in volumes_by_size]).should.equal(set([volume2.id])) + set([vol.id for vol in volumes_by_size]).should.equal({volume2.id}) volumes_by_snapshot_id = conn.get_all_volumes( filters={'snapshot-id': snapshot.id}) set([vol.id for vol in volumes_by_snapshot_id] - ).should.equal(set([volume4.id])) + ).should.equal({volume4.id}) volumes_by_status = conn.get_all_volumes(filters={'status': 'in-use'}) set([vol.id for vol in volumes_by_status]).should.equal( - set([block_mapping.volume_id])) + {block_mapping.volume_id}) volumes_by_id = conn.get_all_volumes(filters={'volume-id': volume1.id}) - set([vol.id for vol in volumes_by_id]).should.equal(set([volume1.id])) + set([vol.id for vol in volumes_by_id]).should.equal({volume1.id}) volumes_by_tag_key = conn.get_all_volumes(filters={'tag-key': 'testkey1'}) - set([vol.id for vol in volumes_by_tag_key]).should.equal(set([volume1.id])) + set([vol.id for vol in volumes_by_tag_key]).should.equal({volume1.id}) volumes_by_tag_value = conn.get_all_volumes( filters={'tag-value': 'testvalue1'}) set([vol.id for vol in volumes_by_tag_value] - ).should.equal(set([volume1.id])) + ).should.equal({volume1.id}) volumes_by_tag = conn.get_all_volumes( filters={'tag:testkey1': 'testvalue1'}) - set([vol.id for vol in volumes_by_tag]).should.equal(set([volume1.id])) + set([vol.id for vol in volumes_by_tag]).should.equal({volume1.id}) volumes_by_unencrypted = conn.get_all_volumes( filters={'encrypted': 'false'}) - set([vol.id for vol in volumes_by_unencrypted]).should.equal( - set([block_mapping.volume_id, volume2.id]) + set([vol.id for vol in volumes_by_unencrypted if vol.id in volume_ids]).should.equal( + {block_mapping.volume_id, volume2.id} ) volumes_by_encrypted = conn.get_all_volumes(filters={'encrypted': 'true'}) - set([vol.id for vol in volumes_by_encrypted]).should.equal( - set([volume1.id, volume3.id, volume4.id]) + set([vol.id for vol in volumes_by_encrypted if vol.id in volume_ids]).should.equal( + {volume1.id, volume3.id, volume4.id} ) @@ -252,18 +258,20 @@ def test_create_snapshot(): snapshot.update() snapshot.status.should.equal('completed') - snapshots = conn.get_all_snapshots() + snapshots = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] snapshots.should.have.length_of(1) snapshots[0].description.should.equal('a test snapshot') snapshots[0].start_time.should_not.be.none snapshots[0].encrypted.should.be(False) # Create snapshot without description + num_snapshots = len(conn.get_all_snapshots()) + snapshot = volume.create_snapshot() - conn.get_all_snapshots().should.have.length_of(2) + 
conn.get_all_snapshots().should.have.length_of(num_snapshots + 1) snapshot.delete() - conn.get_all_snapshots().should.have.length_of(1) + conn.get_all_snapshots().should.have.length_of(num_snapshots) # Deleting something that was already deleted should throw an error with assert_raises(EC2ResponseError) as cm: @@ -281,7 +289,7 @@ def test_create_encrypted_snapshot(): snapshot.update() snapshot.status.should.equal('completed') - snapshots = conn.get_all_snapshots() + snapshots = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] snapshots.should.have.length_of(1) snapshots[0].description.should.equal('a test snapshot') snapshots[0].start_time.should_not.be.none @@ -331,52 +339,52 @@ def test_snapshot_filters(): snapshots_by_description = conn.get_all_snapshots( filters={'description': 'testsnapshot1'}) set([snap.id for snap in snapshots_by_description] - ).should.equal(set([snapshot1.id])) + ).should.equal({snapshot1.id}) snapshots_by_id = conn.get_all_snapshots( filters={'snapshot-id': snapshot1.id}) set([snap.id for snap in snapshots_by_id] - ).should.equal(set([snapshot1.id])) + ).should.equal({snapshot1.id}) snapshots_by_start_time = conn.get_all_snapshots( filters={'start-time': snapshot1.start_time}) set([snap.start_time for snap in snapshots_by_start_time] - ).should.equal(set([snapshot1.start_time])) + ).should.equal({snapshot1.start_time}) snapshots_by_volume_id = conn.get_all_snapshots( filters={'volume-id': volume1.id}) set([snap.id for snap in snapshots_by_volume_id] - ).should.equal(set([snapshot1.id, snapshot2.id])) + ).should.equal({snapshot1.id, snapshot2.id}) snapshots_by_status = conn.get_all_snapshots( filters={'status': 'completed'}) - set([snap.id for snap in snapshots_by_status] - ).should.equal(set([snapshot1.id, snapshot2.id, snapshot3.id])) + ({snapshot1.id, snapshot2.id, snapshot3.id} - + {snap.id for snap in snapshots_by_status}).should.have.length_of(0) snapshots_by_volume_size = conn.get_all_snapshots( filters={'volume-size': volume1.size}) set([snap.id for snap in snapshots_by_volume_size] - ).should.equal(set([snapshot1.id, snapshot2.id])) + ).should.equal({snapshot1.id, snapshot2.id}) snapshots_by_tag_key = conn.get_all_snapshots( filters={'tag-key': 'testkey1'}) set([snap.id for snap in snapshots_by_tag_key] - ).should.equal(set([snapshot1.id])) + ).should.equal({snapshot1.id}) snapshots_by_tag_value = conn.get_all_snapshots( filters={'tag-value': 'testvalue1'}) set([snap.id for snap in snapshots_by_tag_value] - ).should.equal(set([snapshot1.id])) + ).should.equal({snapshot1.id}) snapshots_by_tag = conn.get_all_snapshots( filters={'tag:testkey1': 'testvalue1'}) set([snap.id for snap in snapshots_by_tag] - ).should.equal(set([snapshot1.id])) + ).should.equal({snapshot1.id}) snapshots_by_encrypted = conn.get_all_snapshots( filters={'encrypted': 'true'}) set([snap.id for snap in snapshots_by_encrypted] - ).should.equal(set([snapshot3.id])) + ).should.equal({snapshot3.id}) @mock_ec2_deprecated @@ -563,9 +571,11 @@ def test_volume_tag_escaping(): ex.exception.status.should.equal(400) ex.exception.message.should.equal( 'An error occurred (DryRunOperation) when calling the CreateTags operation: Request would have succeeded, but DryRun flag is set') - dict(conn.get_all_snapshots()[0].tags).should_not.be.equal( + snaps = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] + dict(snaps[0].tags).should_not.be.equal( {'key': ''}) snapshot.add_tags({'key': ''}) - dict(conn.get_all_snapshots()[0].tags).should.equal({'key': ''}) + snaps 
= [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] + dict(snaps[0].tags).should.equal({'key': ''}) diff --git a/tests/test_ec2/test_instances.py b/tests/test_ec2/test_instances.py index 04e6a6daa..84b4fbd7d 100644 --- a/tests/test_ec2/test_instances.py +++ b/tests/test_ec2/test_instances.py @@ -5,7 +5,9 @@ from nose.tools import assert_raises import base64 import datetime +import ipaddress +import six import boto import boto3 from boto.ec2.instance import Reservation, InstanceAttribute @@ -215,7 +217,6 @@ def test_create_with_tags(): len(instances['Instances'][0]['Tags']).should.equal(3) - @mock_ec2_deprecated def test_get_instances_filtering_by_state(): conn = boto.connect_ec2() @@ -413,6 +414,7 @@ def test_get_instances_filtering_by_image_id(): 'Values': [image_id]}])['Reservations'] reservations[0]['Instances'].should.have.length_of(1) + @mock_ec2 def test_get_instances_filtering_by_private_dns(): image_id = 'ami-1234abcd' @@ -427,6 +429,7 @@ def test_get_instances_filtering_by_private_dns(): ])['Reservations'] reservations[0]['Instances'].should.have.length_of(1) + @mock_ec2 def test_get_instances_filtering_by_ni_private_dns(): image_id = 'ami-1234abcd' @@ -441,6 +444,7 @@ def test_get_instances_filtering_by_ni_private_dns(): ])['Reservations'] reservations[0]['Instances'].should.have.length_of(1) + @mock_ec2 def test_get_instances_filtering_by_instance_group_name(): image_id = 'ami-1234abcd' @@ -458,6 +462,7 @@ def test_get_instances_filtering_by_instance_group_name(): ])['Reservations'] reservations[0]['Instances'].should.have.length_of(1) + @mock_ec2 def test_get_instances_filtering_by_instance_group_id(): image_id = 'ami-1234abcd' @@ -476,6 +481,7 @@ def test_get_instances_filtering_by_instance_group_id(): ])['Reservations'] reservations[0]['Instances'].should.have.length_of(1) + @mock_ec2_deprecated def test_get_instances_filtering_by_tag(): conn = boto.connect_ec2() @@ -830,18 +836,113 @@ def test_run_instance_with_placement(): instance.placement.should.equal("us-east-1b") -@mock_ec2_deprecated -def test_run_instance_with_subnet(): - conn = boto.connect_vpc('the_key', 'the_secret') - vpc = conn.create_vpc("10.0.0.0/16") - subnet = conn.create_subnet(vpc.id, "10.0.0.0/18") - reservation = conn.run_instances('ami-1234abcd', subnet_id=subnet.id) - instance = reservation.instances[0] +@mock_ec2 +def test_run_instance_with_subnet_boto3(): + client = boto3.client('ec2', region_name='eu-central-1') - instance.subnet_id.should.equal(subnet.id) + ip_networks = [ + (ipaddress.ip_network('10.0.0.0/16'), ipaddress.ip_network('10.0.99.0/24')), + (ipaddress.ip_network('192.168.42.0/24'), ipaddress.ip_network('192.168.42.0/25')) + ] - all_enis = conn.get_all_network_interfaces() - all_enis.should.have.length_of(1) + # Tests instances are created with the correct IPs + for vpc_cidr, subnet_cidr in ip_networks: + resp = client.create_vpc( + CidrBlock=str(vpc_cidr), + AmazonProvidedIpv6CidrBlock=False, + DryRun=False, + InstanceTenancy='default' + ) + vpc_id = resp['Vpc']['VpcId'] + + resp = client.create_subnet( + CidrBlock=str(subnet_cidr), + VpcId=vpc_id + ) + subnet_id = resp['Subnet']['SubnetId'] + + resp = client.run_instances( + ImageId='ami-1234abcd', + MaxCount=1, + MinCount=1, + SubnetId=subnet_id + ) + instance = resp['Instances'][0] + instance['SubnetId'].should.equal(subnet_id) + + priv_ipv4 = ipaddress.ip_address(six.text_type(instance['PrivateIpAddress'])) + subnet_cidr.should.contain(priv_ipv4) + + +@mock_ec2 +def 
test_run_instance_with_specified_private_ipv4(): + client = boto3.client('ec2', region_name='eu-central-1') + + vpc_cidr = ipaddress.ip_network('192.168.42.0/24') + subnet_cidr = ipaddress.ip_network('192.168.42.0/25') + + resp = client.create_vpc( + CidrBlock=str(vpc_cidr), + AmazonProvidedIpv6CidrBlock=False, + DryRun=False, + InstanceTenancy='default' + ) + vpc_id = resp['Vpc']['VpcId'] + + resp = client.create_subnet( + CidrBlock=str(subnet_cidr), + VpcId=vpc_id + ) + subnet_id = resp['Subnet']['SubnetId'] + + resp = client.run_instances( + ImageId='ami-1234abcd', + MaxCount=1, + MinCount=1, + SubnetId=subnet_id, + PrivateIpAddress='192.168.42.5' + ) + instance = resp['Instances'][0] + instance['SubnetId'].should.equal(subnet_id) + instance['PrivateIpAddress'].should.equal('192.168.42.5') + + +@mock_ec2 +def test_run_instance_mapped_public_ipv4(): + client = boto3.client('ec2', region_name='eu-central-1') + + vpc_cidr = ipaddress.ip_network('192.168.42.0/24') + subnet_cidr = ipaddress.ip_network('192.168.42.0/25') + + resp = client.create_vpc( + CidrBlock=str(vpc_cidr), + AmazonProvidedIpv6CidrBlock=False, + DryRun=False, + InstanceTenancy='default' + ) + vpc_id = resp['Vpc']['VpcId'] + + resp = client.create_subnet( + CidrBlock=str(subnet_cidr), + VpcId=vpc_id + ) + subnet_id = resp['Subnet']['SubnetId'] + client.modify_subnet_attribute( + SubnetId=subnet_id, + MapPublicIpOnLaunch={'Value': True} + ) + + resp = client.run_instances( + ImageId='ami-1234abcd', + MaxCount=1, + MinCount=1, + SubnetId=subnet_id + ) + instance = resp['Instances'][0] + instance.should.contain('PublicDnsName') + instance.should.contain('PublicIpAddress') + len(instance['PublicDnsName']).should.be.greater_than(0) + len(instance['PublicIpAddress']).should.be.greater_than(0) @mock_ec2_deprecated @@ -853,7 +954,7 @@ def test_run_instance_with_nic_autocreated(): 'test security group #1', 'this is a test security group') security_group2 = conn.create_security_group( 'test security group #2', 'this is a test security group') - private_ip = "54.0.0.1" + private_ip = "10.0.0.1" reservation = conn.run_instances('ami-1234abcd', subnet_id=subnet.id, security_groups=[security_group1.name], @@ -880,6 +981,7 @@ def test_run_instance_with_nic_autocreated(): eni.private_ip_addresses.should.have.length_of(1) eni.private_ip_addresses[0].private_ip_address.should.equal(private_ip) + @mock_ec2_deprecated def test_run_instance_with_nic_preexisting(): conn = boto.connect_vpc('the_key', 'the_secret') @@ -1012,6 +1114,7 @@ def test_ec2_classic_has_public_ip_address(): instance.private_ip_address.should_not.equal(None) instance.private_dns_name.should.contain(instance.private_ip_address.replace('.', '-')) + @mock_ec2_deprecated def test_run_instance_with_keypair(): conn = boto.connect_ec2('the_key', 'the_secret') @@ -1113,3 +1216,41 @@ def test_get_instance_by_security_group(): assert len(security_group_instances) == 1 assert security_group_instances[0].id == instance.id + + +@mock_ec2 +def test_modify_delete_on_termination(): + ec2_client = boto3.resource('ec2', region_name='us-west-1') + result = ec2_client.create_instances(ImageId='ami-12345678', MinCount=1, MaxCount=1) + instance = result[0] + instance.load() + instance.block_device_mappings[0]['Ebs']['DeleteOnTermination'].should.be(False) + instance.modify_attribute( + BlockDeviceMappings=[{ + 'DeviceName': '/dev/sda1', + 'Ebs': {'DeleteOnTermination': True} + }] + ) + instance.load() + instance.block_device_mappings[0]['Ebs']['DeleteOnTermination'].should.be(True) + +@mock_ec2 
+def test_create_instance_ebs_optimized(): + ec2_resource = boto3.resource('ec2', region_name='eu-west-1') + + instance = ec2_resource.create_instances( + ImageId = 'ami-12345678', + MaxCount = 1, + MinCount = 1, + EbsOptimized = True, + )[0] + instance.load() + instance.ebs_optimized.should.be(True) + + instance.modify_attribute( + EbsOptimized={ + 'Value': False + } + ) + instance.load() + instance.ebs_optimized.should.be(False) diff --git a/tests/test_ec2/test_route_tables.py b/tests/test_ec2/test_route_tables.py index 6e6c62741..b27484468 100644 --- a/tests/test_ec2/test_route_tables.py +++ b/tests/test_ec2/test_route_tables.py @@ -126,9 +126,9 @@ def test_route_tables_filters_associations(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") - subnet1 = conn.create_subnet(vpc.id, "10.0.0.0/18") - subnet2 = conn.create_subnet(vpc.id, "10.0.1.0/18") - subnet3 = conn.create_subnet(vpc.id, "10.0.2.0/18") + subnet1 = conn.create_subnet(vpc.id, "10.0.0.0/24") + subnet2 = conn.create_subnet(vpc.id, "10.0.1.0/24") + subnet3 = conn.create_subnet(vpc.id, "10.0.2.0/24") route_table1 = conn.create_route_table(vpc.id) route_table2 = conn.create_route_table(vpc.id) diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py index bb3a8d36b..ccef5a288 100644 --- a/tests/test_ec2/test_tags.py +++ b/tests/test_ec2/test_tags.py @@ -356,7 +356,7 @@ def test_retrieved_snapshots_must_contain_their_tags(): # Fetch the snapshot again all_snapshots = conn.get_all_snapshots() - snapshot = all_snapshots[0] + snapshot = [item for item in all_snapshots if item.id == snapshot.id][0] retrieved_tags = snapshot.tags conn.delete_snapshot(snapshot.id) diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index 67d1a2cab..00628e22f 100644 --- a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -414,7 +414,8 @@ def test_get_authorization_token_assume_region(): client = boto3.client('ecr', region_name='us-east-1') auth_token_response = client.get_authorization_token() - list(auth_token_response.keys()).should.equal(['authorizationData', 'ResponseMetadata']) + auth_token_response.should.contain('authorizationData') + auth_token_response.should.contain('ResponseMetadata') auth_token_response['authorizationData'].should.equal([ { 'authorizationToken': 'QVdTOnVzLWVhc3QtMS1hdXRoLXRva2Vu', @@ -429,7 +430,8 @@ def test_get_authorization_token_explicit_regions(): client = boto3.client('ecr', region_name='us-east-1') auth_token_response = client.get_authorization_token(registryIds=['us-east-1', 'us-west-1']) - list(auth_token_response.keys()).should.equal(['authorizationData', 'ResponseMetadata']) + auth_token_response.should.contain('authorizationData') + auth_token_response.should.contain('ResponseMetadata') auth_token_response['authorizationData'].should.equal([ { 'authorizationToken': 'QVdTOnVzLWVhc3QtMS1hdXRoLXRva2Vu', diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 1cc147fc5..5fcc297aa 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -714,6 +714,9 @@ def test_describe_container_instances(): for ci in response['containerInstances']] for arn in test_instance_arns: response_arns.should.contain(arn) + for instance in response['containerInstances']: + instance.keys().should.contain('runningTasksCount') + instance.keys().should.contain('pendingTasksCount') @mock_ec2 @@ -1210,6 +1213,7 @@ def test_resource_reservation_and_release(): 
remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY'] - 400) registered_resources['PORTS'].append('80') remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) + container_instance_description['runningTasksCount'].should.equal(1) client.stop_task( cluster='test_ecs_cluster', task=run_response['tasks'][0].get('taskArn'), @@ -1223,6 +1227,7 @@ def test_resource_reservation_and_release(): remaining_resources['CPU'].should.equal(registered_resources['CPU']) remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY']) remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) + container_instance_description['runningTasksCount'].should.equal(0) @mock_ecs @@ -1606,6 +1611,267 @@ def test_update_service_through_cloudformation_should_trigger_replacement(): len(resp['serviceArns']).should.equal(1) +@mock_ec2 +@mock_ecs +def test_attributes(): + # Combined put, list delete attributes into the same test due to the amount of setup + ecs_client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = ecs_client.create_cluster( + clusterName=test_cluster_name + ) + + instances = [] + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + instances.append(test_instance) + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + response['containerInstance'][ + 'ec2InstanceId'].should.equal(test_instance.id) + full_arn1 = response['containerInstance']['containerInstanceArn'] + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + instances.append(test_instance) + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + response['containerInstance'][ + 'ec2InstanceId'].should.equal(test_instance.id) + full_arn2 = response['containerInstance']['containerInstanceArn'] + partial_arn2 = full_arn2.rsplit('/', 1)[-1] + + full_arn2.should_not.equal(full_arn1) # uuid1 isnt unique enough when the pc is fast ;-) + + # Ok set instance 1 with 1 attribute, instance 2 with another, and all of them with a 3rd. 
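+    # Note on targeting (a clarifying comment describing what this test exercises,
+    # not a statement about the real ECS API): an attribute supplied without a
+    # targetId is expected to be applied to every container instance in the
+    # cluster, while a targeted attribute may reference an instance either by
+    # its full ARN or by the bare instance id together with an explicit
+    # targetType of 'container-instance'.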
+ ecs_client.put_attributes( + cluster=test_cluster_name, + attributes=[ + {'name': 'env', 'value': 'prod'}, + {'name': 'attr1', 'value': 'instance1', 'targetId': full_arn1}, + {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, 'targetType': 'container-instance'} + ] + ) + + resp = ecs_client.list_attributes( + cluster=test_cluster_name, + targetType='container-instance' + ) + attrs = resp['attributes'] + + NUM_CUSTOM_ATTRIBUTES = 4 # 2 specific to individual machines and 1 global, going to both machines (2 + 1*2) + NUM_DEFAULT_ATTRIBUTES = 4 + len(attrs).should.equal(NUM_CUSTOM_ATTRIBUTES + (NUM_DEFAULT_ATTRIBUTES * len(instances))) + + # Tests that the attrs have been set properly + len(list(filter(lambda item: item['name'] == 'env', attrs))).should.equal(2) + len(list(filter(lambda item: item['name'] == 'attr1' and item['value'] == 'instance1', attrs))).should.equal(1) + + ecs_client.delete_attributes( + cluster=test_cluster_name, + attributes=[ + {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, 'targetType': 'container-instance'} + ] + ) + NUM_CUSTOM_ATTRIBUTES -= 1 + + resp = ecs_client.list_attributes( + cluster=test_cluster_name, + targetType='container-instance' + ) + attrs = resp['attributes'] + len(attrs).should.equal(NUM_CUSTOM_ATTRIBUTES + (NUM_DEFAULT_ATTRIBUTES * len(instances))) + + +@mock_ecs +def test_poll_endpoint(): + # Combined put, list delete attributes into the same test due to the amount of setup + ecs_client = boto3.client('ecs', region_name='us-east-1') + + # Just a placeholder until someone actually wants useless data, just testing it doesnt raise an exception + resp = ecs_client.discover_poll_endpoint(cluster='blah', containerInstance='blah') + resp.should.contain('endpoint') + resp.should.contain('telemetryEndpoint') + + +@mock_ecs +def test_list_task_definition_families(): + client = boto3.client('ecs', region_name='us-east-1') + client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + client.register_task_definition( + family='alt_test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'cpu': 1024, + 'memory': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'} + } + ] + ) + + resp1 = client.list_task_definition_families() + resp2 = client.list_task_definition_families(familyPrefix='alt') + + len(resp1['families']).should.equal(2) + len(resp2['families']).should.equal(1) + + +@mock_ec2 +@mock_ecs +def test_default_container_instance_attributes(): + ecs_client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + # Create cluster and EC2 instance + _ = ecs_client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + # Register container instance + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + 
) + + response['containerInstance'][ + 'ec2InstanceId'].should.equal(test_instance.id) + full_arn = response['containerInstance']['containerInstanceArn'] + container_instance_id = full_arn.rsplit('/', 1)[-1] + + default_attributes = response['containerInstance']['attributes'] + assert len(default_attributes) == 4 + expected_result = [ + {'name': 'ecs.availability-zone', 'value': test_instance.placement['AvailabilityZone']}, + {'name': 'ecs.ami-id', 'value': test_instance.image_id}, + {'name': 'ecs.instance-type', 'value': test_instance.instance_type}, + {'name': 'ecs.os-type', 'value': test_instance.platform or 'linux'} + ] + assert sorted(default_attributes, key=lambda item: item['name']) == sorted(expected_result, key=lambda item: item['name']) + + +@mock_ec2 +@mock_ecs +def test_describe_container_instances_with_attributes(): + ecs_client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + # Create cluster and EC2 instance + _ = ecs_client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + # Register container instance + response = ecs_client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + response['containerInstance'][ + 'ec2InstanceId'].should.equal(test_instance.id) + full_arn = response['containerInstance']['containerInstanceArn'] + container_instance_id = full_arn.rsplit('/', 1)[-1] + default_attributes = response['containerInstance']['attributes'] + + # Set attributes on container instance, one without a value + attributes = [ + {'name': 'env', 'value': 'prod'}, + {'name': 'attr1', 'value': 'instance1', 'targetId': container_instance_id, 'targetType': 'container-instance'}, + {'name': 'attr_without_value'} + ] + ecs_client.put_attributes( + cluster=test_cluster_name, + attributes=attributes + ) + + # Describe container instance, should have attributes previously set + described_instance = ecs_client.describe_container_instances(cluster=test_cluster_name, containerInstances=[container_instance_id]) + + assert len(described_instance['containerInstances']) == 1 + assert isinstance(described_instance['containerInstances'][0]['attributes'], list) + + # Remove additional info passed to put_attributes + cleaned_attributes = [] + for attribute in attributes: + attribute.pop('targetId', None) + attribute.pop('targetType', None) + cleaned_attributes.append(attribute) + described_attributes = sorted(described_instance['containerInstances'][0]['attributes'], key=lambda item: item['name']) + expected_attributes = sorted(default_attributes + cleaned_attributes, key=lambda item: item['name']) + assert described_attributes == expected_attributes + + def _fetch_container_instance_resources(container_instance_description): remaining_resources = {} registered_resources = {} diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index 98634c677..4fb527525 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -1,11 +1,13 @@ from __future__ import unicode_literals +import os import boto3 import botocore from botocore.exceptions import ClientError from nose.tools import assert_raises import sure # noqa -from moto import mock_elbv2, mock_ec2 +from moto import mock_elbv2, mock_ec2, 
mock_acm +from moto.elbv2 import elbv2_backends @mock_elbv2 @@ -283,6 +285,21 @@ def test_create_target_group_and_listeners(): load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') + # Can't create a target group with an invalid protocol + with assert_raises(ClientError): + conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='/HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) response = conn.create_target_group( Name='a-target', Protocol='HTTP', @@ -395,6 +412,43 @@ def test_create_target_group_and_listeners(): response = conn.describe_target_groups() response.get('TargetGroups').should.have.length_of(0) +@mock_elbv2 +@mock_ec2 +def test_create_target_group_without_non_required_parameters(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + # request without HealthCheckIntervalSeconds parameter + # which is default to 30 seconds + response = conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080' + ) + target_group = response.get('TargetGroups')[0] + target_group.should_not.be.none @mock_elbv2 @mock_ec2 @@ -723,6 +777,21 @@ def test_handle_listener_rules(): load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') + # Can't create a target group with an invalid protocol + with assert_raises(ClientError): + conn.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='/HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) response = conn.create_target_group( Name='a-target', Protocol='HTTP', @@ -1030,3 +1099,373 @@ def test_describe_invalid_target_group(): # Check error raises correctly with assert_raises(ClientError): conn.describe_target_groups(Names=['invalid']) + + +@mock_elbv2 +def test_describe_account_limits(): + client = boto3.client('elbv2', region_name='eu-central-1') + + resp = client.describe_account_limits() + resp['Limits'][0].should.contain('Name') + resp['Limits'][0].should.contain('Max') + + +@mock_elbv2 +def test_describe_ssl_policies(): + client = boto3.client('elbv2', region_name='eu-central-1') + + resp = client.describe_ssl_policies() + len(resp['SslPolicies']).should.equal(5) + + resp = client.describe_ssl_policies(Names=['ELBSecurityPolicy-TLS-1-2-2017-01', 'ELBSecurityPolicy-2016-08']) + len(resp['SslPolicies']).should.equal(2) + + +@mock_elbv2 +@mock_ec2 +def test_set_ip_address_type(): + client = boto3.client('elbv2', region_name='us-east-1') + ec2 = 
boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = client.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + arn = response['LoadBalancers'][0]['LoadBalancerArn'] + + # Internal LBs cant be dualstack yet + with assert_raises(ClientError): + client.set_ip_address_type( + LoadBalancerArn=arn, + IpAddressType='dualstack' + ) + + # Create internet facing one + response = client.create_load_balancer( + Name='my-lb2', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internet-facing', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + arn = response['LoadBalancers'][0]['LoadBalancerArn'] + + client.set_ip_address_type( + LoadBalancerArn=arn, + IpAddressType='dualstack' + ) + + +@mock_elbv2 +@mock_ec2 +def test_set_security_groups(): + client = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + security_group2 = ec2.create_security_group( + GroupName='b-security-group', Description='Second One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = client.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + arn = response['LoadBalancers'][0]['LoadBalancerArn'] + + client.set_security_groups( + LoadBalancerArn=arn, + SecurityGroups=[security_group.id, security_group2.id] + ) + + resp = client.describe_load_balancers(LoadBalancerArns=[arn]) + len(resp['LoadBalancers'][0]['SecurityGroups']).should.equal(2) + + with assert_raises(ClientError): + client.set_security_groups( + LoadBalancerArn=arn, + SecurityGroups=['non_existant'] + ) + + +@mock_elbv2 +@mock_ec2 +def test_set_subnets(): + client = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + subnet3 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1c') + + response = client.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + arn = response['LoadBalancers'][0]['LoadBalancerArn'] + + client.set_subnets( + 
LoadBalancerArn=arn, + Subnets=[subnet1.id, subnet2.id, subnet3.id] + ) + + resp = client.describe_load_balancers(LoadBalancerArns=[arn]) + len(resp['LoadBalancers'][0]['AvailabilityZones']).should.equal(3) + + # Only 1 AZ + with assert_raises(ClientError): + client.set_subnets( + LoadBalancerArn=arn, + Subnets=[subnet1.id] + ) + + # Multiple subnets in same AZ + with assert_raises(ClientError): + client.set_subnets( + LoadBalancerArn=arn, + Subnets=[subnet1.id, subnet2.id, subnet2.id] + ) + + +@mock_elbv2 +@mock_ec2 +def test_set_subnets(): + client = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + response = client.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + arn = response['LoadBalancers'][0]['LoadBalancerArn'] + + client.modify_load_balancer_attributes( + LoadBalancerArn=arn, + Attributes=[{'Key': 'idle_timeout.timeout_seconds', 'Value': '600'}] + ) + + # Check its 600 not 60 + response = client.describe_load_balancer_attributes( + LoadBalancerArn=arn + ) + idle_timeout = list(filter(lambda item: item['Key'] == 'idle_timeout.timeout_seconds', response['Attributes']))[0] + idle_timeout['Value'].should.equal('600') + + +@mock_elbv2 +@mock_ec2 +def test_modify_target_group(): + client = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + + response = client.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + arn = response.get('TargetGroups')[0]['TargetGroupArn'] + + client.modify_target_group( + TargetGroupArn=arn, + HealthCheckProtocol='HTTPS', + HealthCheckPort='8081', + HealthCheckPath='/status', + HealthCheckIntervalSeconds=10, + HealthCheckTimeoutSeconds=10, + HealthyThresholdCount=10, + UnhealthyThresholdCount=4, + Matcher={'HttpCode': '200-399'} + ) + + response = client.describe_target_groups( + TargetGroupArns=[arn] + ) + response['TargetGroups'][0]['Matcher']['HttpCode'].should.equal('200-399') + response['TargetGroups'][0]['HealthCheckIntervalSeconds'].should.equal(10) + response['TargetGroups'][0]['HealthCheckPath'].should.equal('/status') + response['TargetGroups'][0]['HealthCheckPort'].should.equal('8081') + response['TargetGroups'][0]['HealthCheckProtocol'].should.equal('HTTPS') + response['TargetGroups'][0]['HealthCheckTimeoutSeconds'].should.equal(10) + response['TargetGroups'][0]['HealthyThresholdCount'].should.equal(10) + response['TargetGroups'][0]['UnhealthyThresholdCount'].should.equal(4) + + +@mock_elbv2 +@mock_ec2 +@mock_acm +def test_modify_listener_http_to_https(): + client = boto3.client('elbv2', region_name='eu-central-1') + acm = boto3.client('acm', region_name='eu-central-1') + 
ec2 = boto3.resource('ec2', region_name='eu-central-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='eu-central-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='eu-central-1b') + + response = client.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[{'Key': 'key_name', 'Value': 'a_value'}]) + + load_balancer_arn = response.get('LoadBalancers')[0].get('LoadBalancerArn') + + response = client.create_target_group( + Name='a-target', + Protocol='HTTP', + Port=8080, + VpcId=vpc.id, + HealthCheckProtocol='HTTP', + HealthCheckPort='8080', + HealthCheckPath='/', + HealthCheckIntervalSeconds=5, + HealthCheckTimeoutSeconds=5, + HealthyThresholdCount=5, + UnhealthyThresholdCount=2, + Matcher={'HttpCode': '200'}) + target_group = response.get('TargetGroups')[0] + target_group_arn = target_group['TargetGroupArn'] + + # Plain HTTP listener + response = client.create_listener( + LoadBalancerArn=load_balancer_arn, + Protocol='HTTP', + Port=80, + DefaultActions=[{'Type': 'forward', 'TargetGroupArn': target_group_arn}] + ) + listener_arn = response['Listeners'][0]['ListenerArn'] + + response = acm.request_certificate( + DomainName='google.com', + SubjectAlternativeNames=['google.com', 'www.google.com', 'mail.google.com'], + ) + google_arn = response['CertificateArn'] + response = acm.request_certificate( + DomainName='yahoo.com', + SubjectAlternativeNames=['yahoo.com', 'www.yahoo.com', 'mail.yahoo.com'], + ) + yahoo_arn = response['CertificateArn'] + + response = client.modify_listener( + ListenerArn=listener_arn, + Port=443, + Protocol='HTTPS', + SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01', + Certificates=[ + {'CertificateArn': google_arn, 'IsDefault': False}, + {'CertificateArn': yahoo_arn, 'IsDefault': True} + ], + DefaultActions=[ + {'Type': 'forward', 'TargetGroupArn': target_group_arn} + ] + ) + response['Listeners'][0]['Port'].should.equal(443) + response['Listeners'][0]['Protocol'].should.equal('HTTPS') + response['Listeners'][0]['SslPolicy'].should.equal('ELBSecurityPolicy-TLS-1-2-2017-01') + len(response['Listeners'][0]['Certificates']).should.equal(2) + + # Check default cert, can't do this in server mode + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'false': + listener = elbv2_backends['eu-central-1'].load_balancers[load_balancer_arn].listeners[listener_arn] + listener.certificate.should.equal(yahoo_arn) + + # No default cert + with assert_raises(ClientError): + client.modify_listener( + ListenerArn=listener_arn, + Port=443, + Protocol='HTTPS', + SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01', + Certificates=[ + {'CertificateArn': google_arn, 'IsDefault': False} + ], + DefaultActions=[ + {'Type': 'forward', 'TargetGroupArn': target_group_arn} + ] + ) + + # Bad cert + with assert_raises(ClientError): + client.modify_listener( + ListenerArn=listener_arn, + Port=443, + Protocol='HTTPS', + SslPolicy='ELBSecurityPolicy-TLS-1-2-2017-01', + Certificates=[ + {'CertificateArn': 'lalala', 'IsDefault': True} + ], + DefaultActions=[ + {'Type': 'forward', 'TargetGroupArn': target_group_arn} + ] + ) diff --git a/tests/test_events/test_events.py b/tests/test_events/test_events.py index da8238f72..e839bde5b 100644 --- 
a/tests/test_events/test_events.py +++ b/tests/test_events/test_events.py @@ -3,6 +3,8 @@ import random import boto3 from moto.events import mock_events +from botocore.exceptions import ClientError +from nose.tools import assert_raises RULES = [ @@ -171,11 +173,36 @@ def test_remove_targets(): assert(targets_before - 1 == targets_after) -if __name__ == '__main__': - test_list_rules() - test_describe_rule() - test_enable_disable_rule() - test_list_rule_names_by_target() - test_list_rules() - test_list_targets_by_rule() - test_remove_targets() +@mock_events +def test_permissions(): + client = boto3.client('events', 'eu-central-1') + + client.put_permission(Action='PutEvents', Principal='111111111111', StatementId='Account1') + client.put_permission(Action='PutEvents', Principal='222222222222', StatementId='Account2') + + resp = client.describe_event_bus() + assert len(resp['Policy']['Statement']) == 2 + + client.remove_permission(StatementId='Account2') + + resp = client.describe_event_bus() + assert len(resp['Policy']['Statement']) == 1 + assert resp['Policy']['Statement'][0]['Sid'] == 'Account1' + + +@mock_events +def test_put_events(): + client = boto3.client('events', 'eu-central-1') + + event = { + "Source": "com.mycompany.myapp", + "Detail": '{"key1": "value3", "key2": "value4"}', + "Resources": ["resource1", "resource2"], + "DetailType": "myDetailType" + } + + client.put_events(Entries=[event]) + # Boto3 would error if it didn't return 200 OK + + with assert_raises(ClientError): + client.put_events(Entries=[event]*20) diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py new file mode 100644 index 000000000..31631e459 --- /dev/null +++ b/tests/test_iot/test_iot.py @@ -0,0 +1,179 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa +from moto import mock_iot + + +@mock_iot +def test_things(): + client = boto3.client('iot', region_name='ap-northeast-1') + name = 'my-thing' + type_name = 'my-type-name' + + # thing type + thing_type = client.create_thing_type(thingTypeName=type_name) + thing_type.should.have.key('thingTypeName').which.should.equal(type_name) + thing_type.should.have.key('thingTypeArn') + + res = client.list_thing_types() + res.should.have.key('thingTypes').which.should.have.length_of(1) + for thing_type in res['thingTypes']: + thing_type.should.have.key('thingTypeName').which.should_not.be.none + + thing_type = client.describe_thing_type(thingTypeName=type_name) + thing_type.should.have.key('thingTypeName').which.should.equal(type_name) + thing_type.should.have.key('thingTypeProperties') + thing_type.should.have.key('thingTypeMetadata') + + # thing + thing = client.create_thing(thingName=name, thingTypeName=type_name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + res = client.list_things() + res.should.have.key('things').which.should.have.length_of(1) + for thing in res['things']: + thing.should.have.key('thingName').which.should_not.be.none + + thing = client.update_thing(thingName=name, attributePayload={'attributes': {'k1': 'v1'}}) + res = client.list_things() + res.should.have.key('things').which.should.have.length_of(1) + for thing in res['things']: + thing.should.have.key('thingName').which.should_not.be.none + res['things'][0]['attributes'].should.have.key('k1').which.should.equal('v1') + + thing = client.describe_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('defaultClientId') + 
thing.should.have.key('thingTypeName') + thing.should.have.key('attributes') + thing.should.have.key('version') + + # delete thing + client.delete_thing(thingName=name) + res = client.list_things() + res.should.have.key('things').which.should.have.length_of(0) + + # delete thing type + client.delete_thing_type(thingTypeName=type_name) + res = client.list_thing_types() + res.should.have.key('thingTypes').which.should.have.length_of(0) + + +@mock_iot +def test_certs(): + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=True) + cert.should.have.key('certificateArn').which.should_not.be.none + cert.should.have.key('certificateId').which.should_not.be.none + cert.should.have.key('certificatePem').which.should_not.be.none + cert.should.have.key('keyPair') + cert['keyPair'].should.have.key('PublicKey').which.should_not.be.none + cert['keyPair'].should.have.key('PrivateKey').which.should_not.be.none + cert_id = cert['certificateId'] + + cert = client.describe_certificate(certificateId=cert_id) + cert.should.have.key('certificateDescription') + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('certificateArn').which.should_not.be.none + cert_desc.should.have.key('certificateId').which.should_not.be.none + cert_desc.should.have.key('certificatePem').which.should_not.be.none + cert_desc.should.have.key('status').which.should.equal('ACTIVE') + + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(1) + for cert in res['certificates']: + cert.should.have.key('certificateArn').which.should_not.be.none + cert.should.have.key('certificateId').which.should_not.be.none + cert.should.have.key('status').which.should_not.be.none + cert.should.have.key('creationDate').which.should_not.be.none + + client.update_certificate(certificateId=cert_id, newStatus='REVOKED') + cert = client.describe_certificate(certificateId=cert_id) + cert_desc.should.have.key('status').which.should.equal('ACTIVE') + + client.delete_certificate(certificateId=cert_id) + res = client.list_certificates() + res.should.have.key('certificates').which.should.have.length_of(0) + +@mock_iot +def test_policy(): + client = boto3.client('iot', region_name='ap-northeast-1') + name = 'my-policy' + doc = '{}' + policy = client.create_policy(policyName=name, policyDocument=doc) + policy.should.have.key('policyName').which.should.equal(name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(doc) + policy.should.have.key('policyVersionId').which.should.equal('1') + + policy = client.get_policy(policyName=name) + policy.should.have.key('policyName').which.should.equal(name) + policy.should.have.key('policyArn').which.should_not.be.none + policy.should.have.key('policyDocument').which.should.equal(doc) + policy.should.have.key('defaultVersionId').which.should.equal('1') + + res = client.list_policies() + res.should.have.key('policies').which.should.have.length_of(1) + for policy in res['policies']: + policy.should.have.key('policyName').which.should_not.be.none + policy.should.have.key('policyArn').which.should_not.be.none + + client.delete_policy(policyName=name) + res = client.list_policies() + res.should.have.key('policies').which.should.have.length_of(0) + + +@mock_iot +def test_principal_policy(): + client = boto3.client('iot', region_name='ap-northeast-1') + policy_name = 'my-policy' + doc = '{}' + policy = 
client.create_policy(policyName=policy_name, policyDocument=doc) + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + + client.attach_principal_policy(policyName=policy_name, principal=cert_arn) + + res = client.list_principal_policies(principal=cert_arn) + res.should.have.key('policies').which.should.have.length_of(1) + for policy in res['policies']: + policy.should.have.key('policyName').which.should_not.be.none + policy.should.have.key('policyArn').which.should_not.be.none + + res = client.list_policy_principals(policyName=policy_name) + res.should.have.key('principals').which.should.have.length_of(1) + for principal in res['principals']: + principal.should_not.be.none + + client.detach_principal_policy(policyName=policy_name, principal=cert_arn) + res = client.list_principal_policies(principal=cert_arn) + res.should.have.key('policies').which.should.have.length_of(0) + res = client.list_policy_principals(policyName=policy_name) + res.should.have.key('principals').which.should.have.length_of(0) + + +@mock_iot +def test_principal_thing(): + client = boto3.client('iot', region_name='ap-northeast-1') + thing_name = 'my-thing' + thing = client.create_thing(thingName=thing_name) + cert = client.create_keys_and_certificate(setAsActive=True) + cert_arn = cert['certificateArn'] + + client.attach_thing_principal(thingName=thing_name, principal=cert_arn) + + res = client.list_principal_things(principal=cert_arn) + res.should.have.key('things').which.should.have.length_of(1) + for thing in res['things']: + thing.should_not.be.none + res = client.list_thing_principals(thingName=thing_name) + res.should.have.key('principals').which.should.have.length_of(1) + for principal in res['principals']: + principal.should_not.be.none + + client.detach_thing_principal(thingName=thing_name, principal=cert_arn) + res = client.list_principal_things(principal=cert_arn) + res.should.have.key('things').which.should.have.length_of(0) + res = client.list_thing_principals(thingName=thing_name) + res.should.have.key('principals').which.should.have.length_of(0) diff --git a/tests/test_iot/test_server.py b/tests/test_iot/test_server.py new file mode 100644 index 000000000..47091531a --- /dev/null +++ b/tests/test_iot/test_server.py @@ -0,0 +1,19 @@ +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server +from moto import mock_iot + +''' +Test the different server responses +''' + +@mock_iot +def test_iot_list(): + backend = server.create_backend_app("iot") + test_client = backend.test_client() + + # just making sure that server is up + res = test_client.get('/things') + res.status_code.should.equal(404) diff --git a/tests/test_iotdata/test_iotdata.py b/tests/test_iotdata/test_iotdata.py new file mode 100644 index 000000000..09c1ada4c --- /dev/null +++ b/tests/test_iotdata/test_iotdata.py @@ -0,0 +1,93 @@ +from __future__ import unicode_literals + +import json +import boto3 +import sure # noqa +from nose.tools import assert_raises +from botocore.exceptions import ClientError +from moto import mock_iotdata, mock_iot + + +@mock_iot +@mock_iotdata +def test_basic(): + iot_client = boto3.client('iot', region_name='ap-northeast-1') + client = boto3.client('iot-data', region_name='ap-northeast-1') + name = 'my-thing' + raw_payload = b'{"state": {"desired": {"led": "on"}}}' + iot_client.create_thing(thingName=name) + + with assert_raises(ClientError): + client.get_thing_shadow(thingName=name) + + res = client.update_thing_shadow(thingName=name, 
payload=raw_payload) + + payload = json.loads(res['payload'].read()) + expected_state = '{"desired": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(1) + payload.should.have.key('timestamp') + + res = client.get_thing_shadow(thingName=name) + payload = json.loads(res['payload'].read()) + expected_state = b'{"desired": {"led": "on"}, "delta": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(1) + payload.should.have.key('timestamp') + + client.delete_thing_shadow(thingName=name) + with assert_raises(ClientError): + client.get_thing_shadow(thingName=name) + + +@mock_iot +@mock_iotdata +def test_update(): + iot_client = boto3.client('iot', region_name='ap-northeast-1') + client = boto3.client('iot-data', region_name='ap-northeast-1') + name = 'my-thing' + raw_payload = b'{"state": {"desired": {"led": "on"}}}' + iot_client.create_thing(thingName=name) + + # first update + res = client.update_thing_shadow(thingName=name, payload=raw_payload) + payload = json.loads(res['payload'].read()) + expected_state = '{"desired": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(1) + payload.should.have.key('timestamp') + + res = client.get_thing_shadow(thingName=name) + payload = json.loads(res['payload'].read()) + expected_state = b'{"desired": {"led": "on"}, "delta": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(1) + payload.should.have.key('timestamp') + + # reporting new state + new_payload = b'{"state": {"reported": {"led": "on"}}}' + res = client.update_thing_shadow(thingName=name, payload=new_payload) + payload = json.loads(res['payload'].read()) + expected_state = '{"reported": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('reported').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(2) + payload.should.have.key('timestamp') + + res = client.get_thing_shadow(thingName=name) + payload = json.loads(res['payload'].read()) + expected_state = b'{"desired": {"led": "on"}, "reported": {"led": "on"}}' + payload.should.have.key('state').which.should.equal(json.loads(expected_state)) + payload.should.have.key('metadata').which.should.have.key('desired').which.should.have.key('led') + payload.should.have.key('version').which.should.equal(2) + payload.should.have.key('timestamp') + + +@mock_iotdata +def test_publish(): + client = boto3.client('iot-data', region_name='ap-northeast-1') + client.publish(topic='test/topic', qos=1, payload=b'') diff --git a/tests/test_iotdata/test_server.py b/tests/test_iotdata/test_server.py new file mode 100644 index 000000000..42a5c5f22 --- /dev/null +++ b/tests/test_iotdata/test_server.py @@ -0,0 +1,20 @@ +from __future__ 
import unicode_literals + +import sure # noqa + +import moto.server as server +from moto import mock_iotdata + +''' +Test the different server responses +''' + +@mock_iotdata +def test_iotdata_list(): + backend = server.create_backend_app("iot-data") + test_client = backend.test_client() + + # just making sure that server is up + thing_name = 'nothing' + res = test_client.get('/things/{}/shadow'.format(thing_name)) + res.status_code.should.equal(404) diff --git a/tests/test_kms/test_kms.py b/tests/test_kms/test_kms.py index 8d034c7ff..96715de71 100644 --- a/tests/test_kms/test_kms.py +++ b/tests/test_kms/test_kms.py @@ -491,7 +491,14 @@ def test__delete_alias(): key_id = create_resp['KeyMetadata']['KeyId'] alias = 'alias/my-alias' + # added another alias here to make sure that the deletion of the alias can + # be done when there are multiple existing aliases. + another_create_resp = kms.create_key() + another_key_id = create_resp['KeyMetadata']['KeyId'] + another_alias = 'alias/another-alias' + kms.create_alias(alias, key_id) + kms.create_alias(another_alias, another_key_id) resp = kms.delete_alias(alias) diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 4ab7dbc60..183a183b1 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -50,7 +50,7 @@ def test_stop_database(): # test stopping database should shutdown response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - response['DBInstance']['DBInstanceStatus'].should.equal('shutdown') + response['DBInstance']['DBInstanceStatus'].should.equal('stopped') # test rdsclient error when trying to stop an already stopped database conn.stop_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) # test stopping a stopped database with snapshot should error and no snapshot should exist for that call @@ -76,10 +76,10 @@ def test_start_database(): mydb['DBInstanceStatus'].should.equal('available') # test starting an already started database should error conn.start_db_instance.when.called_with(DBInstanceIdentifier=mydb['DBInstanceIdentifier']).should.throw(ClientError) - # stop and test start - should go from shutdown to available, create snapshot and check snapshot + # stop and test start - should go from stopped to available, create snapshot and check snapshot response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier'], DBSnapshotIdentifier='rocky4570-rds-snap') response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - response['DBInstance']['DBInstanceStatus'].should.equal('shutdown') + response['DBInstance']['DBInstanceStatus'].should.equal('stopped') response = conn.describe_db_snapshots() response['DBSnapshots'][0]['DBSnapshotIdentifier'].should.equal('rocky4570-rds-snap') response = conn.start_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) @@ -93,7 +93,7 @@ def test_start_database(): # test stopping database not invoking snapshot should succeed. 
response = conn.stop_db_instance(DBInstanceIdentifier=mydb['DBInstanceIdentifier']) response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) - response['DBInstance']['DBInstanceStatus'].should.equal('shutdown') + response['DBInstance']['DBInstanceStatus'].should.equal('stopped') @@ -225,6 +225,28 @@ def test_modify_db_instance(): instances['DBInstances'][0]['AllocatedStorage'].should.equal(20) +@mock_rds2 +def test_rename_db_instance(): + conn = boto3.client('rds', region_name='us-west-2') + database = conn.create_db_instance(DBInstanceIdentifier='db-master-1', + AllocatedStorage=10, + DBInstanceClass='db.m1.small', + Engine='postgres', + MasterUsername='root', + MasterUserPassword='hunter2', + Port=1234, + DBSecurityGroups=['my_sg']) + instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-1") + list(instances['DBInstances']).should.have.length_of(1) + conn.describe_db_instances.when.called_with(DBInstanceIdentifier="db-master-2").should.throw(ClientError) + conn.modify_db_instance(DBInstanceIdentifier='db-master-1', + NewDBInstanceIdentifier='db-master-2', + ApplyImmediately=True) + conn.describe_db_instances.when.called_with(DBInstanceIdentifier="db-master-1").should.throw(ClientError) + instances = conn.describe_db_instances(DBInstanceIdentifier="db-master-2") + list(instances['DBInstances']).should.have.length_of(1) + + @mock_rds2 def test_modify_non_existant_database(): conn = boto3.client('rds', region_name='us-west-2') diff --git a/tests/test_redshift/test_server.py b/tests/test_redshift/test_server.py index 4e950fc74..c37e9cab7 100644 --- a/tests/test_redshift/test_server.py +++ b/tests/test_redshift/test_server.py @@ -19,4 +19,4 @@ def test_describe_clusters(): res = test_client.get('/?Action=DescribeClusters') result = res.data.decode("utf-8") - result.should.contain("") diff --git a/tests/test_resourcegroupstaggingapi/__init__.py b/tests/test_resourcegroupstaggingapi/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py new file mode 100644 index 000000000..cce0f1b99 --- /dev/null +++ b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py @@ -0,0 +1,226 @@ +from __future__ import unicode_literals + +import boto3 +import sure # noqa +from moto import mock_resourcegroupstaggingapi, mock_s3, mock_ec2 + + +@mock_s3 +@mock_resourcegroupstaggingapi +def test_get_resources_s3(): + # Tests pagination + s3_client = boto3.client('s3', region_name='eu-central-1') + + # Will end up having key1,key2,key3,key4 + response_keys = set() + + # Create 4 buckets + for i in range(1, 5): + i_str = str(i) + s3_client.create_bucket(Bucket='test_bucket' + i_str) + s3_client.put_bucket_tagging( + Bucket='test_bucket' + i_str, + Tagging={'TagSet': [{'Key': 'key' + i_str, 'Value': 'value' + i_str}]} + ) + response_keys.add('key' + i_str) + + rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') + resp = rtapi.get_resources(ResourcesPerPage=2) + for resource in resp['ResourceTagMappingList']: + response_keys.remove(resource['Tags'][0]['Key']) + + response_keys.should.have.length_of(2) + + resp = rtapi.get_resources( + ResourcesPerPage=2, + PaginationToken=resp['PaginationToken'] + ) + for resource in resp['ResourceTagMappingList']: + response_keys.remove(resource['Tags'][0]['Key']) + + response_keys.should.have.length_of(0) + + +@mock_ec2 +@mock_resourcegroupstaggingapi +def
test_get_resources_ec2(): + client = boto3.client('ec2', region_name='eu-central-1') + + instances = client.run_instances( + ImageId='ami-123', + MinCount=1, + MaxCount=1, + InstanceType='t2.micro', + TagSpecifications=[ + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG1', + 'Value': 'MY_VALUE1', + }, + { + 'Key': 'MY_TAG2', + 'Value': 'MY_VALUE2', + }, + ], + }, + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG3', + 'Value': 'MY_VALUE3', + }, + ] + }, + ], + ) + instance_id = instances['Instances'][0]['InstanceId'] + image_id = client.create_image(Name='testami', InstanceId=instance_id)['ImageId'] + + client.create_tags( + Resources=[image_id], + Tags=[{'Key': 'ami', 'Value': 'test'}] + ) + + rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') + resp = rtapi.get_resources() + # Check we have 1 entry for Instance, 1 Entry for AMI + resp['ResourceTagMappingList'].should.have.length_of(2) + + # 1 Entry for AMI + resp = rtapi.get_resources(ResourceTypeFilters=['ec2:image']) + resp['ResourceTagMappingList'].should.have.length_of(1) + resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('image/') + + # As were iterating the same data, this rules out that the test above was a fluke + resp = rtapi.get_resources(ResourceTypeFilters=['ec2:instance']) + resp['ResourceTagMappingList'].should.have.length_of(1) + resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('instance/') + + # Basic test of tag filters + resp = rtapi.get_resources(TagFilters=[{'Key': 'MY_TAG1', 'Values': ['MY_VALUE1', 'some_other_value']}]) + resp['ResourceTagMappingList'].should.have.length_of(1) + resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('instance/') + + +@mock_ec2 +@mock_resourcegroupstaggingapi +def test_get_tag_keys_ec2(): + client = boto3.client('ec2', region_name='eu-central-1') + + client.run_instances( + ImageId='ami-123', + MinCount=1, + MaxCount=1, + InstanceType='t2.micro', + TagSpecifications=[ + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG1', + 'Value': 'MY_VALUE1', + }, + { + 'Key': 'MY_TAG2', + 'Value': 'MY_VALUE2', + }, + ], + }, + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG3', + 'Value': 'MY_VALUE3', + }, + ] + }, + ], + ) + + rtapi = boto3.client('resourcegroupstaggingapi', region_name='eu-central-1') + resp = rtapi.get_tag_keys() + + resp['TagKeys'].should.contain('MY_TAG1') + resp['TagKeys'].should.contain('MY_TAG2') + resp['TagKeys'].should.contain('MY_TAG3') + + # TODO test pagenation + + +@mock_ec2 +@mock_resourcegroupstaggingapi +def test_get_tag_values_ec2(): + client = boto3.client('ec2', region_name='eu-central-1') + + client.run_instances( + ImageId='ami-123', + MinCount=1, + MaxCount=1, + InstanceType='t2.micro', + TagSpecifications=[ + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG1', + 'Value': 'MY_VALUE1', + }, + { + 'Key': 'MY_TAG2', + 'Value': 'MY_VALUE2', + }, + ], + }, + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG3', + 'Value': 'MY_VALUE3', + }, + ] + }, + ], + ) + client.run_instances( + ImageId='ami-123', + MinCount=1, + MaxCount=1, + InstanceType='t2.micro', + TagSpecifications=[ + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG1', + 'Value': 'MY_VALUE4', + }, + { + 'Key': 'MY_TAG2', + 'Value': 'MY_VALUE5', + }, + ], + }, + { + 'ResourceType': 'instance', + 'Tags': [ + { + 'Key': 'MY_TAG3', + 'Value': 'MY_VALUE6', + }, + ] + }, + ], + ) + + rtapi = boto3.client('resourcegroupstaggingapi', 
region_name='eu-central-1') + resp = rtapi.get_tag_values(Key='MY_TAG1') + + resp['TagValues'].should.contain('MY_VALUE1') + resp['TagValues'].should.contain('MY_VALUE4') + + # TODO test pagenation \ No newline at end of file diff --git a/tests/test_resourcegroupstaggingapi/test_server.py b/tests/test_resourcegroupstaggingapi/test_server.py new file mode 100644 index 000000000..311b1f03e --- /dev/null +++ b/tests/test_resourcegroupstaggingapi/test_server.py @@ -0,0 +1,24 @@ +from __future__ import unicode_literals + +import sure # noqa + +import moto.server as server + +''' +Test the different server responses +''' + + +def test_resourcegroupstaggingapi_list(): + backend = server.create_backend_app("resourcegroupstaggingapi") + test_client = backend.test_client() + # do test + + headers = { + 'X-Amz-Target': 'ResourceGroupsTaggingAPI_20170126.GetResources', + 'X-Amz-Date': '20171114T234623Z' + } + resp = test_client.post('/', headers=headers, data='{}') + + assert resp.status_code == 200 + assert b'ResourceTagMappingList' in resp.data diff --git a/tests/test_route53/test_route53.py b/tests/test_route53/test_route53.py index ac8d6e7ad..76217b9d9 100644 --- a/tests/test_route53/test_route53.py +++ b/tests/test_route53/test_route53.py @@ -119,8 +119,10 @@ def test_rrset(): rrsets = conn.get_all_rrsets( zoneid, name="bar.foo.testdns.aws.com", type="A") - rrsets.should.have.length_of(1) - rrsets[0].resource_records[0].should.equal('5.6.7.8') + rrsets.should.have.length_of(2) + resource_records = [rr for rr_set in rrsets for rr in rr_set.resource_records] + resource_records.should.contain('1.2.3.4') + resource_records.should.contain('5.6.7.8') rrsets = conn.get_all_rrsets( zoneid, name="foo.foo.testdns.aws.com", type="A") @@ -160,7 +162,10 @@ def test_alias_rrset(): changes.commit() rrsets = conn.get_all_rrsets(zoneid, type="A") - rrsets.should.have.length_of(1) + rrset_records = [(rr_set.name, rr) for rr_set in rrsets for rr in rr_set.resource_records] + rrset_records.should.have.length_of(2) + rrset_records.should.contain(('foo.alias.testdns.aws.com', 'foo.testdns.aws.com')) + rrset_records.should.contain(('bar.alias.testdns.aws.com', 'bar.testdns.aws.com')) rrsets[0].resource_records[0].should.equal('foo.testdns.aws.com') rrsets = conn.get_all_rrsets(zoneid, type="CNAME") rrsets.should.have.length_of(1) @@ -647,3 +652,60 @@ def test_change_resource_record_invalid(): response = conn.list_resource_record_sets(HostedZoneId=hosted_zone_id) len(response['ResourceRecordSets']).should.equal(0) + + +@mock_route53 +def test_list_resource_record_sets_name_type_filters(): + conn = boto3.client('route53', region_name='us-east-1') + create_hosted_zone_response = conn.create_hosted_zone( + Name="db.", + CallerReference=str(hash('foo')), + HostedZoneConfig=dict( + PrivateZone=True, + Comment="db", + ) + ) + hosted_zone_id = create_hosted_zone_response['HostedZone']['Id'] + + def create_resource_record_set(rec_type, rec_name): + payload = { + 'Comment': 'create {} record {}'.format(rec_type, rec_name), + 'Changes': [ + { + 'Action': 'CREATE', + 'ResourceRecordSet': { + 'Name': rec_name, + 'Type': rec_type, + 'TTL': 10, + 'ResourceRecords': [{ + 'Value': '127.0.0.1' + }] + } + } + ] + } + conn.change_resource_record_sets(HostedZoneId=hosted_zone_id, ChangeBatch=payload) + + # record_type, record_name + all_records = [ + ('A', 'a.a.db'), + ('A', 'a.b.db'), + ('A', 'b.b.db'), + ('CNAME', 'b.b.db'), + ('CNAME', 'b.c.db'), + ('CNAME', 'c.c.db') + ] + for record_type, record_name in all_records: + 
create_resource_record_set(record_type, record_name) + + start_with = 2 + response = conn.list_resource_record_sets( + HostedZoneId=hosted_zone_id, + StartRecordType=all_records[start_with][0], + StartRecordName=all_records[start_with][1] + ) + + returned_records = [(record['Type'], record['Name']) for record in response['ResourceRecordSets']] + len(returned_records).should.equal(len(all_records) - start_with) + for desired_record in all_records[start_with:]: + returned_records.should.contain(desired_record) diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index e4cb499b9..829941d79 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - from __future__ import unicode_literals import datetime @@ -1364,6 +1363,29 @@ def test_boto3_head_object_with_versioning(): old_head_object['ContentLength'].should.equal(len(old_content)) +@mock_s3 +def test_boto3_copy_object_with_versioning(): + client = boto3.client('s3', region_name='us-east-1') + + client.create_bucket(Bucket='blah', CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'}) + client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'}) + + client.put_object(Bucket='blah', Key='test1', Body=b'test1') + client.put_object(Bucket='blah', Key='test2', Body=b'test2') + + obj1_version = client.get_object(Bucket='blah', Key='test1')['VersionId'] + obj2_version = client.get_object(Bucket='blah', Key='test2')['VersionId'] + + # Versions should be the same + obj1_version.should.equal(obj2_version) + + client.copy_object(CopySource={'Bucket': 'blah', 'Key': 'test1'}, Bucket='blah', Key='test2') + obj2_version_new = client.get_object(Bucket='blah', Key='test2')['VersionId'] + + # Version should be different to previous version + obj2_version_new.should_not.equal(obj2_version) + + @mock_s3 def test_boto3_head_object_if_modified_since(): s3 = boto3.client('s3', region_name='us-east-1') @@ -1752,6 +1774,30 @@ def test_boto3_put_object_tagging(): resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) +@mock_s3 +def test_boto3_put_object_tagging_with_single_tag(): + s3 = boto3.client('s3', region_name='us-east-1') + bucket_name = 'mybucket' + key = 'key-with-tags' + s3.create_bucket(Bucket=bucket_name) + + s3.put_object( + Bucket=bucket_name, + Key=key, + Body='test' + ) + + resp = s3.put_object_tagging( + Bucket=bucket_name, + Key=key, + Tagging={'TagSet': [ + {'Key': 'item1', 'Value': 'foo'} + ]} + ) + + resp['ResponseMetadata']['HTTPStatusCode'].should.equal(200) + + @mock_s3 def test_boto3_get_object_tagging(): s3 = boto3.client('s3', region_name='us-east-1') @@ -1818,7 +1864,7 @@ def test_boto3_list_object_versions(): def test_boto3_delete_markers(): s3 = boto3.client('s3', region_name='us-east-1') bucket_name = 'mybucket' - key = 'key-with-versions' + key = u'key-with-versions-and-unicode-ó' s3.create_bucket(Bucket=bucket_name) s3.put_bucket_versioning( Bucket=bucket_name, @@ -1833,10 +1879,9 @@ def test_boto3_delete_markers(): Key=key, Body=body ) - s3.delete_object( - Bucket=bucket_name, - Key=key - ) + + s3.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': key}]}) + with assert_raises(ClientError) as e: s3.get_object( Bucket=bucket_name, @@ -1858,12 +1903,18 @@ def test_boto3_delete_markers(): Bucket=bucket_name ) response['Versions'].should.have.length_of(2) - response['Versions'][-1]['IsLatest'].should.be.true - response['Versions'][0]['IsLatest'].should.be.false - [(key_metadata['Key'], key_metadata['VersionId']) - 
for key_metadata in response['Versions']].should.equal( - [('key-with-versions', '0'), ('key-with-versions', '1')] - ) + + # We've asserted there are only 2 records so one is newest, one is oldest + latest = list(filter(lambda item: item['IsLatest'], response['Versions']))[0] + oldest = list(filter(lambda item: not item['IsLatest'], response['Versions']))[0] + + # Double check ordering of version ID's + latest['VersionId'].should.equal('1') + oldest['VersionId'].should.equal('0') + + # Double check the name is still unicode + latest['Key'].should.equal('key-with-versions-and-unicode-ó') + oldest['Key'].should.equal('key-with-versions-and-unicode-ó') @mock_s3 diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 6228f212f..1540ceb84 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -177,3 +177,33 @@ def test_publish_to_http(): response = conn.publish( TopicArn=topic_arn, Message="my message", Subject="my subject") message_id = response['MessageId'] + + +@mock_sqs +@mock_sns +def test_publish_subject(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + sqs_conn = boto3.resource('sqs', region_name='us-east-1') + sqs_conn.create_queue(QueueName="test-queue") + + conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") + message = 'my message' + subject1 = 'test subject' + subject2 = 'test subject' * 20 + with freeze_time("2015-01-01 12:00:00"): + conn.publish(TopicArn=topic_arn, Message=message, Subject=subject1) + + # Just that it doesn't error is a pass + try: + with freeze_time("2015-01-01 12:00:00"): + conn.publish(TopicArn=topic_arn, Message=message, Subject=subject2) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameter') + else: + raise RuntimeError('Should have raised an InvalidParameter exception') diff --git a/tests/test_sns/test_server.py b/tests/test_sns/test_server.py index ce505278f..465dfa2c2 100644 --- a/tests/test_sns/test_server.py +++ b/tests/test_sns/test_server.py @@ -13,12 +13,12 @@ def test_sns_server_get(): backend = server.create_backend_app("sns") test_client = backend.test_client() - topic_data = test_client.action_data("CreateTopic", Name="test topic") + topic_data = test_client.action_data("CreateTopic", Name="testtopic") topic_data.should.contain("CreateTopicResult") topic_data.should.contain( - "arn:aws:sns:us-east-1:123456789012:test topic") + "arn:aws:sns:us-east-1:123456789012:testtopic") topics_data = test_client.action_data("ListTopics") topics_data.should.contain("ListTopicsResult") topic_data.should.contain( - "arn:aws:sns:us-east-1:123456789012:test topic") + "arn:aws:sns:us-east-1:123456789012:testtopic") diff --git a/tests/test_sns/test_topics_boto3.py b/tests/test_sns/test_topics_boto3.py index a9c2a2904..95dd41f89 100644 --- a/tests/test_sns/test_topics_boto3.py +++ b/tests/test_sns/test_topics_boto3.py @@ -31,6 +31,26 @@ def test_create_and_delete_topic(): topics = topics_json["Topics"] topics.should.have.length_of(0) +@mock_sns +def test_create_topic_should_be_idempotent(): + conn = boto3.client("sns", region_name="us-east-1") + topic_arn = conn.create_topic(Name="some-topic")['TopicArn'] + conn.set_topic_attributes( + TopicArn=topic_arn, + AttributeName="DisplayName", + AttributeValue="should_be_set" + ) + topic_display_name =
conn.get_topic_attributes( + TopicArn=topic_arn + )['Attributes']['DisplayName'] + topic_display_name.should.be.equal("should_be_set") + + # recreate topic to prove idempotency + topic_arn = conn.create_topic(Name="some-topic")['TopicArn'] + topic_display_name = conn.get_topic_attributes( + TopicArn=topic_arn + )['Attributes']['DisplayName'] + topic_display_name.should.be.equal("should_be_set") @mock_sns def test_get_missing_topic(): @@ -38,6 +58,27 @@ def test_get_missing_topic(): conn.get_topic_attributes.when.called_with( TopicArn="a-fake-arn").should.throw(ClientError) +@mock_sns +def test_create_topic_must_meet_constraints(): + conn = boto3.client("sns", region_name="us-east-1") + common_random_chars = [':', ";", "!", "@", "|", "^", "%"] + for char in common_random_chars: + conn.create_topic.when.called_with( + Name="no%s_invalidchar" % char).should.throw(ClientError) + conn.create_topic.when.called_with( + Name="no spaces allowed").should.throw(ClientError) + + +@mock_sns +def test_create_topic_should_be_of_certain_length(): + conn = boto3.client("sns", region_name="us-east-1") + too_short = "" + conn.create_topic.when.called_with( + Name=too_short).should.throw(ClientError) + too_long = "x" * 257 + conn.create_topic.when.called_with( + Name=too_long).should.throw(ClientError) + @mock_sns def test_create_topic_in_multiple_regions(): diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index 9c439eb68..c761ec8d9 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -1,20 +1,26 @@ # -*- coding: utf-8 -*- from __future__ import unicode_literals +import os import boto import boto3 import botocore.exceptions +from botocore.exceptions import ClientError from boto.exception import SQSError from boto.sqs.message import RawMessage, Message +from freezegun import freeze_time import base64 +import json import sure # noqa import time +import uuid from moto import settings, mock_sqs, mock_sqs_deprecated from tests.helpers import requires_boto_gte import tests.backport_assert_raises # noqa from nose.tools import assert_raises +from nose import SkipTest @mock_sqs @@ -33,6 +39,7 @@ def test_create_fifo_queue_fail(): else: raise RuntimeError('Should of raised InvalidParameterValue Exception') + @mock_sqs def test_create_fifo_queue(): sqs = boto3.client('sqs', region_name='us-east-1') @@ -49,10 +56,10 @@ def test_create_fifo_queue(): response['Attributes']['FifoQueue'].should.equal('true') - @mock_sqs def test_create_queue(): sqs = boto3.resource('sqs', region_name='us-east-1') + new_queue = sqs.create_queue(QueueName='test-queue') new_queue.should_not.be.none new_queue.should.have.property('url').should.contain('test-queue') @@ -66,10 +73,19 @@ def test_create_queue(): @mock_sqs -def test_get_inexistent_queue(): +def test_get_nonexistent_queue(): sqs = boto3.resource('sqs', region_name='us-east-1') - sqs.get_queue_by_name.when.called_with( - QueueName='nonexisting-queue').should.throw(botocore.exceptions.ClientError) + with assert_raises(ClientError) as err: + sqs.get_queue_by_name(QueueName='nonexisting-queue') + ex = err.exception + ex.operation_name.should.equal('GetQueueUrl') + ex.response['Error']['Code'].should.equal('QueueDoesNotExist') + + with assert_raises(ClientError) as err: + sqs.Queue('http://whatever-incorrect-queue-address').load() + ex = err.exception + ex.operation_name.should.equal('GetQueueAttributes') + ex.response['Error']['Code'].should.equal('QueueDoesNotExist') @mock_sqs @@ -82,8 +98,6 @@ def test_message_send_without_attributes():
msg.get('MD5OfMessageBody').should.equal( '58fd9edd83341c29f1aebba81c31e257') msg.shouldnt.have.key('MD5OfMessageAttributes') - msg.get('ResponseMetadata', {}).get('RequestId').should.equal( - '27daac76-34dd-47df-bd01-1f6e873584a0') msg.get('MessageId').should_not.contain(' \n') messages = queue.receive_messages() @@ -107,8 +121,6 @@ def test_message_send_with_attributes(): '58fd9edd83341c29f1aebba81c31e257') msg.get('MD5OfMessageAttributes').should.equal( '235c5c510d26fb653d073faed50ae77c') - msg.get('ResponseMetadata', {}).get('RequestId').should.equal( - '27daac76-34dd-47df-bd01-1f6e873584a0') msg.get('MessageId').should_not.contain(' \n') messages = queue.receive_messages() @@ -132,8 +144,6 @@ def test_message_with_complex_attributes(): '58fd9edd83341c29f1aebba81c31e257') msg.get('MD5OfMessageAttributes').should.equal( '8ae21a7957029ef04146b42aeaa18a22') - msg.get('ResponseMetadata', {}).get('RequestId').should.equal( - '27daac76-34dd-47df-bd01-1f6e873584a0') msg.get('MessageId').should_not.contain(' \n') messages = queue.receive_messages() @@ -744,3 +754,181 @@ def test_delete_message_after_visibility_timeout(): m1_retrieved.delete() assert new_queue.count() == 0 + + +@mock_sqs +def test_batch_change_message_visibility(): + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true': + raise SkipTest('Cant manipulate time in server mode') + + with freeze_time("2015-01-01 12:00:00"): + sqs = boto3.client('sqs', region_name='us-east-1') + resp = sqs.create_queue( + QueueName='test-dlr-queue.fifo', + Attributes={'FifoQueue': 'true'} + ) + queue_url = resp['QueueUrl'] + + sqs.send_message(QueueUrl=queue_url, MessageBody='msg1') + sqs.send_message(QueueUrl=queue_url, MessageBody='msg2') + sqs.send_message(QueueUrl=queue_url, MessageBody='msg3') + + with freeze_time("2015-01-01 12:01:00"): + receive_resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=2) + len(receive_resp['Messages']).should.equal(2) + + handles = [item['ReceiptHandle'] for item in receive_resp['Messages']] + entries = [{'Id': str(uuid.uuid4()), 'ReceiptHandle': handle, 'VisibilityTimeout': 43200} for handle in handles] + + resp = sqs.change_message_visibility_batch(QueueUrl=queue_url, Entries=entries) + len(resp['Successful']).should.equal(2) + + with freeze_time("2015-01-01 14:00:00"): + resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3) + len(resp['Messages']).should.equal(1) + + with freeze_time("2015-01-01 16:00:00"): + resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3) + len(resp['Messages']).should.equal(1) + + with freeze_time("2015-01-02 12:00:00"): + resp = sqs.receive_message(QueueUrl=queue_url, MaxNumberOfMessages=3) + len(resp['Messages']).should.equal(3) + + +@mock_sqs +def test_permissions(): + client = boto3.client('sqs', region_name='us-east-1') + + resp = client.create_queue( + QueueName='test-dlr-queue.fifo', + Attributes={'FifoQueue': 'true'} + ) + queue_url = resp['QueueUrl'] + + client.add_permission(QueueUrl=queue_url, Label='account1', AWSAccountIds=['111111111111'], Actions=['*']) + client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SendMessage']) + + with assert_raises(ClientError): + client.add_permission(QueueUrl=queue_url, Label='account2', AWSAccountIds=['222211111111'], Actions=['SomeRubbish']) + + client.remove_permission(QueueUrl=queue_url, Label='account2') + + with assert_raises(ClientError): + client.remove_permission(QueueUrl=queue_url, Label='non_existant') + + +@mock_sqs +def 
test_tags(): + client = boto3.client('sqs', region_name='us-east-1') + + resp = client.create_queue( + QueueName='test-dlr-queue.fifo', + Attributes={'FifoQueue': 'true'} + ) + queue_url = resp['QueueUrl'] + + client.tag_queue( + QueueUrl=queue_url, + Tags={ + 'test1': 'value1', + 'test2': 'value2', + } + ) + + resp = client.list_queue_tags(QueueUrl=queue_url) + resp['Tags'].should.contain('test1') + resp['Tags'].should.contain('test2') + + client.untag_queue( + QueueUrl=queue_url, + TagKeys=['test2'] + ) + + resp = client.list_queue_tags(QueueUrl=queue_url) + resp['Tags'].should.contain('test1') + resp['Tags'].should_not.contain('test2') + + +@mock_sqs +def test_create_fifo_queue_with_dlq(): + sqs = boto3.client('sqs', region_name='us-east-1') + resp = sqs.create_queue( + QueueName='test-dlr-queue.fifo', + Attributes={'FifoQueue': 'true'} + ) + queue_url1 = resp['QueueUrl'] + queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn'] + + resp = sqs.create_queue( + QueueName='test-dlr-queue', + Attributes={'FifoQueue': 'false'} + ) + queue_url2 = resp['QueueUrl'] + queue_arn2 = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes']['QueueArn'] + + sqs.create_queue( + QueueName='test-queue.fifo', + Attributes={ + 'FifoQueue': 'true', + 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2}) + } + ) + + # Cant have fifo queue with non fifo DLQ + with assert_raises(ClientError): + sqs.create_queue( + QueueName='test-queue2.fifo', + Attributes={ + 'FifoQueue': 'true', + 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn2, 'maxReceiveCount': 2}) + } + ) + + +@mock_sqs +def test_queue_with_dlq(): + if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true': + raise SkipTest('Cant manipulate time in server mode') + + sqs = boto3.client('sqs', region_name='us-east-1') + + with freeze_time("2015-01-01 12:00:00"): + resp = sqs.create_queue( + QueueName='test-dlr-queue.fifo', + Attributes={'FifoQueue': 'true'} + ) + queue_url1 = resp['QueueUrl'] + queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn'] + + resp = sqs.create_queue( + QueueName='test-queue.fifo', + Attributes={ + 'FifoQueue': 'true', + 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1, 'maxReceiveCount': 2}) + } + ) + queue_url2 = resp['QueueUrl'] + + sqs.send_message(QueueUrl=queue_url2, MessageBody='msg1') + sqs.send_message(QueueUrl=queue_url2, MessageBody='msg2') + + with freeze_time("2015-01-01 13:00:00"): + resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0) + resp['Messages'][0]['Body'].should.equal('msg1') + + with freeze_time("2015-01-01 13:01:00"): + resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0) + resp['Messages'][0]['Body'].should.equal('msg1') + + with freeze_time("2015-01-01 13:02:00"): + resp = sqs.receive_message(QueueUrl=queue_url2, VisibilityTimeout=30, WaitTimeSeconds=0) + len(resp['Messages']).should.equal(1) + + resp = sqs.receive_message(QueueUrl=queue_url1, VisibilityTimeout=30, WaitTimeSeconds=0) + resp['Messages'][0]['Body'].should.equal('msg1') + + # Might as well test list source queues + + resp = sqs.list_dead_letter_source_queues(QueueUrl=queue_url1) + resp['queueUrls'][0].should.equal(queue_url2) diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index 7f4aca533..ff8e5e8a4 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -3,6 +3,7 @@ from 
__future__ import unicode_literals import boto3 import botocore.exceptions import sure # noqa +import datetime from moto import mock_ssm @@ -47,6 +48,51 @@ def test_delete_parameters(): len(response['Parameters']).should.equal(0) +@mock_ssm +def test_get_parameters_by_path(): + client = boto3.client('ssm', region_name='us-east-1') + + client.put_parameter( + Name='/foo/name1', + Description='A test parameter', + Value='value1', + Type='String') + + client.put_parameter( + Name='/foo/name2', + Description='A test parameter', + Value='value2', + Type='String') + + client.put_parameter( + Name='/bar/name3', + Description='A test parameter', + Value='value3', + Type='String') + + client.put_parameter( + Name='/bar/name3/name4', + Description='A test parameter', + Value='value4', + Type='String') + + response = client.get_parameters_by_path(Path='/foo') + len(response['Parameters']).should.equal(2) + {p['Value'] for p in response['Parameters']}.should.equal( + set(['value1', 'value2']) + ) + + response = client.get_parameters_by_path(Path='/bar', Recursive=False) + len(response['Parameters']).should.equal(1) + response['Parameters'][0]['Value'].should.equal('value3') + + response = client.get_parameters_by_path(Path='/bar', Recursive=True) + len(response['Parameters']).should.equal(2) + {p['Value'] for p in response['Parameters']}.should.equal( + set(['value3', 'value4']) + ) + + @mock_ssm def test_put_parameter(): client = boto3.client('ssm', region_name='us-east-1') @@ -67,6 +113,46 @@ def test_put_parameter(): response['Parameters'][0]['Name'].should.equal('test') response['Parameters'][0]['Value'].should.equal('value') response['Parameters'][0]['Type'].should.equal('String') + response['Parameters'][0]['Version'].should.equal(1) + + client.put_parameter( + Name='test', + Description='desc 2', + Value='value 2', + Type='String') + + response = client.get_parameters( + Names=[ + 'test' + ], + WithDecryption=False) + + # without overwrite nothing changes + len(response['Parameters']).should.equal(1) + response['Parameters'][0]['Name'].should.equal('test') + response['Parameters'][0]['Value'].should.equal('value') + response['Parameters'][0]['Type'].should.equal('String') + response['Parameters'][0]['Version'].should.equal(1) + + client.put_parameter( + Name='test', + Description='desc 3', + Value='value 3', + Type='String', + Overwrite=True) + + response = client.get_parameters( + Names=[ + 'test' + ], + WithDecryption=False) + + # with overwrite the value is updated and the version increments + len(response['Parameters']).should.equal(1) + response['Parameters'][0]['Name'].should.equal('test') + response['Parameters'][0]['Value'].should.equal('value 3') + response['Parameters'][0]['Type'].should.equal('String') + response['Parameters'][0]['Version'].should.equal(2) @mock_ssm @@ -234,6 +320,33 @@ def test_describe_parameters_filter_keyid(): response['Parameters'][0]['Type'].should.equal('SecureString') ''.should.equal(response.get('NextToken', '')) +@mock_ssm +def test_describe_parameters_attributes(): + client = boto3.client('ssm', region_name='us-east-1') + + client.put_parameter( + Name='aa', + Value='11', + Type='String', + Description='my description' + ) + + client.put_parameter( + Name='bb', + Value='22', + Type='String' + ) + + response = client.describe_parameters() + len(response['Parameters']).should.equal(2) + + response['Parameters'][0]['Description'].should.equal('my description') + response['Parameters'][0]['Version'].should.equal(1) + response['Parameters'][0]['LastModifiedDate'].should.be.a(datetime.date) +
response['Parameters'][0]['LastModifiedUser'].should.equal('N/A') + + response['Parameters'][1].get('Description').should.be.none + response['Parameters'][1]['Version'].should.equal(1) @mock_ssm def test_get_parameter_invalid(): diff --git a/tests/test_xray/test_xray_client.py b/tests/test_xray/test_xray_client.py new file mode 100644 index 000000000..0cd948950 --- /dev/null +++ b/tests/test_xray/test_xray_client.py @@ -0,0 +1,72 @@ +from __future__ import unicode_literals +from moto import mock_xray_client, XRaySegment, mock_dynamodb2 +import sure # noqa +import boto3 + +from moto.xray.mock_client import MockEmitter +import aws_xray_sdk.core as xray_core +import aws_xray_sdk.core.patcher as xray_core_patcher + +import botocore.client +import botocore.endpoint +original_make_api_call = botocore.client.BaseClient._make_api_call +original_encode_headers = botocore.endpoint.Endpoint._encode_headers + +import requests +original_session_request = requests.Session.request +original_session_prep_request = requests.Session.prepare_request + + +@mock_xray_client +@mock_dynamodb2 +def test_xray_dynamo_request_id(): + # Could be ran in any order, so we need to tell sdk that its been unpatched + xray_core_patcher._PATCHED_MODULES = set() + xray_core.patch_all() + + client = boto3.client('dynamodb', region_name='us-east-1') + + with XRaySegment(): + resp = client.list_tables() + resp['ResponseMetadata'].should.contain('RequestId') + id1 = resp['ResponseMetadata']['RequestId'] + + with XRaySegment(): + client.list_tables() + resp = client.list_tables() + id2 = resp['ResponseMetadata']['RequestId'] + + id1.should_not.equal(id2) + + setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call) + setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers) + setattr(requests.Session, 'request', original_session_request) + setattr(requests.Session, 'prepare_request', original_session_prep_request) + + +@mock_xray_client +def test_xray_udp_emitter_patched(): + # Could be ran in any order, so we need to tell sdk that its been unpatched + xray_core_patcher._PATCHED_MODULES = set() + xray_core.patch_all() + + assert isinstance(xray_core.xray_recorder._emitter, MockEmitter) + + setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call) + setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers) + setattr(requests.Session, 'request', original_session_request) + setattr(requests.Session, 'prepare_request', original_session_prep_request) + + +@mock_xray_client +def test_xray_context_patched(): + # Could be ran in any order, so we need to tell sdk that its been unpatched + xray_core_patcher._PATCHED_MODULES = set() + xray_core.patch_all() + + xray_core.xray_recorder._context.context_missing.should.equal('LOG_ERROR') + + setattr(botocore.client.BaseClient, '_make_api_call', original_make_api_call) + setattr(botocore.endpoint.Endpoint, '_encode_headers', original_encode_headers) + setattr(requests.Session, 'request', original_session_request) + setattr(requests.Session, 'prepare_request', original_session_prep_request) diff --git a/tox.ini b/tox.ini index 3fe5d0141..0f3f1466a 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py26, py27, py33, py34 +envlist = py27, py36 [testenv] deps = diff --git a/wait_for.py b/wait_for.py index ea3639d16..d313ea5a9 100755 --- a/wait_for.py +++ b/wait_for.py @@ -24,7 +24,7 @@ while True: break except EXCEPTIONS: elapsed_s = time.time() - start_ts - if elapsed_s > 30: + if elapsed_s > 60: 
raise print('.')