diff --git a/.bumpversion.cfg b/.bumpversion.cfg
new file mode 100644
index 000000000..3e15854ef
--- /dev/null
+++ b/.bumpversion.cfg
@@ -0,0 +1,7 @@
+[bumpversion]
+current_version = 1.3.3
+
+[bumpversion:file:setup.py]
+
+[bumpversion:file:moto/__init__.py]
+
diff --git a/.gitignore b/.gitignore
index 18026d60f..c4b8c5034 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,3 +13,5 @@ build/
 .DS_Store
 python_env
 .ropeproject/
+.pytest_cache/
+
diff --git a/AUTHORS.md b/AUTHORS.md
index 1771d1a78..6b7c96291 100644
--- a/AUTHORS.md
+++ b/AUTHORS.md
@@ -49,3 +49,7 @@ Moto is written by Steve Pulec with contributions from:
 * [Michael van Tellingen](https://github.com/mvantellingen)
 * [Jessie Nadler](https://github.com/nadlerjessie)
 * [Alex Morken](https://github.com/alexmorken)
+* [Clive Li](https://github.com/cliveli)
+* [Jim Shields](https://github.com/jimjshields)
+* [William Richard](https://github.com/william-richard)
+* [Alex Casalboni](https://github.com/alexcasalboni)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 740aac2cb..fb3a5d8d5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,8 +1,58 @@
 Moto Changelog
 ===================
-Latest
+1.3.3
 ------
+
+ * Fix a regression in S3 URL regexes
+ * APIGateway region fixes
+ * ECS improvements
+ * Add @mock_cognitoidentity, thanks to @brcoding
+
+
+1.3.2
+------
+The huge change in this version is that the responses library is no longer vendored. Many developers are now unblocked. Kudos to @spulec for the fix.
+
+ * Fix route53 TTL bug
+ * Added filtering support for S3 lifecycle
+ * Unvendored the responses library
+
+1.3.0
+------
+
+Dozens of major endpoint additions in this release. Highlights include:
+
+ * Fixed AMI tests and the Travis build setup
+ * SNS improvements
+ * Dynamodb improvements
+ * EBS improvements
+ * Redshift improvements
+ * RDS snapshot improvements
+ * S3 improvements
+ * Cloudwatch improvements
+ * SSM improvements
+ * IAM improvements
+ * ELBV1 and ELBV2 improvements
+ * Lambda improvements
+ * EC2 spot pricing improvements
+ * ApiGateway improvements
+ * VPC improvements
+
+1.2.0
+------
+
+ * Supports filtering AMIs by self
+ * Implemented signal_workflow_execution for SWF
+ * Wired the SWF backend to the moto server
+ * Added URL decoding to the x-amz-copy-source header for copying S3 files
+ * Revamped lambda function storage to do versioning
+ * IOT improvements
+ * RDS improvements
+ * Implemented CloudWatch get_metric_statistics
+ * Improved Cloudformation EC2 support
+ * Implemented Cloudformation change_set endpoints
+
 1.1.25
 -----
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1266d508e..f28083221 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,4 +1,25 @@
 ### Contributing code
-If you have improvements to Moto, send us your pull requests! For those
-just getting started, Github has a [howto](https://help.github.com/articles/using-pull-requests/).
+Moto has a [Code of Conduct](https://github.com/spulec/moto/blob/master/CODE_OF_CONDUCT.md); you can expect to be treated with respect at all times when interacting with this project.
+
+## Is there a missing feature?
+
+Moto is easier to contribute to than you probably think. There's [a list of which endpoints have been implemented](https://github.com/spulec/moto/blob/master/IMPLEMENTATION_COVERAGE.md) and we invite you to add new endpoints to existing services or to add new services.
+
+How to teach Moto to support a new AWS endpoint:
+
+* Create an issue describing what's missing. This is where we'll all talk about the new addition and help you get it done.
+* Create a [pull request](https://help.github.com/articles/using-pull-requests/) and mention the issue # in the PR description.
+* Try to add a failing test case. For example, if you're trying to implement `boto3.client('acm').import_certificate()`, you'll want to add a new method called `def test_import_certificate` to `tests/test_acm/test_acm.py`.
+* If you can also implement the code that gets that test passing, that's great. If not, just ask the community for a hand and somebody will assist you.
+
+# Maintainers
+
+## Releasing a new version of Moto
+
+You'll need a PyPI account and a Docker Hub account to release Moto. After we release a new PyPI package, we build and push the [motoserver/moto](https://hub.docker.com/r/motoserver/moto/) Docker image.
+
+* First, `scripts/bump_version` modifies the version and opens a PR
+* Then, merge the new pull request
+* Finally, generate and ship the new artifacts with `make publish`
+
diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md
index 76944e3fe..b5a280640 100644
--- a/IMPLEMENTATION_COVERAGE.md
+++ b/IMPLEMENTATION_COVERAGE.md
@@ -1,8 +1,9 @@
-## acm - 50% implemented
+## acm - 41% implemented
 - [X] add_tags_to_certificate
 - [X] delete_certificate
 - [ ] describe_certificate
+- [ ] export_certificate
 - [X] get_certificate
 - [ ] import_certificate
 - [ ] list_certificates
@@ -10,8 +11,77 @@
 - [X] remove_tags_from_certificate
 - [X] request_certificate
 - [ ] resend_validation_email
+- [ ] update_certificate_options
-## apigateway - 18% implemented
+## acm-pca - 0% implemented
+- [ ] create_certificate_authority
+- [ ] create_certificate_authority_audit_report
+- [ ] delete_certificate_authority
+- [ ] describe_certificate_authority
+- [ ] describe_certificate_authority_audit_report
+- [ ] get_certificate
+- [ ] get_certificate_authority_certificate
+- [ ] get_certificate_authority_csr
+- [ ] import_certificate_authority_certificate
+- [ ] issue_certificate
+- [ ] list_certificate_authorities
+- [ ] list_tags
+- [ ] revoke_certificate
+- [ ] tag_certificate_authority
+- [ ] untag_certificate_authority
+- [ ] update_certificate_authority
+
+## alexaforbusiness - 0% implemented
+- [ ] associate_contact_with_address_book
+- [ ] associate_device_with_room
+- [ ] associate_skill_group_with_room
+- [ ] create_address_book
+- [ ] create_contact
+- [ ] create_profile
+- [ ] create_room
+- [ ] create_skill_group
+- [ ] create_user
+- [ ] delete_address_book
+- [ ] delete_contact
+- [ ] delete_profile
+- [ ] delete_room
+- [ ] delete_room_skill_parameter
+- [ ] delete_skill_group
+- [ ] delete_user
+- [ ] disassociate_contact_from_address_book
+- [ ] disassociate_device_from_room
+- [ ] disassociate_skill_group_from_room
+- [ ] get_address_book
+- [ ] get_contact
+- [ ] get_device
+- [ ] get_profile
+- [ ] get_room
+- [ ] get_room_skill_parameter
+- [ ] get_skill_group
+- [ ] list_skills
+- [ ] list_tags
+- [ ] put_room_skill_parameter
+- [ ] resolve_room
+- [ ] revoke_invitation
+- [ ] search_address_books
+- [ ] search_contacts
+- [ ] search_devices
+- [ ] search_profiles
+- [ ] search_rooms
+- [ ] search_skill_groups
+- [ ] search_users
+- [ ] send_invitation
+- [ ] start_device_sync
+- [ ] tag_resource
+- [ ] untag_resource
+- [ ] update_address_book
+- [ ] update_contact
+- [ ] update_device
+- [ ] update_profile
+- [ ] update_room
+- [ ] update_skill_group
+
+## apigateway - 17% implemented
 - [ ] create_api_key
 - [ ] create_authorizer
 - [ ] create_base_path_mapping
@@ -26,6 +96,7 @@
 - [X] create_stage
 - [ ] create_usage_plan
 - [ ]
create_usage_plan_key +- [ ] create_vpc_link - [ ] delete_api_key - [ ] delete_authorizer - [ ] delete_base_path_mapping @@ -46,6 +117,7 @@ - [ ] delete_stage - [ ] delete_usage_plan - [ ] delete_usage_plan_key +- [ ] delete_vpc_link - [ ] flush_stage_authorizers_cache - [ ] flush_stage_cache - [ ] generate_client_certificate @@ -87,11 +159,14 @@ - [ ] get_sdk_types - [X] get_stage - [X] get_stages +- [ ] get_tags - [ ] get_usage - [ ] get_usage_plan - [ ] get_usage_plan_key - [ ] get_usage_plan_keys - [ ] get_usage_plans +- [ ] get_vpc_link +- [ ] get_vpc_links - [ ] import_api_keys - [ ] import_documentation_parts - [ ] import_rest_api @@ -101,8 +176,10 @@ - [ ] put_method - [ ] put_method_response - [ ] put_rest_api +- [ ] tag_resource - [ ] test_invoke_authorizer - [ ] test_invoke_method +- [ ] untag_resource - [ ] update_account - [ ] update_api_key - [ ] update_authorizer @@ -124,6 +201,7 @@ - [X] update_stage - [ ] update_usage - [ ] update_usage_plan +- [ ] update_vpc_link ## application-autoscaling - 0% implemented - [ ] delete_scaling_policy @@ -139,6 +217,7 @@ ## appstream - 0% implemented - [ ] associate_fleet +- [ ] copy_image - [ ] create_directory_config - [ ] create_fleet - [ ] create_image_builder @@ -160,14 +239,46 @@ - [ ] expire_session - [ ] list_associated_fleets - [ ] list_associated_stacks +- [ ] list_tags_for_resource - [ ] start_fleet - [ ] start_image_builder - [ ] stop_fleet - [ ] stop_image_builder +- [ ] tag_resource +- [ ] untag_resource - [ ] update_directory_config - [ ] update_fleet - [ ] update_stack +## appsync - 0% implemented +- [ ] create_api_key +- [ ] create_data_source +- [ ] create_graphql_api +- [ ] create_resolver +- [ ] create_type +- [ ] delete_api_key +- [ ] delete_data_source +- [ ] delete_graphql_api +- [ ] delete_resolver +- [ ] delete_type +- [ ] get_data_source +- [ ] get_graphql_api +- [ ] get_introspection_schema +- [ ] get_resolver +- [ ] get_schema_creation_status +- [ ] get_type +- [ ] list_api_keys +- [ ] list_data_sources +- [ ] list_graphql_apis +- [ ] list_resolvers +- [ ] list_types +- [ ] start_schema_creation +- [ ] update_api_key +- [ ] update_data_source +- [ ] update_graphql_api +- [ ] update_resolver +- [ ] update_type + ## athena - 0% implemented - [ ] batch_get_named_query - [ ] batch_get_query_execution @@ -181,7 +292,7 @@ - [ ] start_query_execution - [ ] stop_query_execution -## autoscaling - 42% implemented +## autoscaling - 44% implemented - [X] attach_instances - [X] attach_load_balancer_target_groups - [X] attach_load_balancers @@ -231,10 +342,16 @@ - [X] set_desired_capacity - [X] set_instance_health - [ ] set_instance_protection -- [ ] suspend_processes +- [X] suspend_processes - [ ] terminate_instance_in_auto_scaling_group - [X] update_auto_scaling_group +## autoscaling-plans - 0% implemented +- [ ] create_scaling_plan +- [ ] delete_scaling_plan +- [ ] describe_scaling_plan_resources +- [ ] describe_scaling_plans + ## batch - 93% implemented - [ ] cancel_job - [X] create_compute_environment @@ -268,6 +385,26 @@ - [ ] update_notification - [ ] update_subscriber +## ce - 0% implemented +- [ ] get_cost_and_usage +- [ ] get_dimension_values +- [ ] get_reservation_coverage +- [ ] get_reservation_purchase_recommendation +- [ ] get_reservation_utilization +- [ ] get_tags + +## cloud9 - 0% implemented +- [ ] create_environment_ec2 +- [ ] create_environment_membership +- [ ] delete_environment +- [ ] delete_environment_membership +- [ ] describe_environment_memberships +- [ ] describe_environment_status +- [ ] 
describe_environments +- [ ] list_environments +- [ ] update_environment +- [ ] update_environment_membership + ## clouddirectory - 0% implemented - [ ] add_facet_to_object - [ ] apply_schema @@ -294,6 +431,7 @@ - [ ] detach_typed_link - [ ] disable_directory - [ ] enable_directory +- [ ] get_applied_schema_version - [ ] get_directory - [ ] get_facet - [ ] get_object_information @@ -328,8 +466,10 @@ - [ ] update_object_attributes - [ ] update_schema - [ ] update_typed_link_facet +- [ ] upgrade_applied_schema +- [ ] upgrade_published_schema -## cloudformation - 20% implemented +## cloudformation - 21% implemented - [ ] cancel_update_stack - [ ] continue_update_rollback - [X] create_change_set @@ -350,7 +490,7 @@ - [ ] describe_stack_set_operation - [X] describe_stacks - [ ] estimate_template_cost -- [ ] execute_change_set +- [X] execute_change_set - [ ] get_stack_policy - [ ] get_template - [ ] get_template_summary @@ -367,6 +507,7 @@ - [ ] signal_resource - [ ] stop_stack_set_operation - [X] update_stack +- [ ] update_stack_instances - [ ] update_stack_set - [ ] update_termination_protection - [ ] validate_template @@ -375,30 +516,48 @@ - [ ] create_cloud_front_origin_access_identity - [ ] create_distribution - [ ] create_distribution_with_tags +- [ ] create_field_level_encryption_config +- [ ] create_field_level_encryption_profile - [ ] create_invalidation +- [ ] create_public_key - [ ] create_streaming_distribution - [ ] create_streaming_distribution_with_tags - [ ] delete_cloud_front_origin_access_identity - [ ] delete_distribution +- [ ] delete_field_level_encryption_config +- [ ] delete_field_level_encryption_profile +- [ ] delete_public_key - [ ] delete_service_linked_role - [ ] delete_streaming_distribution - [ ] get_cloud_front_origin_access_identity - [ ] get_cloud_front_origin_access_identity_config - [ ] get_distribution - [ ] get_distribution_config +- [ ] get_field_level_encryption +- [ ] get_field_level_encryption_config +- [ ] get_field_level_encryption_profile +- [ ] get_field_level_encryption_profile_config - [ ] get_invalidation +- [ ] get_public_key +- [ ] get_public_key_config - [ ] get_streaming_distribution - [ ] get_streaming_distribution_config - [ ] list_cloud_front_origin_access_identities - [ ] list_distributions - [ ] list_distributions_by_web_acl_id +- [ ] list_field_level_encryption_configs +- [ ] list_field_level_encryption_profiles - [ ] list_invalidations +- [ ] list_public_keys - [ ] list_streaming_distributions - [ ] list_tags_for_resource - [ ] tag_resource - [ ] untag_resource - [ ] update_cloud_front_origin_access_identity - [ ] update_distribution +- [ ] update_field_level_encryption_config +- [ ] update_field_level_encryption_profile +- [ ] update_public_key - [ ] update_streaming_distribution ## cloudhsm - 0% implemented @@ -482,7 +641,7 @@ - [ ] stop_logging - [ ] update_trail -## cloudwatch - 53% implemented +## cloudwatch - 56% implemented - [X] delete_alarms - [X] delete_dashboards - [ ] describe_alarm_history @@ -491,7 +650,8 @@ - [ ] disable_alarm_actions - [ ] enable_alarm_actions - [X] get_dashboard -- [ ] get_metric_statistics +- [ ] get_metric_data +- [X] get_metric_statistics - [X] list_dashboards - [ ] list_metrics - [X] put_dashboard @@ -507,6 +667,7 @@ - [ ] create_webhook - [ ] delete_project - [ ] delete_webhook +- [ ] invalidate_project_cache - [ ] list_builds - [ ] list_builds_for_project - [ ] list_curated_environment_images @@ -514,24 +675,43 @@ - [ ] start_build - [ ] stop_build - [ ] update_project +- [ ] update_webhook ## 
codecommit - 0% implemented - [ ] batch_get_repositories - [ ] create_branch +- [ ] create_pull_request - [ ] create_repository - [ ] delete_branch +- [ ] delete_comment_content - [ ] delete_repository +- [ ] describe_pull_request_events - [ ] get_blob - [ ] get_branch +- [ ] get_comment +- [ ] get_comments_for_compared_commit +- [ ] get_comments_for_pull_request - [ ] get_commit - [ ] get_differences +- [ ] get_merge_conflicts +- [ ] get_pull_request - [ ] get_repository - [ ] get_repository_triggers - [ ] list_branches +- [ ] list_pull_requests - [ ] list_repositories +- [ ] merge_pull_request_by_fast_forward +- [ ] post_comment_for_compared_commit +- [ ] post_comment_for_pull_request +- [ ] post_comment_reply +- [ ] put_file - [ ] put_repository_triggers - [ ] test_repository_triggers +- [ ] update_comment - [ ] update_default_branch +- [ ] update_pull_request_description +- [ ] update_pull_request_status +- [ ] update_pull_request_title - [ ] update_repository_description - [ ] update_repository_name @@ -551,6 +731,7 @@ - [ ] delete_application - [ ] delete_deployment_config - [ ] delete_deployment_group +- [ ] delete_git_hub_account_token - [ ] deregister_on_premises_instance - [ ] get_application - [ ] get_application_revision @@ -567,6 +748,7 @@ - [ ] list_deployments - [ ] list_git_hub_account_token_names - [ ] list_on_premises_instances +- [ ] put_lifecycle_event_hook_execution_status - [ ] register_application_revision - [ ] register_on_premises_instance - [ ] remove_tags_from_on_premises_instances @@ -661,13 +843,17 @@ - [ ] admin_link_provider_for_user - [ ] admin_list_devices - [ ] admin_list_groups_for_user +- [ ] admin_list_user_auth_events - [ ] admin_remove_user_from_group - [ ] admin_reset_user_password - [ ] admin_respond_to_auth_challenge +- [ ] admin_set_user_mfa_preference - [ ] admin_set_user_settings +- [ ] admin_update_auth_event_feedback - [ ] admin_update_device_status - [ ] admin_update_user_attributes - [ ] admin_user_global_sign_out +- [ ] associate_software_token - [ ] change_password - [ ] confirm_device - [ ] confirm_forgot_password @@ -689,6 +875,7 @@ - [ ] delete_user_pool_domain - [ ] describe_identity_provider - [ ] describe_resource_server +- [ ] describe_risk_configuration - [ ] describe_user_import_job - [ ] describe_user_pool - [ ] describe_user_pool_client @@ -699,9 +886,11 @@ - [ ] get_device - [ ] get_group - [ ] get_identity_provider_by_identifier +- [ ] get_signing_certificate - [ ] get_ui_customization - [ ] get_user - [ ] get_user_attribute_verification_code +- [ ] get_user_pool_mfa_config - [ ] global_sign_out - [ ] initiate_auth - [ ] list_devices @@ -715,11 +904,15 @@ - [ ] list_users_in_group - [ ] resend_confirmation_code - [ ] respond_to_auth_challenge +- [ ] set_risk_configuration - [ ] set_ui_customization +- [ ] set_user_mfa_preference +- [ ] set_user_pool_mfa_config - [ ] set_user_settings - [ ] sign_up - [ ] start_user_import_job - [ ] stop_user_import_job +- [ ] update_auth_event_feedback - [ ] update_device_status - [ ] update_group - [ ] update_identity_provider @@ -727,6 +920,7 @@ - [ ] update_user_attributes - [ ] update_user_pool - [ ] update_user_pool_client +- [ ] verify_software_token - [ ] verify_user_attribute ## cognito-sync - 0% implemented @@ -748,20 +942,44 @@ - [ ] unsubscribe_from_dataset - [ ] update_records +## comprehend - 0% implemented +- [ ] batch_detect_dominant_language +- [ ] batch_detect_entities +- [ ] batch_detect_key_phrases +- [ ] batch_detect_sentiment +- [ ] describe_topics_detection_job +- [ ] 
detect_dominant_language +- [ ] detect_entities +- [ ] detect_key_phrases +- [ ] detect_sentiment +- [ ] list_topics_detection_jobs +- [ ] start_topics_detection_job + ## config - 0% implemented +- [ ] batch_get_resource_config +- [ ] delete_aggregation_authorization - [ ] delete_config_rule +- [ ] delete_configuration_aggregator - [ ] delete_configuration_recorder - [ ] delete_delivery_channel - [ ] delete_evaluation_results +- [ ] delete_pending_aggregation_request - [ ] deliver_config_snapshot +- [ ] describe_aggregate_compliance_by_config_rules +- [ ] describe_aggregation_authorizations - [ ] describe_compliance_by_config_rule - [ ] describe_compliance_by_resource - [ ] describe_config_rule_evaluation_status - [ ] describe_config_rules +- [ ] describe_configuration_aggregator_sources_status +- [ ] describe_configuration_aggregators - [ ] describe_configuration_recorder_status - [ ] describe_configuration_recorders - [ ] describe_delivery_channel_status - [ ] describe_delivery_channels +- [ ] describe_pending_aggregation_requests +- [ ] get_aggregate_compliance_details_by_config_rule +- [ ] get_aggregate_config_rule_compliance_summary - [ ] get_compliance_details_by_config_rule - [ ] get_compliance_details_by_resource - [ ] get_compliance_summary_by_config_rule @@ -769,7 +987,9 @@ - [ ] get_discovered_resource_counts - [ ] get_resource_config_history - [ ] list_discovered_resources +- [ ] put_aggregation_authorization - [ ] put_config_rule +- [ ] put_configuration_aggregator - [ ] put_configuration_recorder - [ ] put_delivery_channel - [ ] put_evaluations @@ -777,6 +997,10 @@ - [ ] start_configuration_recorder - [ ] stop_configuration_recorder +## connect - 0% implemented +- [ ] start_outbound_voice_contact +- [ ] stop_contact + ## cur - 0% implemented - [ ] delete_report_definition - [ ] describe_report_definitions @@ -828,11 +1052,13 @@ ## devicefarm - 0% implemented - [ ] create_device_pool +- [ ] create_instance_profile - [ ] create_network_profile - [ ] create_project - [ ] create_remote_access_session - [ ] create_upload - [ ] delete_device_pool +- [ ] delete_instance_profile - [ ] delete_network_profile - [ ] delete_project - [ ] delete_remote_access_session @@ -840,8 +1066,10 @@ - [ ] delete_upload - [ ] get_account_settings - [ ] get_device +- [ ] get_device_instance - [ ] get_device_pool - [ ] get_device_pool_compatibility +- [ ] get_instance_profile - [ ] get_job - [ ] get_network_profile - [ ] get_offering_status @@ -853,8 +1081,10 @@ - [ ] get_upload - [ ] install_to_remote_access_session - [ ] list_artifacts +- [ ] list_device_instances - [ ] list_device_pools - [ ] list_devices +- [ ] list_instance_profiles - [ ] list_jobs - [ ] list_network_profiles - [ ] list_offering_promotions @@ -873,7 +1103,9 @@ - [ ] schedule_run - [ ] stop_remote_access_session - [ ] stop_run +- [ ] update_device_instance - [ ] update_device_pool +- [ ] update_instance_profile - [ ] update_network_profile - [ ] update_project @@ -967,6 +1199,7 @@ - [ ] describe_events - [ ] describe_orderable_replication_instances - [ ] describe_refresh_schemas_status +- [ ] describe_replication_instance_task_logs - [ ] describe_replication_instances - [ ] describe_replication_subnet_groups - [ ] describe_replication_task_assessment_results @@ -980,6 +1213,7 @@ - [ ] modify_replication_instance - [ ] modify_replication_subnet_group - [ ] modify_replication_task +- [ ] reboot_replication_instance - [ ] refresh_schemas - [ ] reload_tables - [ ] remove_tags_from_resource @@ -1030,23 +1264,35 @@ - [ ] update_radius - 
[ ] verify_trust -## dynamodb - 36% implemented +## dynamodb - 22% implemented - [ ] batch_get_item - [ ] batch_write_item +- [ ] create_backup +- [ ] create_global_table - [X] create_table +- [ ] delete_backup - [X] delete_item - [X] delete_table +- [ ] describe_backup +- [ ] describe_continuous_backups +- [ ] describe_global_table - [ ] describe_limits - [ ] describe_table - [ ] describe_time_to_live - [X] get_item +- [ ] list_backups +- [ ] list_global_tables - [ ] list_tables - [ ] list_tags_of_resource - [X] put_item - [X] query +- [ ] restore_table_from_backup +- [ ] restore_table_to_point_in_time - [X] scan - [ ] tag_resource - [ ] untag_resource +- [ ] update_continuous_backups +- [ ] update_global_table - [ ] update_item - [ ] update_table - [ ] update_time_to_live @@ -1057,8 +1303,9 @@ - [ ] get_shard_iterator - [ ] list_streams -## ec2 - 39% implemented +## ec2 - 36% implemented - [ ] accept_reserved_instances_exchange_quote +- [ ] accept_vpc_endpoint_connections - [X] accept_vpc_peering_connection - [X] allocate_address - [ ] allocate_hosts @@ -1069,7 +1316,7 @@ - [ ] associate_iam_instance_profile - [X] associate_route_table - [ ] associate_subnet_cidr_block -- [ ] associate_vpc_cidr_block +- [X] associate_vpc_cidr_block - [ ] attach_classic_link_vpc - [X] attach_internet_gateway - [X] attach_network_interface @@ -1088,7 +1335,7 @@ - [ ] confirm_product_instance - [ ] copy_fpga_image - [X] copy_image -- [ ] copy_snapshot +- [X] copy_snapshot - [X] create_customer_gateway - [ ] create_default_subnet - [ ] create_default_vpc @@ -1100,6 +1347,8 @@ - [ ] create_instance_export_task - [X] create_internet_gateway - [X] create_key_pair +- [ ] create_launch_template +- [ ] create_launch_template_version - [X] create_nat_gateway - [X] create_network_acl - [X] create_network_acl_entry @@ -1117,6 +1366,8 @@ - [X] create_volume - [X] create_vpc - [ ] create_vpc_endpoint +- [ ] create_vpc_endpoint_connection_notification +- [ ] create_vpc_endpoint_service_configuration - [X] create_vpc_peering_connection - [X] create_vpn_connection - [ ] create_vpn_connection_route @@ -1128,6 +1379,8 @@ - [ ] delete_fpga_image - [X] delete_internet_gateway - [X] delete_key_pair +- [ ] delete_launch_template +- [ ] delete_launch_template_versions - [X] delete_nat_gateway - [X] delete_network_acl - [X] delete_network_acl_entry @@ -1143,6 +1396,8 @@ - [X] delete_tags - [X] delete_volume - [X] delete_vpc +- [ ] delete_vpc_endpoint_connection_notifications +- [ ] delete_vpc_endpoint_service_configurations - [ ] delete_vpc_endpoints - [X] delete_vpc_peering_connection - [X] delete_vpn_connection @@ -1151,6 +1406,7 @@ - [X] deregister_image - [ ] describe_account_attributes - [X] describe_addresses +- [ ] describe_aggregate_id_format - [X] describe_availability_zones - [ ] describe_bundle_tasks - [ ] describe_classic_link_instances @@ -1174,10 +1430,13 @@ - [ ] describe_import_image_tasks - [ ] describe_import_snapshot_tasks - [X] describe_instance_attribute +- [ ] describe_instance_credit_specifications - [ ] describe_instance_status - [ ] describe_instances - [X] describe_internet_gateways - [X] describe_key_pairs +- [ ] describe_launch_template_versions +- [ ] describe_launch_templates - [ ] describe_moving_addresses - [ ] describe_nat_gateways - [ ] describe_network_acls @@ -1186,6 +1445,7 @@ - [X] describe_network_interfaces - [ ] describe_placement_groups - [ ] describe_prefix_lists +- [ ] describe_principal_id_format - [X] describe_regions - [ ] describe_reserved_instances - [ ] 
describe_reserved_instances_listings @@ -1214,6 +1474,10 @@ - [X] describe_vpc_attribute - [ ] describe_vpc_classic_link - [ ] describe_vpc_classic_link_dns_support +- [ ] describe_vpc_endpoint_connection_notifications +- [ ] describe_vpc_endpoint_connections +- [ ] describe_vpc_endpoint_service_configurations +- [ ] describe_vpc_endpoint_service_permissions - [ ] describe_vpc_endpoint_services - [ ] describe_vpc_endpoints - [ ] describe_vpc_peering_connections @@ -1232,7 +1496,7 @@ - [ ] disassociate_iam_instance_profile - [X] disassociate_route_table - [ ] disassociate_subnet_cidr_block -- [ ] disassociate_vpc_cidr_block +- [X] disassociate_vpc_cidr_block - [ ] enable_vgw_route_propagation - [ ] enable_volume_io - [ ] enable_vpc_classic_link @@ -1240,6 +1504,7 @@ - [ ] get_console_output - [ ] get_console_screenshot - [ ] get_host_reservation_purchase_preview +- [ ] get_launch_template_data - [ ] get_password_data - [ ] get_reserved_instances_exchange_quote - [ ] import_image @@ -1253,7 +1518,9 @@ - [ ] modify_identity_id_format - [ ] modify_image_attribute - [X] modify_instance_attribute +- [ ] modify_instance_credit_specification - [ ] modify_instance_placement +- [ ] modify_launch_template - [X] modify_network_interface_attribute - [ ] modify_reserved_instances - [ ] modify_snapshot_attribute @@ -1263,6 +1530,9 @@ - [ ] modify_volume_attribute - [X] modify_vpc_attribute - [ ] modify_vpc_endpoint +- [ ] modify_vpc_endpoint_connection_notification +- [ ] modify_vpc_endpoint_service_configuration +- [ ] modify_vpc_endpoint_service_permissions - [ ] modify_vpc_peering_connection_options - [ ] modify_vpc_tenancy - [ ] monitor_instances @@ -1272,6 +1542,7 @@ - [ ] purchase_scheduled_instances - [X] reboot_instances - [ ] register_image +- [ ] reject_vpc_endpoint_connections - [X] reject_vpc_peering_connection - [X] release_address - [ ] release_hosts @@ -1302,10 +1573,10 @@ - [ ] update_security_group_rule_descriptions_egress - [ ] update_security_group_rule_descriptions_ingress -## ecr - 27% implemented +## ecr - 31% implemented - [ ] batch_check_layer_availability - [ ] batch_delete_image -- [ ] batch_get_image +- [X] batch_get_image - [ ] complete_layer_upload - [X] create_repository - [ ] delete_lifecycle_policy @@ -1430,6 +1701,7 @@ - [ ] delete_configuration_template - [ ] delete_environment_configuration - [ ] delete_platform_version +- [ ] describe_account_attributes - [ ] describe_application_versions - [ ] describe_applications - [ ] describe_configuration_options @@ -1611,12 +1883,25 @@ - [ ] create_delivery_stream - [ ] delete_delivery_stream - [ ] describe_delivery_stream -- [ ] get_kinesis_stream - [ ] list_delivery_streams - [ ] put_record - [ ] put_record_batch - [ ] update_destination +## fms - 0% implemented +- [ ] associate_admin_account +- [ ] delete_notification_channel +- [ ] delete_policy +- [ ] disassociate_admin_account +- [ ] get_admin_account +- [ ] get_compliance_detail +- [ ] get_notification_channel +- [ ] get_policy +- [ ] list_compliance_status +- [ ] list_policies +- [ ] put_notification_channel +- [ ] put_policy + ## gamelift - 0% implemented - [ ] accept_match - [ ] create_alias @@ -1669,6 +1954,7 @@ - [ ] resolve_alias - [ ] search_game_sessions - [ ] start_game_session_placement +- [ ] start_match_backfill - [ ] start_matchmaking - [ ] stop_game_session_placement - [ ] stop_matchmaking @@ -1723,6 +2009,7 @@ - [ ] batch_delete_connection - [ ] batch_delete_partition - [ ] batch_delete_table +- [ ] batch_delete_table_version - [ ] batch_get_partition - [ ] 
batch_stop_job_run - [ ] create_classifier @@ -1744,6 +2031,7 @@ - [ ] delete_job - [ ] delete_partition - [ ] delete_table +- [ ] delete_table_version - [ ] delete_trigger - [ ] delete_user_defined_function - [ ] get_catalog_import_status @@ -1768,6 +2056,7 @@ - [ ] get_partitions - [ ] get_plan - [ ] get_table +- [ ] get_table_version - [ ] get_table_versions - [ ] get_tables - [ ] get_trigger @@ -1810,6 +2099,9 @@ - [ ] create_group_version - [ ] create_logger_definition - [ ] create_logger_definition_version +- [ ] create_resource_definition +- [ ] create_resource_definition_version +- [ ] create_software_update_job - [ ] create_subscription_definition - [ ] create_subscription_definition_version - [ ] delete_core_definition @@ -1817,6 +2109,7 @@ - [ ] delete_function_definition - [ ] delete_group - [ ] delete_logger_definition +- [ ] delete_resource_definition - [ ] delete_subscription_definition - [ ] disassociate_role_from_group - [ ] disassociate_service_role_from_account @@ -1835,6 +2128,8 @@ - [ ] get_group_version - [ ] get_logger_definition - [ ] get_logger_definition_version +- [ ] get_resource_definition +- [ ] get_resource_definition_version - [ ] get_service_role_for_account - [ ] get_subscription_definition - [ ] get_subscription_definition_version @@ -1850,6 +2145,8 @@ - [ ] list_groups - [ ] list_logger_definition_versions - [ ] list_logger_definitions +- [ ] list_resource_definition_versions +- [ ] list_resource_definitions - [ ] list_subscription_definition_versions - [ ] list_subscription_definitions - [ ] reset_deployments @@ -1860,8 +2157,48 @@ - [ ] update_group - [ ] update_group_certificate_configuration - [ ] update_logger_definition +- [ ] update_resource_definition - [ ] update_subscription_definition +## guardduty - 0% implemented +- [ ] accept_invitation +- [ ] archive_findings +- [ ] create_detector +- [ ] create_ip_set +- [ ] create_members +- [ ] create_sample_findings +- [ ] create_threat_intel_set +- [ ] decline_invitations +- [ ] delete_detector +- [ ] delete_invitations +- [ ] delete_ip_set +- [ ] delete_members +- [ ] delete_threat_intel_set +- [ ] disassociate_from_master_account +- [ ] disassociate_members +- [ ] get_detector +- [ ] get_findings +- [ ] get_findings_statistics +- [ ] get_invitations_count +- [ ] get_ip_set +- [ ] get_master_account +- [ ] get_members +- [ ] get_threat_intel_set +- [ ] invite_members +- [ ] list_detectors +- [ ] list_findings +- [ ] list_invitations +- [ ] list_ip_sets +- [ ] list_members +- [ ] list_threat_intel_sets +- [ ] start_monitoring_members +- [ ] stop_monitoring_members +- [ ] unarchive_findings +- [ ] update_detector +- [ ] update_findings_feedback +- [ ] update_ip_set +- [ ] update_threat_intel_set + ## health - 0% implemented - [ ] describe_affected_entities - [ ] describe_entity_aggregates @@ -1870,7 +2207,7 @@ - [ ] describe_event_types - [ ] describe_events -## iam - 46% implemented +## iam - 47% implemented - [ ] add_client_id_to_open_id_connect_provider - [X] add_role_to_instance_profile - [X] add_user_to_group @@ -1977,12 +2314,13 @@ - [ ] set_default_policy_version - [ ] simulate_custom_policy - [ ] simulate_principal_policy -- [ ] update_access_key +- [X] update_access_key - [ ] update_account_password_policy - [ ] update_assume_role_policy - [ ] update_group - [X] update_login_profile - [ ] update_open_id_connect_provider_thumbprint +- [ ] update_role - [ ] update_role_description - [ ] update_saml_provider - [ ] update_server_certificate @@ -2037,64 +2375,130 @@ - [ ] unsubscribe_from_event - [ 
] update_assessment_target -## iot - 45% implemented +## iot - 29% implemented - [ ] accept_certificate_transfer +- [X] add_thing_to_thing_group +- [ ] associate_targets_with_job +- [ ] attach_policy - [X] attach_principal_policy - [X] attach_thing_principal - [ ] cancel_certificate_transfer +- [ ] cancel_job +- [ ] clear_default_authorizer +- [ ] create_authorizer - [ ] create_certificate_from_csr +- [ ] create_job - [X] create_keys_and_certificate +- [ ] create_ota_update - [X] create_policy - [ ] create_policy_version +- [ ] create_role_alias +- [ ] create_stream - [X] create_thing +- [X] create_thing_group - [X] create_thing_type - [ ] create_topic_rule +- [ ] delete_authorizer - [ ] delete_ca_certificate - [X] delete_certificate +- [ ] delete_ota_update - [X] delete_policy - [ ] delete_policy_version - [ ] delete_registration_code +- [ ] delete_role_alias +- [ ] delete_stream - [X] delete_thing +- [X] delete_thing_group - [X] delete_thing_type - [ ] delete_topic_rule +- [ ] delete_v2_logging_level - [ ] deprecate_thing_type +- [ ] describe_authorizer - [ ] describe_ca_certificate - [X] describe_certificate +- [ ] describe_default_authorizer - [ ] describe_endpoint +- [ ] describe_event_configurations +- [ ] describe_index +- [ ] describe_job +- [ ] describe_job_execution +- [ ] describe_role_alias +- [ ] describe_stream - [X] describe_thing +- [X] describe_thing_group +- [ ] describe_thing_registration_task - [X] describe_thing_type +- [ ] detach_policy - [X] detach_principal_policy - [X] detach_thing_principal - [ ] disable_topic_rule - [ ] enable_topic_rule +- [ ] get_effective_policies +- [ ] get_indexing_configuration +- [ ] get_job_document - [ ] get_logging_options +- [ ] get_ota_update - [X] get_policy - [ ] get_policy_version - [ ] get_registration_code - [ ] get_topic_rule +- [ ] get_v2_logging_options +- [ ] list_attached_policies +- [ ] list_authorizers - [ ] list_ca_certificates - [X] list_certificates - [ ] list_certificates_by_ca +- [ ] list_indices +- [ ] list_job_executions_for_job +- [ ] list_job_executions_for_thing +- [ ] list_jobs +- [ ] list_ota_updates - [ ] list_outgoing_certificates - [X] list_policies - [X] list_policy_principals - [ ] list_policy_versions - [X] list_principal_policies - [X] list_principal_things +- [ ] list_role_aliases +- [ ] list_streams +- [ ] list_targets_for_policy +- [X] list_thing_groups +- [X] list_thing_groups_for_thing - [X] list_thing_principals +- [ ] list_thing_registration_task_reports +- [ ] list_thing_registration_tasks - [X] list_thing_types - [X] list_things +- [X] list_things_in_thing_group - [ ] list_topic_rules +- [ ] list_v2_logging_levels - [ ] register_ca_certificate - [ ] register_certificate +- [ ] register_thing - [ ] reject_certificate_transfer +- [X] remove_thing_from_thing_group - [ ] replace_topic_rule +- [ ] search_index +- [ ] set_default_authorizer - [ ] set_default_policy_version - [ ] set_logging_options +- [ ] set_v2_logging_level +- [ ] set_v2_logging_options +- [ ] start_thing_registration_task +- [ ] stop_thing_registration_task +- [ ] test_authorization +- [ ] test_invoke_authorizer - [ ] transfer_certificate +- [ ] update_authorizer - [ ] update_ca_certificate - [X] update_certificate +- [ ] update_event_configurations +- [ ] update_indexing_configuration +- [ ] update_role_alias +- [ ] update_stream - [X] update_thing +- [X] update_thing_group +- [X] update_thing_groups_for_thing ## iot-data - 0% implemented - [ ] delete_thing_shadow @@ -2102,18 +2506,26 @@ - [ ] publish - [ ] update_thing_shadow -## 
kinesis - 61% implemented +## iot-jobs-data - 0% implemented +- [ ] describe_job_execution +- [ ] get_pending_job_executions +- [ ] start_next_pending_job_execution +- [ ] update_job_execution + +## kinesis - 56% implemented - [X] add_tags_to_stream - [X] create_stream - [ ] decrease_stream_retention_period - [X] delete_stream - [ ] describe_limits - [X] describe_stream +- [ ] describe_stream_summary - [ ] disable_enhanced_monitoring - [ ] enable_enhanced_monitoring - [X] get_records - [X] get_shard_iterator - [ ] increase_stream_retention_period +- [ ] list_shards - [X] list_streams - [X] list_tags_for_stream - [X] merge_shards @@ -2125,6 +2537,13 @@ - [ ] stop_stream_encryption - [ ] update_shard_count +## kinesis-video-archived-media - 0% implemented +- [ ] get_media_for_fragment_list +- [ ] list_fragments + +## kinesis-video-media - 0% implemented +- [ ] get_media + ## kinesisanalytics - 0% implemented - [ ] add_application_cloud_watch_logging_option - [ ] add_application_input @@ -2144,6 +2563,18 @@ - [ ] stop_application - [ ] update_application +## kinesisvideo - 0% implemented +- [ ] create_stream +- [ ] delete_stream +- [ ] describe_stream +- [ ] get_data_endpoint +- [ ] list_streams +- [ ] list_tags_for_stream +- [ ] tag_stream +- [ ] untag_stream +- [ ] update_data_retention +- [ ] update_stream + ## kms - 25% implemented - [ ] cancel_key_deletion - [ ] create_alias @@ -2189,6 +2620,7 @@ - [ ] delete_alias - [ ] delete_event_source_mapping - [ ] delete_function +- [ ] delete_function_concurrency - [ ] get_account_settings - [ ] get_alias - [ ] get_event_source_mapping @@ -2203,6 +2635,7 @@ - [ ] list_tags - [ ] list_versions_by_function - [ ] publish_version +- [ ] put_function_concurrency - [ ] remove_permission - [ ] tag_resource - [ ] untag_resource @@ -2235,6 +2668,7 @@ - [ ] get_builtin_intents - [ ] get_builtin_slot_types - [ ] get_export +- [ ] get_import - [ ] get_intent - [ ] get_intent_versions - [ ] get_intents @@ -2246,6 +2680,7 @@ - [ ] put_bot_alias - [ ] put_intent - [ ] put_slot_type +- [ ] start_import ## lex-runtime - 0% implemented - [ ] post_content @@ -2254,6 +2689,8 @@ ## lightsail - 0% implemented - [ ] allocate_static_ip - [ ] attach_disk +- [ ] attach_instances_to_load_balancer +- [ ] attach_load_balancer_tls_certificate - [ ] attach_static_ip - [ ] close_instance_public_ports - [ ] create_disk @@ -2265,6 +2702,8 @@ - [ ] create_instances - [ ] create_instances_from_snapshot - [ ] create_key_pair +- [ ] create_load_balancer +- [ ] create_load_balancer_tls_certificate - [ ] delete_disk - [ ] delete_disk_snapshot - [ ] delete_domain @@ -2272,7 +2711,10 @@ - [ ] delete_instance - [ ] delete_instance_snapshot - [ ] delete_key_pair +- [ ] delete_load_balancer +- [ ] delete_load_balancer_tls_certificate - [ ] detach_disk +- [ ] detach_instances_from_load_balancer - [ ] detach_static_ip - [ ] download_default_key_pair - [ ] get_active_names @@ -2294,6 +2736,10 @@ - [ ] get_instances - [ ] get_key_pair - [ ] get_key_pairs +- [ ] get_load_balancer +- [ ] get_load_balancer_metric_data +- [ ] get_load_balancer_tls_certificates +- [ ] get_load_balancers - [ ] get_operation - [ ] get_operations - [ ] get_operations_for_resource @@ -2311,6 +2757,7 @@ - [ ] stop_instance - [ ] unpeer_vpc - [ ] update_domain_entry +- [ ] update_load_balancer_attribute ## logs - 24% implemented - [ ] associate_kms_key @@ -2384,6 +2831,79 @@ - [ ] generate_data_set - [ ] start_support_data_export +## mediaconvert - 0% implemented +- [ ] cancel_job +- [ ] create_job +- [ ] 
create_job_template +- [ ] create_preset +- [ ] create_queue +- [ ] delete_job_template +- [ ] delete_preset +- [ ] delete_queue +- [ ] describe_endpoints +- [ ] get_job +- [ ] get_job_template +- [ ] get_preset +- [ ] get_queue +- [ ] list_job_templates +- [ ] list_jobs +- [ ] list_presets +- [ ] list_queues +- [ ] update_job_template +- [ ] update_preset +- [ ] update_queue + +## medialive - 0% implemented +- [ ] create_channel +- [ ] create_input +- [ ] create_input_security_group +- [ ] delete_channel +- [ ] delete_input +- [ ] delete_input_security_group +- [ ] describe_channel +- [ ] describe_input +- [ ] describe_input_security_group +- [ ] list_channels +- [ ] list_input_security_groups +- [ ] list_inputs +- [ ] start_channel +- [ ] stop_channel +- [ ] update_channel +- [ ] update_input +- [ ] update_input_security_group + +## mediapackage - 0% implemented +- [ ] create_channel +- [ ] create_origin_endpoint +- [ ] delete_channel +- [ ] delete_origin_endpoint +- [ ] describe_channel +- [ ] describe_origin_endpoint +- [ ] list_channels +- [ ] list_origin_endpoints +- [ ] rotate_channel_credentials +- [ ] update_channel +- [ ] update_origin_endpoint + +## mediastore - 0% implemented +- [ ] create_container +- [ ] delete_container +- [ ] delete_container_policy +- [ ] delete_cors_policy +- [ ] describe_container +- [ ] get_container_policy +- [ ] get_cors_policy +- [ ] list_containers +- [ ] put_container_policy +- [ ] put_cors_policy + +## mediastore-data - 0% implemented +- [ ] delete_object +- [ ] describe_object +- [ ] get_object +- [ ] list_items +- [ ] put_object + ## meteringmarketplace - 0% implemented - [ ] batch_meter_usage - [ ] meter_usage @@ -2418,6 +2938,25 @@ - [ ] list_projects - [ ] update_project +## mq - 0% implemented +- [ ] create_broker +- [ ] create_configuration +- [ ] create_user +- [ ] delete_broker +- [ ] delete_user +- [ ] describe_broker +- [ ] describe_configuration +- [ ] describe_configuration_revision +- [ ] describe_user +- [ ] list_brokers +- [ ] list_configuration_revisions +- [ ] list_configurations +- [ ] list_users +- [ ] reboot_broker +- [ ] update_broker +- [ ] update_configuration +- [ ] update_user + ## mturk - 0% implemented - [ ] accept_qualification_request - [ ] approve_assignment @@ -2459,13 +2998,13 @@ - [ ] update_notification_settings - [ ] update_qualification_type -## opsworks - 9% implemented +## opsworks - 12% implemented - [ ] assign_instance - [ ] assign_volume - [ ] associate_elastic_ip - [ ] attach_elastic_load_balancer - [ ] clone_stack -- [ ] create_app +- [X] create_app - [ ] create_deployment - [X] create_instance - [X] create_layer @@ -2482,7 +3021,7 @@ - [ ] deregister_rds_db_instance - [ ] deregister_volume - [ ] describe_agent_versions -- [ ] describe_apps +- [X] describe_apps - [ ] describe_commands - [ ] describe_deployments - [ ] describe_ecs_clusters @@ -2492,6 +3031,7 @@ - [X] describe_layers - [ ] describe_load_based_auto_scaling - [ ] describe_my_user_profile +- [ ] describe_operating_systems - [ ] describe_permissions - [ ] describe_raid_arrays - [ ] describe_rds_db_instances @@ -2598,6 +3138,7 @@ ## pinpoint - 0% implemented - [ ] create_app - [ ] create_campaign +- [ ] create_export_job - [ ] create_import_job - [ ] create_segment - [ ] delete_adm_channel @@ -2609,6 +3150,7 @@ - [ ] delete_baidu_channel - [ ] delete_campaign - [ ] delete_email_channel +- [ ] delete_endpoint - [ ] delete_event_stream - [ ] delete_gcm_channel - [ ] delete_segment @@ -2630,10 +3172,13 @@ - [ ] get_email_channel - [ ] get_endpoint 
- [ ] get_event_stream +- [ ] get_export_job +- [ ] get_export_jobs - [ ] get_gcm_channel - [ ] get_import_job - [ ] get_import_jobs - [ ] get_segment +- [ ] get_segment_export_jobs - [ ] get_segment_import_jobs - [ ] get_segment_version - [ ] get_segment_versions @@ -2763,7 +3308,7 @@ - [ ] start_db_instance - [ ] stop_db_instance -## redshift - 31% implemented +## redshift - 41% implemented - [ ] authorize_cluster_security_group_ingress - [ ] authorize_snapshot_access - [ ] copy_cluster_snapshot @@ -2775,7 +3320,7 @@ - [ ] create_event_subscription - [ ] create_hsm_client_certificate - [ ] create_hsm_configuration -- [ ] create_snapshot_copy_grant +- [X] create_snapshot_copy_grant - [X] create_tags - [X] delete_cluster - [X] delete_cluster_parameter_group @@ -2785,7 +3330,7 @@ - [ ] delete_event_subscription - [ ] delete_hsm_client_certificate - [ ] delete_hsm_configuration -- [ ] delete_snapshot_copy_grant +- [X] delete_snapshot_copy_grant - [X] delete_tags - [X] describe_cluster_parameter_groups - [ ] describe_cluster_parameters @@ -2805,20 +3350,20 @@ - [ ] describe_reserved_node_offerings - [ ] describe_reserved_nodes - [ ] describe_resize -- [ ] describe_snapshot_copy_grants +- [X] describe_snapshot_copy_grants - [ ] describe_table_restore_status - [X] describe_tags - [ ] disable_logging -- [ ] disable_snapshot_copy +- [X] disable_snapshot_copy - [ ] enable_logging -- [ ] enable_snapshot_copy +- [X] enable_snapshot_copy - [ ] get_cluster_credentials - [X] modify_cluster - [ ] modify_cluster_iam_roles - [ ] modify_cluster_parameter_group - [ ] modify_cluster_subnet_group - [ ] modify_event_subscription -- [ ] modify_snapshot_copy_retention_period +- [X] modify_snapshot_copy_retention_period - [ ] purchase_reserved_node_offering - [ ] reboot_cluster - [ ] reset_cluster_parameter_group @@ -2831,18 +3376,51 @@ ## rekognition - 0% implemented - [ ] compare_faces - [ ] create_collection +- [ ] create_stream_processor - [ ] delete_collection - [ ] delete_faces +- [ ] delete_stream_processor +- [ ] describe_stream_processor - [ ] detect_faces - [ ] detect_labels - [ ] detect_moderation_labels +- [ ] detect_text - [ ] get_celebrity_info +- [ ] get_celebrity_recognition +- [ ] get_content_moderation +- [ ] get_face_detection +- [ ] get_face_search +- [ ] get_label_detection +- [ ] get_person_tracking - [ ] index_faces - [ ] list_collections - [ ] list_faces +- [ ] list_stream_processors - [ ] recognize_celebrities - [ ] search_faces - [ ] search_faces_by_image +- [ ] start_celebrity_recognition +- [ ] start_content_moderation +- [ ] start_face_detection +- [ ] start_face_search +- [ ] start_label_detection +- [ ] start_person_tracking +- [ ] start_stream_processor +- [ ] stop_stream_processor + +## resource-groups - 0% implemented +- [ ] create_group +- [ ] delete_group +- [ ] get_group +- [ ] get_group_query +- [ ] get_tags +- [ ] list_group_resources +- [ ] list_groups +- [ ] search_resources +- [ ] tag +- [ ] untag +- [ ] update_group +- [ ] update_group_query ## resourcegroupstaggingapi - 60% implemented - [X] get_resources @@ -2935,7 +3513,7 @@ - [ ] update_tags_for_domain - [ ] view_billing -## s3 - 12% implemented +## s3 - 15% implemented - [ ] abort_multipart_upload - [ ] complete_multipart_upload - [ ] copy_object @@ -2997,10 +3575,10 @@ - [ ] put_bucket_inventory_configuration - [ ] put_bucket_lifecycle - [ ] put_bucket_lifecycle_configuration -- [ ] put_bucket_logging +- [X] put_bucket_logging - [ ] put_bucket_metrics_configuration - [ ] put_bucket_notification -- [ ] 
put_bucket_notification_configuration +- [X] put_bucket_notification_configuration - [ ] put_bucket_policy - [ ] put_bucket_replication - [ ] put_bucket_request_payment @@ -3011,9 +3589,49 @@ - [ ] put_object_acl - [ ] put_object_tagging - [ ] restore_object +- [ ] select_object_content - [ ] upload_part - [ ] upload_part_copy +## sagemaker - 0% implemented +- [ ] add_tags +- [ ] create_endpoint +- [ ] create_endpoint_config +- [ ] create_model +- [ ] create_notebook_instance +- [ ] create_notebook_instance_lifecycle_config +- [ ] create_presigned_notebook_instance_url +- [ ] create_training_job +- [ ] delete_endpoint +- [ ] delete_endpoint_config +- [ ] delete_model +- [ ] delete_notebook_instance +- [ ] delete_notebook_instance_lifecycle_config +- [ ] delete_tags +- [ ] describe_endpoint +- [ ] describe_endpoint_config +- [ ] describe_model +- [ ] describe_notebook_instance +- [ ] describe_notebook_instance_lifecycle_config +- [ ] describe_training_job +- [ ] list_endpoint_configs +- [ ] list_endpoints +- [ ] list_models +- [ ] list_notebook_instance_lifecycle_configs +- [ ] list_notebook_instances +- [ ] list_tags +- [ ] list_training_jobs +- [ ] start_notebook_instance +- [ ] stop_notebook_instance +- [ ] stop_training_job +- [ ] update_endpoint +- [ ] update_endpoint_weights_and_capacities +- [ ] update_notebook_instance +- [ ] update_notebook_instance_lifecycle_config + +## sagemaker-runtime - 0% implemented +- [ ] invoke_endpoint + ## sdb - 0% implemented - [ ] batch_delete_attributes - [ ] batch_put_attributes @@ -3026,6 +3644,35 @@ - [ ] put_attributes - [ ] select +## secretsmanager - 0% implemented +- [ ] cancel_rotate_secret +- [ ] create_secret +- [ ] delete_secret +- [ ] describe_secret +- [ ] get_random_password +- [ ] get_secret_value +- [ ] list_secret_version_ids +- [ ] list_secrets +- [ ] put_secret_value +- [ ] restore_secret +- [ ] rotate_secret +- [ ] tag_resource +- [ ] untag_resource +- [ ] update_secret +- [ ] update_secret_version_stage + +## serverlessrepo - 0% implemented +- [ ] create_application +- [ ] create_application_version +- [ ] create_cloud_formation_change_set +- [ ] delete_application +- [ ] get_application +- [ ] get_application_policy +- [ ] list_application_versions +- [ ] list_applications +- [ ] put_application_policy +- [ ] update_application + ## servicecatalog - 0% implemented - [ ] accept_portfolio_share - [ ] associate_principal_with_portfolio @@ -3036,13 +3683,16 @@ - [ ] create_portfolio - [ ] create_portfolio_share - [ ] create_product +- [ ] create_provisioned_product_plan - [ ] create_provisioning_artifact - [ ] create_tag_option - [ ] delete_constraint - [ ] delete_portfolio - [ ] delete_portfolio_share - [ ] delete_product +- [ ] delete_provisioned_product_plan - [ ] delete_provisioning_artifact +- [ ] delete_tag_option - [ ] describe_constraint - [ ] describe_copy_product_status - [ ] describe_portfolio @@ -3050,6 +3700,7 @@ - [ ] describe_product_as_admin - [ ] describe_product_view - [ ] describe_provisioned_product +- [ ] describe_provisioned_product_plan - [ ] describe_provisioning_artifact - [ ] describe_provisioning_parameters - [ ] describe_record @@ -3057,6 +3708,7 @@ - [ ] disassociate_principal_from_portfolio - [ ] disassociate_product_from_portfolio - [ ] disassociate_tag_option_from_resource +- [ ] execute_provisioned_product_plan - [ ] list_accepted_portfolio_shares - [ ] list_constraints_for_portfolio - [ ] list_launch_paths @@ -3064,6 +3716,7 @@ - [ ] list_portfolios - [ ] list_portfolios_for_product - [ ] 
list_principals_for_portfolio +- [ ] list_provisioned_product_plans - [ ] list_provisioning_artifacts - [ ] list_record_history - [ ] list_resources_for_tag_option @@ -3073,6 +3726,7 @@ - [ ] scan_provisioned_products - [ ] search_products - [ ] search_products_as_admin +- [ ] search_provisioned_products - [ ] terminate_provisioned_product - [ ] update_constraint - [ ] update_portfolio @@ -3081,11 +3735,32 @@ - [ ] update_provisioning_artifact - [ ] update_tag_option -## ses - 12% implemented +## servicediscovery - 0% implemented +- [ ] create_private_dns_namespace +- [ ] create_public_dns_namespace +- [ ] create_service +- [ ] delete_namespace +- [ ] delete_service +- [ ] deregister_instance +- [ ] get_instance +- [ ] get_instances_health_status +- [ ] get_namespace +- [ ] get_operation +- [ ] get_service +- [ ] list_instances +- [ ] list_namespaces +- [ ] list_operations +- [ ] list_services +- [ ] register_instance +- [ ] update_instance_custom_health_status +- [ ] update_service + +## ses - 11% implemented - [ ] clone_receipt_rule_set - [ ] create_configuration_set - [ ] create_configuration_set_event_destination - [ ] create_configuration_set_tracking_options +- [ ] create_custom_verification_email_template - [ ] create_receipt_filter - [ ] create_receipt_rule - [ ] create_receipt_rule_set @@ -3093,6 +3768,7 @@ - [ ] delete_configuration_set - [ ] delete_configuration_set_event_destination - [ ] delete_configuration_set_tracking_options +- [ ] delete_custom_verification_email_template - [X] delete_identity - [ ] delete_identity_policy - [ ] delete_receipt_filter @@ -3105,6 +3781,7 @@ - [ ] describe_receipt_rule - [ ] describe_receipt_rule_set - [ ] get_account_sending_enabled +- [ ] get_custom_verification_email_template - [ ] get_identity_dkim_attributes - [ ] get_identity_mail_from_domain_attributes - [ ] get_identity_notification_attributes @@ -3114,6 +3791,7 @@ - [ ] get_send_statistics - [ ] get_template - [ ] list_configuration_sets +- [ ] list_custom_verification_email_templates - [X] list_identities - [ ] list_identity_policies - [ ] list_receipt_filters @@ -3124,6 +3802,7 @@ - [ ] reorder_receipt_rule_set - [ ] send_bounce - [ ] send_bulk_templated_email +- [ ] send_custom_verification_email - [X] send_email - [X] send_raw_email - [ ] send_templated_email @@ -3140,6 +3819,7 @@ - [ ] update_configuration_set_reputation_metrics_enabled - [ ] update_configuration_set_sending_enabled - [ ] update_configuration_set_tracking_options +- [ ] update_custom_verification_email_template - [ ] update_receipt_rule - [ ] update_template - [ ] verify_domain_dkim @@ -3155,6 +3835,7 @@ - [ ] describe_attack - [ ] describe_protection - [ ] describe_subscription +- [ ] get_subscription_state - [ ] list_attacks - [ ] list_protections @@ -3222,7 +3903,7 @@ - [X] subscribe - [X] unsubscribe -## sqs - 60% implemented +## sqs - 65% implemented - [X] add_permission - [X] change_message_visibility - [ ] change_message_visibility_batch @@ -3240,11 +3921,11 @@ - [X] remove_permission - [X] send_message - [ ] send_message_batch -- [ ] set_queue_attributes +- [X] set_queue_attributes - [X] tag_queue - [X] untag_queue -## ssm - 9% implemented +## ssm - 10% implemented - [X] add_tags_to_resource - [ ] cancel_command - [ ] create_activation @@ -3269,6 +3950,7 @@ - [ ] describe_activations - [ ] describe_association - [ ] describe_automation_executions +- [ ] describe_automation_step_executions - [ ] describe_available_patches - [ ] describe_document - [ ] describe_document_permission @@ -3329,7 +4011,7 @@ - [ 
] register_task_with_maintenance_window - [X] remove_tags_from_resource - [ ] send_automation_signal -- [ ] send_command +- [X] send_command - [ ] start_automation_execution - [ ] stop_automation_execution - [ ] update_association @@ -3410,6 +4092,7 @@ - [ ] list_volume_initiators - [ ] list_volume_recovery_points - [ ] list_volumes +- [ ] notify_when_uploaded - [ ] refresh_cache - [ ] remove_tags_from_resource - [ ] reset_cache @@ -3452,7 +4135,7 @@ - [ ] refresh_trusted_advisor_check - [ ] resolve_case -## swf - 54% implemented +## swf - 58% implemented - [ ] count_closed_workflow_executions - [ ] count_open_workflow_executions - [X] count_pending_activity_tasks @@ -3481,10 +4164,23 @@ - [X] respond_activity_task_completed - [X] respond_activity_task_failed - [X] respond_decision_task_completed -- [ ] signal_workflow_execution +- [X] signal_workflow_execution - [X] start_workflow_execution - [X] terminate_workflow_execution +## transcribe - 0% implemented +- [ ] create_vocabulary +- [ ] delete_vocabulary +- [ ] get_transcription_job +- [ ] get_vocabulary +- [ ] list_transcription_jobs +- [ ] list_vocabularies +- [ ] start_transcription_job +- [ ] update_vocabulary + +## translate - 0% implemented +- [ ] translate_text + ## waf - 0% implemented - [ ] create_byte_match_set - [ ] create_geo_match_set @@ -3493,6 +4189,7 @@ - [ ] create_regex_match_set - [ ] create_regex_pattern_set - [ ] create_rule +- [ ] create_rule_group - [ ] create_size_constraint_set - [ ] create_sql_injection_match_set - [ ] create_web_acl @@ -3500,10 +4197,12 @@ - [ ] delete_byte_match_set - [ ] delete_geo_match_set - [ ] delete_ip_set +- [ ] delete_permission_policy - [ ] delete_rate_based_rule - [ ] delete_regex_match_set - [ ] delete_regex_pattern_set - [ ] delete_rule +- [ ] delete_rule_group - [ ] delete_size_constraint_set - [ ] delete_sql_injection_match_set - [ ] delete_web_acl @@ -3513,27 +4212,33 @@ - [ ] get_change_token_status - [ ] get_geo_match_set - [ ] get_ip_set +- [ ] get_permission_policy - [ ] get_rate_based_rule - [ ] get_rate_based_rule_managed_keys - [ ] get_regex_match_set - [ ] get_regex_pattern_set - [ ] get_rule +- [ ] get_rule_group - [ ] get_sampled_requests - [ ] get_size_constraint_set - [ ] get_sql_injection_match_set - [ ] get_web_acl - [ ] get_xss_match_set +- [ ] list_activated_rules_in_rule_group - [ ] list_byte_match_sets - [ ] list_geo_match_sets - [ ] list_ip_sets - [ ] list_rate_based_rules - [ ] list_regex_match_sets - [ ] list_regex_pattern_sets +- [ ] list_rule_groups - [ ] list_rules - [ ] list_size_constraint_sets - [ ] list_sql_injection_match_sets +- [ ] list_subscribed_rule_groups - [ ] list_web_acls - [ ] list_xss_match_sets +- [ ] put_permission_policy - [ ] update_byte_match_set - [ ] update_geo_match_set - [ ] update_ip_set @@ -3541,6 +4246,7 @@ - [ ] update_regex_match_set - [ ] update_regex_pattern_set - [ ] update_rule +- [ ] update_rule_group - [ ] update_size_constraint_set - [ ] update_sql_injection_match_set - [ ] update_web_acl @@ -3555,6 +4261,7 @@ - [ ] create_regex_match_set - [ ] create_regex_pattern_set - [ ] create_rule +- [ ] create_rule_group - [ ] create_size_constraint_set - [ ] create_sql_injection_match_set - [ ] create_web_acl @@ -3562,10 +4269,12 @@ - [ ] delete_byte_match_set - [ ] delete_geo_match_set - [ ] delete_ip_set +- [ ] delete_permission_policy - [ ] delete_rate_based_rule - [ ] delete_regex_match_set - [ ] delete_regex_pattern_set - [ ] delete_rule +- [ ] delete_rule_group - [ ] delete_size_constraint_set - [ ] 
delete_sql_injection_match_set - [ ] delete_web_acl @@ -3576,17 +4285,20 @@ - [ ] get_change_token_status - [ ] get_geo_match_set - [ ] get_ip_set +- [ ] get_permission_policy - [ ] get_rate_based_rule - [ ] get_rate_based_rule_managed_keys - [ ] get_regex_match_set - [ ] get_regex_pattern_set - [ ] get_rule +- [ ] get_rule_group - [ ] get_sampled_requests - [ ] get_size_constraint_set - [ ] get_sql_injection_match_set - [ ] get_web_acl - [ ] get_web_acl_for_resource - [ ] get_xss_match_set +- [ ] list_activated_rules_in_rule_group - [ ] list_byte_match_sets - [ ] list_geo_match_sets - [ ] list_ip_sets @@ -3594,11 +4306,14 @@ - [ ] list_regex_match_sets - [ ] list_regex_pattern_sets - [ ] list_resources_for_web_acl +- [ ] list_rule_groups - [ ] list_rules - [ ] list_size_constraint_sets - [ ] list_sql_injection_match_sets +- [ ] list_subscribed_rule_groups - [ ] list_web_acls - [ ] list_xss_match_sets +- [ ] put_permission_policy - [ ] update_byte_match_set - [ ] update_geo_match_set - [ ] update_ip_set @@ -3606,6 +4321,7 @@ - [ ] update_regex_match_set - [ ] update_regex_pattern_set - [ ] update_rule +- [ ] update_rule_group - [ ] update_size_constraint_set - [ ] update_sql_injection_match_set - [ ] update_web_acl @@ -3634,6 +4350,7 @@ - [ ] describe_comments - [ ] describe_document_versions - [ ] describe_folder_contents +- [ ] describe_groups - [ ] describe_notification_subscriptions - [ ] describe_resource_permissions - [ ] describe_root_folders @@ -3652,6 +4369,36 @@ - [ ] update_folder - [ ] update_user +## workmail - 0% implemented +- [ ] associate_delegate_to_resource +- [ ] associate_member_to_group +- [ ] create_alias +- [ ] create_group +- [ ] create_resource +- [ ] create_user +- [ ] delete_alias +- [ ] delete_group +- [ ] delete_resource +- [ ] delete_user +- [ ] deregister_from_work_mail +- [ ] describe_group +- [ ] describe_organization +- [ ] describe_resource +- [ ] describe_user +- [ ] disassociate_delegate_from_resource +- [ ] disassociate_member_from_group +- [ ] list_aliases +- [ ] list_group_members +- [ ] list_groups +- [ ] list_organizations +- [ ] list_resource_delegates +- [ ] list_resources +- [ ] list_users +- [ ] register_to_work_mail +- [ ] reset_password +- [ ] update_primary_email_address +- [ ] update_resource + ## workspaces - 0% implemented - [ ] create_tags - [ ] create_workspaces diff --git a/Makefile b/Makefile index 99b7f2620..98840ba9b 100644 --- a/Makefile +++ b/Makefile @@ -36,14 +36,13 @@ tag_github_release: git tag `python setup.py --version` git push origin `python setup.py --version` -publish: implementation_coverage \ - upload_pypi_artifact \ +publish: upload_pypi_artifact \ tag_github_release \ push_dockerhub_image implementation_coverage: ./scripts/implementation_coverage.py > IMPLEMENTATION_COVERAGE.md - git commit IMPLEMENTATION_COVERAGE.md -m "Updating implementation coverage" + git commit IMPLEMENTATION_COVERAGE.md -m "Updating implementation coverage" || true scaffold: @pip install -r requirements-dev.txt > /dev/null diff --git a/README.md b/README.md index 59dc67432..9642a8db6 100644 --- a/README.md +++ b/README.md @@ -70,6 +70,8 @@ It gets even better! Moto isn't just for Python code and it isn't just for S3. 
L |------------------------------------------------------------------------------| | CloudwatchEvents | @mock_events | all endpoints done | |------------------------------------------------------------------------------| +| Cognito Identity | @mock_cognitoidentity| basic endpoints done | +|------------------------------------------------------------------------------| | Data Pipeline | @mock_datapipeline| basic endpoints done | |------------------------------------------------------------------------------| | DynamoDB | @mock_dynamodb | core endpoints done | diff --git a/docs/docs/getting_started.rst b/docs/docs/getting_started.rst index 97f667d26..d52e76235 100644 --- a/docs/docs/getting_started.rst +++ b/docs/docs/getting_started.rst @@ -20,7 +20,7 @@ If you want to install ``moto`` from source:: Moto usage ---------- -For example we have the following code we want to test: +For example, we have the following code we want to test: .. sourcecode:: python @@ -39,12 +39,12 @@ For example we have the following code we want to test: k.key = self.name k.set_contents_from_string(self.value) -There are several method to do this, just keep in mind Moto creates a full blank environment. +There are several ways to do this, but you should keep in mind that Moto creates a full, blank environment. Decorator ~~~~~~~~~ -With a decorator wrapping all the calls to S3 are automatically mocked out. +With a decorator wrapping, all the calls to S3 are automatically mocked out. .. sourcecode:: python @@ -66,7 +66,7 @@ With a decorator wrapping all the calls to S3 are automatically mocked out. Context manager ~~~~~~~~~~~~~~~ -Same as decorator, every call inside ``with`` statement are mocked out. +Same as the Decorator, every call inside the ``with`` statement is mocked out. .. sourcecode:: python @@ -83,7 +83,7 @@ Same as decorator, every call inside ``with`` statement are mocked out. Raw ~~~ -You can also start and stop manually the mocking. +You can also start and stop the mocking manually. .. sourcecode:: python @@ -104,11 +104,11 @@ You can also start and stop manually the mocking. Stand-alone server mode ~~~~~~~~~~~~~~~~~~~~~~~ -Moto comes with a stand-alone server allowing you to mock out an AWS HTTP endpoint. It is very useful to test even if you don't use Python. +Moto also comes with a stand-alone server allowing you to mock out an AWS HTTP endpoint. For testing purposes, it's extremely useful even if you don't use Python. .. sourcecode:: bash $ moto_server ec2 -p3000 * Running on http://127.0.0.1:3000/ -This method isn't encouraged if you're using ``boto``, best is to use decorator method. +However, this method isn't encouraged if you're using ``boto``, the best solution would be to use a decorator method. 
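Reviewer note: the getting_started.rst changes above describe the decorator pattern in prose only. Here is a minimal sketch of that pattern, written against boto3 rather than the legacy boto client the doc uses; `MyModel`, the bucket name, and the asserted value are illustrative assumptions, not part of this patch.

```python
import boto3
from moto import mock_s3


# Illustrative stand-in for the MyModel class referenced in the doc above.
class MyModel(object):
    def __init__(self, name, value):
        self.name = name
        self.value = value

    def save(self):
        s3 = boto3.client('s3', region_name='us-east-1')
        s3.put_object(Bucket='mybucket', Key=self.name, Body=self.value)


@mock_s3
def test_my_model_save():
    conn = boto3.resource('s3', region_name='us-east-1')
    # Moto starts from a completely blank environment, so the bucket has to
    # be created before the code under test runs.
    conn.create_bucket(Bucket='mybucket')

    MyModel('steve', 'is awesome').save()

    body = conn.Object('mybucket', 'steve').get()['Body'].read().decode('utf-8')
    assert body == 'is awesome'
```

The context-manager and raw `mock.start()` / `mock.stop()` forms described in the doc drive the same patching; only the activation style differs.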
diff --git a/moto/__init__.py b/moto/__init__.py index 3508dfeda..c6f24388b 100644 --- a/moto/__init__.py +++ b/moto/__init__.py @@ -3,7 +3,7 @@ import logging # logging.getLogger('boto').setLevel(logging.CRITICAL) __title__ = 'moto' -__version__ = '1.0.1' +__version__ = '1.3.3' from .acm import mock_acm # flake8: noqa from .apigateway import mock_apigateway, mock_apigateway_deprecated # flake8: noqa @@ -11,6 +11,7 @@ from .autoscaling import mock_autoscaling, mock_autoscaling_deprecated # flake8 from .awslambda import mock_lambda, mock_lambda_deprecated # flake8: noqa from .cloudformation import mock_cloudformation, mock_cloudformation_deprecated # flake8: noqa from .cloudwatch import mock_cloudwatch, mock_cloudwatch_deprecated # flake8: noqa +from .cognitoidentity import mock_cognitoidentity, mock_cognitoidentity_deprecated # flake8: noqa from .datapipeline import mock_datapipeline, mock_datapipeline_deprecated # flake8: noqa from .dynamodb import mock_dynamodb, mock_dynamodb_deprecated # flake8: noqa from .dynamodb2 import mock_dynamodb2, mock_dynamodb2_deprecated # flake8: noqa diff --git a/moto/apigateway/models.py b/moto/apigateway/models.py index e7ff98119..160b443b0 100644 --- a/moto/apigateway/models.py +++ b/moto/apigateway/models.py @@ -1,12 +1,14 @@ from __future__ import absolute_import from __future__ import unicode_literals -import datetime +import random +import string import requests +import time -from moto.packages.responses import responses +from boto3.session import Session +import responses from moto.core import BaseBackend, BaseModel -from moto.core.utils import iso_8601_datetime_with_milliseconds from .utils import create_id from .exceptions import StageNotFoundException @@ -20,8 +22,7 @@ class Deployment(BaseModel, dict): self['id'] = deployment_id self['stageName'] = name self['description'] = description - self['createdDate'] = iso_8601_datetime_with_milliseconds( - datetime.datetime.now()) + self['createdDate'] = int(time.time()) class IntegrationResponse(BaseModel, dict): @@ -293,6 +294,25 @@ class Stage(BaseModel, dict): raise Exception('Patch operation "%s" not implemented' % op['op']) +class ApiKey(BaseModel, dict): + + def __init__(self, name=None, description=None, enabled=True, + generateDistinctId=False, value=None, stageKeys=None, customerId=None): + super(ApiKey, self).__init__() + self['id'] = create_id() + if generateDistinctId: + # Best guess of what AWS does internally + self['value'] = ''.join(random.sample(string.ascii_letters + string.digits, 40)) + else: + self['value'] = value + self['name'] = name + self['customerId'] = customerId + self['description'] = description + self['enabled'] = enabled + self['createdDate'] = self['lastUpdatedDate'] = int(time.time()) + self['stageKeys'] = stageKeys + + class RestAPI(BaseModel): def __init__(self, id, region_name, name, description): @@ -300,7 +320,7 @@ class RestAPI(BaseModel): self.region_name = region_name self.name = name self.description = description - self.create_date = datetime.datetime.utcnow() + self.create_date = int(time.time()) self.deployments = {} self.stages = {} @@ -313,7 +333,7 @@ class RestAPI(BaseModel): "id": self.id, "name": self.name, "description": self.description, - "createdDate": iso_8601_datetime_with_milliseconds(self.create_date), + "createdDate": int(time.time()), } def add_child(self, path, parent_id=None): @@ -388,6 +408,7 @@ class APIGatewayBackend(BaseBackend): def __init__(self, region_name): super(APIGatewayBackend, self).__init__() self.apis = {} + self.keys = {} 
self.region_name = region_name def reset(self): @@ -541,8 +562,22 @@ class APIGatewayBackend(BaseBackend): api = self.get_rest_api(function_id) return api.delete_deployment(deployment_id) + def create_apikey(self, payload): + key = ApiKey(**payload) + self.keys[key['id']] = key + return key + + def get_apikeys(self): + return list(self.keys.values()) + + def get_apikey(self, api_key_id): + return self.keys[api_key_id] + + def delete_apikey(self, api_key_id): + self.keys.pop(api_key_id) + return {} + apigateway_backends = {} -# Not available in boto yet -for region_name in ['us-east-1', 'us-west-2', 'eu-west-1', 'ap-northeast-1']: +for region_name in Session().get_available_regions('apigateway'): apigateway_backends[region_name] = APIGatewayBackend(region_name) diff --git a/moto/apigateway/responses.py b/moto/apigateway/responses.py index 443fd4060..ff6ef1f33 100644 --- a/moto/apigateway/responses.py +++ b/moto/apigateway/responses.py @@ -226,3 +226,25 @@ class APIGatewayResponse(BaseResponse): deployment = self.backend.delete_deployment( function_id, deployment_id) return 200, {}, json.dumps(deployment) + + def apikeys(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + + if self.method == 'POST': + apikey_response = self.backend.create_apikey(json.loads(self.body)) + elif self.method == 'GET': + apikeys_response = self.backend.get_apikeys() + return 200, {}, json.dumps({"item": apikeys_response}) + return 200, {}, json.dumps(apikey_response) + + def apikey_individual(self, request, full_url, headers): + self.setup_class(request, full_url, headers) + + url_path_parts = self.path.split("/") + apikey = url_path_parts[2] + + if self.method == 'GET': + apikey_response = self.backend.get_apikey(apikey) + elif self.method == 'DELETE': + apikey_response = self.backend.delete_apikey(apikey) + return 200, {}, json.dumps(apikey_response) diff --git a/moto/apigateway/urls.py b/moto/apigateway/urls.py index 5637699e0..ca1f445a7 100644 --- a/moto/apigateway/urls.py +++ b/moto/apigateway/urls.py @@ -18,4 +18,6 @@ url_paths = { '{0}/restapis/(?P[^/]+)/resources/(?P[^/]+)/methods/(?P[^/]+)/responses/(?P\d+)$': APIGatewayResponse().resource_method_responses, '{0}/restapis/(?P[^/]+)/resources/(?P[^/]+)/methods/(?P[^/]+)/integration/?$': APIGatewayResponse().integrations, '{0}/restapis/(?P[^/]+)/resources/(?P[^/]+)/methods/(?P[^/]+)/integration/responses/(?P\d+)/?$': APIGatewayResponse().integration_responses, + '{0}/apikeys$': APIGatewayResponse().apikeys, + '{0}/apikeys/(?P[^/]+)': APIGatewayResponse().apikey_individual, } diff --git a/moto/autoscaling/exceptions.py b/moto/autoscaling/exceptions.py index 15b2e4f4a..7dd81e0d6 100644 --- a/moto/autoscaling/exceptions.py +++ b/moto/autoscaling/exceptions.py @@ -3,11 +3,12 @@ from moto.core.exceptions import RESTError class AutoscalingClientError(RESTError): + code = 400 + + +class ResourceContentionError(RESTError): code = 500 - -class ResourceContentionError(AutoscalingClientError): - def __init__(self): super(ResourceContentionError, self).__init__( "ResourceContentionError", diff --git a/moto/autoscaling/models.py b/moto/autoscaling/models.py index ab99e4119..0ebc4c465 100644 --- a/moto/autoscaling/models.py +++ b/moto/autoscaling/models.py @@ -7,7 +7,7 @@ from moto.elb import elb_backends from moto.elbv2 import elbv2_backends from moto.elb.exceptions import LoadBalancerNotFoundError from .exceptions import ( - ResourceContentionError, + AutoscalingClientError, ResourceContentionError, ) # 
http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AS_Concepts.html#Cooldown @@ -155,14 +155,21 @@ class FakeAutoScalingGroup(BaseModel): autoscaling_backend, tags): self.autoscaling_backend = autoscaling_backend self.name = name + + if not availability_zones and not vpc_zone_identifier: + raise AutoscalingClientError( + "ValidationError", + "At least one Availability Zone or VPC Subnet is required." + ) self.availability_zones = availability_zones + self.vpc_zone_identifier = vpc_zone_identifier + self.max_size = max_size self.min_size = min_size self.launch_config = self.autoscaling_backend.launch_configurations[ launch_config_name] self.launch_config_name = launch_config_name - self.vpc_zone_identifier = vpc_zone_identifier self.default_cooldown = default_cooldown if default_cooldown else DEFAULT_COOLDOWN self.health_check_period = health_check_period @@ -172,6 +179,7 @@ class FakeAutoScalingGroup(BaseModel): self.placement_group = placement_group self.termination_policies = termination_policies + self.suspended_processes = [] self.instance_states = [] self.tags = tags if tags else [] self.set_desired_capacity(desired_capacity) @@ -614,6 +622,10 @@ class AutoScalingBackend(BaseBackend): asg_targets = [{'id': x.instance.id} for x in group.instance_states] self.elbv2_backend.deregister_targets(target_group, (asg_targets)) + def suspend_processes(self, group_name, scaling_processes): + group = self.autoscaling_groups[group_name] + group.suspended_processes = scaling_processes or [] + autoscaling_backends = {} for region, ec2_backend in ec2_backends.items(): diff --git a/moto/autoscaling/responses.py b/moto/autoscaling/responses.py index d3f9ca483..c7170e17e 100644 --- a/moto/autoscaling/responses.py +++ b/moto/autoscaling/responses.py @@ -166,7 +166,7 @@ class AutoScalingResponse(BaseResponse): start = all_names.index(token) + 1 else: start = 0 - max_records = self._get_param("MaxRecords", 50) + max_records = self._get_int_param("MaxRecords", 50) if max_records > 100: raise ValueError groups = all_groups[start:start + max_records] @@ -283,6 +283,13 @@ class AutoScalingResponse(BaseResponse): template = self.response_template(DETACH_LOAD_BALANCERS_TEMPLATE) return template.render() + def suspend_processes(self): + autoscaling_group_name = self._get_param('AutoScalingGroupName') + scaling_processes = self._get_multi_param('ScalingProcesses.member') + self.autoscaling_backend.suspend_processes(autoscaling_group_name, scaling_processes) + template = self.response_template(SUSPEND_PROCESSES_TEMPLATE) + return template.render() + CREATE_LAUNCH_CONFIGURATION_TEMPLATE = """ @@ -463,7 +470,14 @@ DESCRIBE_AUTOSCALING_GROUPS_TEMPLATE = """ {% endfor %} - + + {% for suspended_process in group.suspended_processes %} + + {{suspended_process}} + + + {% endfor %} + {{ group.name }} {{ group.health_check_type }} 2013-05-06T17:47:15.107Z @@ -644,6 +658,12 @@ DETACH_LOAD_BALANCERS_TEMPLATE = """ + + 7c6e177f-f082-11e1-ac58-3714bEXAMPLE + +""" + SET_INSTANCE_HEALTH_TEMPLATE = """ diff --git a/moto/awslambda/models.py b/moto/awslambda/models.py index 947691bcf..80b4ffba3 100644 --- a/moto/awslambda/models.py +++ b/moto/awslambda/models.py @@ -104,7 +104,7 @@ class _DockerDataVolumeContext: # It doesn't exist so we need to create it self._vol_ref.volume = self._lambda_func.docker_client.volumes.create(self._lambda_func.code_sha_256) - container = self._lambda_func.docker_client.containers.run('alpine', 'sleep 100', volumes={self.name: '/tmp/data'}, detach=True) + container = 
self._lambda_func.docker_client.containers.run('alpine', 'sleep 100', volumes={self.name: {'bind': '/tmp/data', 'mode': 'rw'}}, detach=True) try: tar_bytes = zip2tar(self._lambda_func.code_bytes) container.put_archive('/tmp/data', tar_bytes) @@ -309,7 +309,7 @@ class LambdaFunction(BaseModel): finally: if container: try: - exit_code = container.wait(timeout=300) + exit_code = container.wait(timeout=300)['StatusCode'] except requests.exceptions.ReadTimeout: exit_code = -1 container.stop() @@ -603,7 +603,7 @@ class LambdaBackend(BaseBackend): def list_functions(self): return self._lambdas.all() - def send_message(self, function_name, message, subject=None): + def send_message(self, function_name, message, subject=None, qualifier=None): event = { "Records": [ { @@ -636,8 +636,8 @@ class LambdaBackend(BaseBackend): ] } - self._functions[function_name][-1].invoke(json.dumps(event), {}, {}) - pass + func = self._lambdas.get_function(function_name, qualifier) + func.invoke(json.dumps(event), {}, {}) def list_tags(self, resource): return self.get_function_by_arn(resource).tags diff --git a/moto/awslambda/responses.py b/moto/awslambda/responses.py index 5676da1ca..2c8a54523 100644 --- a/moto/awslambda/responses.py +++ b/moto/awslambda/responses.py @@ -94,25 +94,21 @@ class LambdaResponse(BaseResponse): return self._add_policy(request, full_url, headers) def _add_policy(self, request, full_url, headers): - lambda_backend = self.get_lambda_backend(full_url) - path = request.path if hasattr(request, 'path') else request.path_url function_name = path.split('/')[-2] - if lambda_backend.has_function(function_name): + if self.lambda_backend.get_function(function_name): policy = request.body.decode('utf8') - lambda_backend.add_policy(function_name, policy) + self.lambda_backend.add_policy(function_name, policy) return 200, {}, json.dumps(dict(Statement=policy)) else: return 404, {}, "{}" def _get_policy(self, request, full_url, headers): - lambda_backend = self.get_lambda_backend(full_url) - path = request.path if hasattr(request, 'path') else request.path_url function_name = path.split('/')[-2] - if lambda_backend.has_function(function_name): - function = lambda_backend.get_function(function_name) - return 200, {}, json.dumps(dict(Policy="{\"Statement\":[" + function.policy + "]}")) + if self.lambda_backend.get_function(function_name): + lambda_function = self.lambda_backend.get_function(function_name) + return 200, {}, json.dumps(dict(Policy="{\"Statement\":[" + lambda_function.policy + "]}")) else: return 404, {}, "{}" diff --git a/moto/backends.py b/moto/backends.py index 6baf35f05..d8d317573 100644 --- a/moto/backends.py +++ b/moto/backends.py @@ -6,6 +6,7 @@ from moto.autoscaling import autoscaling_backends from moto.awslambda import lambda_backends from moto.cloudformation import cloudformation_backends from moto.cloudwatch import cloudwatch_backends +from moto.cognitoidentity import cognitoidentity_backends from moto.core import moto_api_backends from moto.datapipeline import datapipeline_backends from moto.dynamodb import dynamodb_backends @@ -34,6 +35,7 @@ from moto.sns import sns_backends from moto.sqs import sqs_backends from moto.ssm import ssm_backends from moto.sts import sts_backends +from moto.swf import swf_backends from moto.xray import xray_backends from moto.iot import iot_backends from moto.iotdata import iotdata_backends @@ -48,6 +50,7 @@ BACKENDS = { 'batch': batch_backends, 'cloudformation': cloudformation_backends, 'cloudwatch': cloudwatch_backends, + 'cognito-identity': 
cognitoidentity_backends, 'datapipeline': datapipeline_backends, 'dynamodb': dynamodb_backends, 'dynamodb2': dynamodb_backends2, @@ -76,6 +79,7 @@ BACKENDS = { 'sqs': sqs_backends, 'ssm': ssm_backends, 'sts': sts_backends, + 'swf': swf_backends, 'route53': route53_backends, 'lambda': lambda_backends, 'xray': xray_backends, diff --git a/moto/cloudformation/models.py b/moto/cloudformation/models.py index 42809608b..57f42df56 100644 --- a/moto/cloudformation/models.py +++ b/moto/cloudformation/models.py @@ -107,7 +107,8 @@ class FakeStack(BaseModel): def update(self, template, role_arn=None, parameters=None, tags=None): self._add_stack_event("UPDATE_IN_PROGRESS", resource_status_reason="User Initiated") self.template = template - self.resource_map.update(json.loads(template), parameters) + self._parse_template() + self.resource_map.update(self.template_dict, parameters) self.output_map = self._create_output_map() self._add_stack_event("UPDATE_COMPLETE") self.status = "UPDATE_COMPLETE" @@ -188,6 +189,24 @@ class CloudFormationBackend(BaseBackend): self.change_sets[change_set_id] = stack return change_set_id, stack.stack_id + def execute_change_set(self, change_set_name, stack_name=None): + stack = None + if change_set_name in self.change_sets: + # This means arn was passed in + stack = self.change_sets[change_set_name] + else: + for cs in self.change_sets: + if self.change_sets[cs].name == change_set_name: + stack = self.change_sets[cs] + if stack is None: + raise ValidationError(stack_name) + if stack.events[-1].resource_status == 'REVIEW_IN_PROGRESS': + stack._add_stack_event('CREATE_COMPLETE') + else: + stack._add_stack_event('UPDATE_IN_PROGRESS') + stack._add_stack_event('UPDATE_COMPLETE') + return True + def describe_stacks(self, name_or_stack_id): stacks = self.stacks.values() if name_or_stack_id: diff --git a/moto/cloudformation/parsing.py b/moto/cloudformation/parsing.py index e617fa9f8..849d8c917 100644 --- a/moto/cloudformation/parsing.py +++ b/moto/cloudformation/parsing.py @@ -10,6 +10,7 @@ from moto.autoscaling import models as autoscaling_models from moto.awslambda import models as lambda_models from moto.batch import models as batch_models from moto.cloudwatch import models as cloudwatch_models +from moto.cognitoidentity import models as cognitoidentity_models from moto.datapipeline import models as datapipeline_models from moto.dynamodb import models as dynamodb_models from moto.ec2 import models as ec2_models @@ -65,6 +66,7 @@ MODEL_MAP = { "AWS::ElasticLoadBalancingV2::LoadBalancer": elbv2_models.FakeLoadBalancer, "AWS::ElasticLoadBalancingV2::TargetGroup": elbv2_models.FakeTargetGroup, "AWS::ElasticLoadBalancingV2::Listener": elbv2_models.FakeListener, + "AWS::Cognito::IdentityPool": cognitoidentity_models.CognitoIdentity, "AWS::DataPipeline::Pipeline": datapipeline_models.Pipeline, "AWS::IAM::InstanceProfile": iam_models.InstanceProfile, "AWS::IAM::Role": iam_models.Role, @@ -106,6 +108,8 @@ NULL_MODELS = [ "AWS::CloudFormation::WaitConditionHandle", ] +DEFAULT_REGION = 'us-east-1' + logger = logging.getLogger("moto") @@ -203,6 +207,14 @@ def clean_json(resource_json, resources_map): if any(values): return values[0] + if 'Fn::GetAZs' in resource_json: + region = resource_json.get('Fn::GetAZs') or DEFAULT_REGION + result = [] + # TODO: make this configurable, to reflect the real AWS AZs + for az in ('a', 'b', 'c', 'd'): + result.append('%s%s' % (region, az)) + return result + cleaned_json = {} for key, value in resource_json.items(): cleaned_val = clean_json(value, 
resources_map) diff --git a/moto/cloudformation/responses.py b/moto/cloudformation/responses.py index 93d59f686..a1295a20d 100644 --- a/moto/cloudformation/responses.py +++ b/moto/cloudformation/responses.py @@ -118,6 +118,24 @@ class CloudFormationResponse(BaseResponse): template = self.response_template(CREATE_CHANGE_SET_RESPONSE_TEMPLATE) return template.render(stack_id=stack_id, change_set_id=change_set_id) + @amzn_request_id + def execute_change_set(self): + stack_name = self._get_param('StackName') + change_set_name = self._get_param('ChangeSetName') + self.cloudformation_backend.execute_change_set( + stack_name=stack_name, + change_set_name=change_set_name, + ) + if self.request_json: + return json.dumps({ + 'ExecuteChangeSetResponse': { + 'ExecuteChangeSetResult': {}, + } + }) + else: + template = self.response_template(EXECUTE_CHANGE_SET_RESPONSE_TEMPLATE) + return template.render() + def describe_stacks(self): stack_name_or_id = None if self._get_param('StackName'): @@ -203,19 +221,25 @@ class CloudFormationResponse(BaseResponse): stack_name = self._get_param('StackName') role_arn = self._get_param('RoleARN') template_url = self._get_param('TemplateURL') + stack_body = self._get_param('TemplateBody') + stack = self.cloudformation_backend.get_stack(stack_name) if self._get_param('UsePreviousTemplate') == "true": - stack_body = self.cloudformation_backend.get_stack( - stack_name).template - elif template_url: + stack_body = stack.template + elif not stack_body and template_url: stack_body = self._get_stack_from_s3_url(template_url) - else: - stack_body = self._get_param('TemplateBody') + incoming_params = self._get_list_prefix("Parameters.member") parameters = dict([ (parameter['parameter_key'], parameter['parameter_value']) for parameter - in self._get_list_prefix("Parameters.member") + in incoming_params if 'parameter_value' in parameter ]) + previous = dict([ + (parameter['parameter_key'], stack.parameters[parameter['parameter_key']]) + for parameter + in incoming_params if 'use_previous_value' in parameter + ]) + parameters.update(previous) # boto3 is supposed to let you clear the tags by passing an empty value, but the request body doesn't # end up containing anything we can use to differentiate between passing an empty value versus not # passing anything. so until that changes, moto won't be able to clear tags, only update them. 
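Reviewer note: the `execute_change_set` backend method and response handler added above can be exercised end-to-end roughly as sketched below. This assumes `create_change_set` registers the stack the way `create_stack` does; the deliberately empty template and the stack/change-set names are illustrative, not taken from this PR's tests.

```python
import json

import boto3
from moto import mock_cloudformation

# Hypothetical, deliberately empty template used only for illustration.
TEMPLATE = json.dumps({
    "AWSTemplateFormatVersion": "2010-09-09",
    "Description": "Change set demo (illustrative only)",
    "Resources": {},
})


@mock_cloudformation
def test_execute_change_set():
    cf = boto3.client('cloudformation', region_name='us-east-1')
    cf.create_change_set(
        StackName='test-stack',
        ChangeSetName='test-change-set',
        TemplateBody=TEMPLATE,
        ChangeSetType='CREATE',
    )
    # The new backend method looks the change set up by name or ARN and
    # appends CREATE_/UPDATE_ stack events depending on its current state.
    cf.execute_change_set(ChangeSetName='test-change-set', StackName='test-stack')
    print(len(cf.describe_stacks()['Stacks']))
```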
@@ -302,6 +326,16 @@ CREATE_CHANGE_SET_RESPONSE_TEMPLATE = """ """ +EXECUTE_CHANGE_SET_RESPONSE_TEMPLATE = """ + + + + + {{ request_id }} + + +""" + DESCRIBE_STACKS_TEMPLATE = """ diff --git a/moto/cloudwatch/models.py b/moto/cloudwatch/models.py index 395f4f0ba..ba6569981 100644 --- a/moto/cloudwatch/models.py +++ b/moto/cloudwatch/models.py @@ -74,18 +74,18 @@ class FakeAlarm(BaseModel): self.state_reason = '' self.state_reason_data = '{}' - self.state = 'OK' + self.state_value = 'OK' self.state_updated_timestamp = datetime.utcnow() def update_state(self, reason, reason_data, state_value): # History type, that then decides what the rest of the items are, can be one of ConfigurationUpdate | StateUpdate | Action self.history.append( - ('StateUpdate', self.state_reason, self.state_reason_data, self.state, self.state_updated_timestamp) + ('StateUpdate', self.state_reason, self.state_reason_data, self.state_value, self.state_updated_timestamp) ) self.state_reason = reason self.state_reason_data = reason_data - self.state = state_value + self.state_value = state_value self.state_updated_timestamp = datetime.utcnow() @@ -221,7 +221,7 @@ class CloudWatchBackend(BaseBackend): ] def get_alarms_by_state_value(self, target_state): - return filter(lambda alarm: alarm.state == target_state, self.alarms.values()) + return filter(lambda alarm: alarm.state_value == target_state, self.alarms.values()) def delete_alarms(self, alarm_names): for alarm_name in alarm_names: diff --git a/moto/cognitoidentity/__init__.py b/moto/cognitoidentity/__init__.py new file mode 100644 index 000000000..2f040fa19 --- /dev/null +++ b/moto/cognitoidentity/__init__.py @@ -0,0 +1,7 @@ +from __future__ import unicode_literals +from .models import cognitoidentity_backends +from ..core.models import base_decorator, deprecated_base_decorator + +cognitoidentity_backend = cognitoidentity_backends['us-east-1'] +mock_cognitoidentity = base_decorator(cognitoidentity_backends) +mock_cognitoidentity_deprecated = deprecated_base_decorator(cognitoidentity_backends) diff --git a/moto/cognitoidentity/models.py b/moto/cognitoidentity/models.py new file mode 100644 index 000000000..daa2a4641 --- /dev/null +++ b/moto/cognitoidentity/models.py @@ -0,0 +1,101 @@ +from __future__ import unicode_literals + +import datetime +import json + +import boto.cognito.identity + +from moto.compat import OrderedDict +from moto.core import BaseBackend, BaseModel +from moto.core.utils import iso_8601_datetime_with_milliseconds + +from .utils import get_random_identity_id + + +class CognitoIdentity(BaseModel): + + def __init__(self, region, identity_pool_name, **kwargs): + self.identity_pool_name = identity_pool_name + self.allow_unauthenticated_identities = kwargs.get('allow_unauthenticated_identities', '') + self.supported_login_providers = kwargs.get('supported_login_providers', {}) + self.developer_provider_name = kwargs.get('developer_provider_name', '') + self.open_id_connect_provider_arns = kwargs.get('open_id_connect_provider_arns', []) + self.cognito_identity_providers = kwargs.get('cognito_identity_providers', []) + self.saml_provider_arns = kwargs.get('saml_provider_arns', []) + + self.identity_pool_id = get_random_identity_id(region) + self.creation_time = datetime.datetime.utcnow() + + +class CognitoIdentityBackend(BaseBackend): + + def __init__(self, region): + super(CognitoIdentityBackend, self).__init__() + self.region = region + self.identity_pools = OrderedDict() + + def reset(self): + region = self.region + self.__dict__ = {} + 
self.__init__(region) + + def create_identity_pool(self, identity_pool_name, allow_unauthenticated_identities, + supported_login_providers, developer_provider_name, open_id_connect_provider_arns, + cognito_identity_providers, saml_provider_arns): + + new_identity = CognitoIdentity(self.region, identity_pool_name, + allow_unauthenticated_identities=allow_unauthenticated_identities, + supported_login_providers=supported_login_providers, + developer_provider_name=developer_provider_name, + open_id_connect_provider_arns=open_id_connect_provider_arns, + cognito_identity_providers=cognito_identity_providers, + saml_provider_arns=saml_provider_arns) + self.identity_pools[new_identity.identity_pool_id] = new_identity + + response = json.dumps({ + 'IdentityPoolId': new_identity.identity_pool_id, + 'IdentityPoolName': new_identity.identity_pool_name, + 'AllowUnauthenticatedIdentities': new_identity.allow_unauthenticated_identities, + 'SupportedLoginProviders': new_identity.supported_login_providers, + 'DeveloperProviderName': new_identity.developer_provider_name, + 'OpenIdConnectProviderARNs': new_identity.open_id_connect_provider_arns, + 'CognitoIdentityProviders': new_identity.cognito_identity_providers, + 'SamlProviderARNs': new_identity.saml_provider_arns + }) + + return response + + def get_id(self): + identity_id = {'IdentityId': get_random_identity_id(self.region)} + return json.dumps(identity_id) + + def get_credentials_for_identity(self, identity_id): + duration = 90 + now = datetime.datetime.utcnow() + expiration = now + datetime.timedelta(seconds=duration) + expiration_str = str(iso_8601_datetime_with_milliseconds(expiration)) + response = json.dumps( + { + "Credentials": + { + "AccessKeyId": "TESTACCESSKEY12345", + "Expiration": expiration_str, + "SecretKey": "ABCSECRETKEY", + "SessionToken": "ABC12345" + }, + "IdentityId": identity_id + }) + return response + + def get_open_id_token_for_developer_identity(self, identity_id): + response = json.dumps( + { + "IdentityId": identity_id, + "Token": get_random_identity_id(self.region) + }) + return response + + +cognitoidentity_backends = {} +for region in boto.cognito.identity.regions(): + cognitoidentity_backends[region.name] = CognitoIdentityBackend(region.name) diff --git a/moto/cognitoidentity/responses.py b/moto/cognitoidentity/responses.py new file mode 100644 index 000000000..ea54b2cff --- /dev/null +++ b/moto/cognitoidentity/responses.py @@ -0,0 +1,34 @@ +from __future__ import unicode_literals + +from moto.core.responses import BaseResponse + +from .models import cognitoidentity_backends + + +class CognitoIdentityResponse(BaseResponse): + + def create_identity_pool(self): + identity_pool_name = self._get_param('IdentityPoolName') + allow_unauthenticated_identities = self._get_param('AllowUnauthenticatedIdentities') + supported_login_providers = self._get_param('SupportedLoginProviders') + developer_provider_name = self._get_param('DeveloperProviderName') + open_id_connect_provider_arns = self._get_param('OpenIdConnectProviderARNs') + cognito_identity_providers = self._get_param('CognitoIdentityProviders') + saml_provider_arns = self._get_param('SamlProviderARNs') + return cognitoidentity_backends[self.region].create_identity_pool( + identity_pool_name=identity_pool_name, + allow_unauthenticated_identities=allow_unauthenticated_identities, + supported_login_providers=supported_login_providers, + developer_provider_name=developer_provider_name, + open_id_connect_provider_arns=open_id_connect_provider_arns, + 
cognito_identity_providers=cognito_identity_providers, + saml_provider_arns=saml_provider_arns) + + def get_id(self): + return cognitoidentity_backends[self.region].get_id() + + def get_credentials_for_identity(self): + return cognitoidentity_backends[self.region].get_credentials_for_identity(self._get_param('IdentityId')) + + def get_open_id_token_for_developer_identity(self): + return cognitoidentity_backends[self.region].get_open_id_token_for_developer_identity(self._get_param('IdentityId')) diff --git a/moto/cognitoidentity/urls.py b/moto/cognitoidentity/urls.py new file mode 100644 index 000000000..3fe63ef07 --- /dev/null +++ b/moto/cognitoidentity/urls.py @@ -0,0 +1,10 @@ +from __future__ import unicode_literals +from .responses import CognitoIdentityResponse + +url_bases = [ + "https?://cognito-identity.(.+).amazonaws.com", +] + +url_paths = { + '{0}/$': CognitoIdentityResponse.dispatch, +} diff --git a/moto/cognitoidentity/utils.py b/moto/cognitoidentity/utils.py new file mode 100644 index 000000000..359631763 --- /dev/null +++ b/moto/cognitoidentity/utils.py @@ -0,0 +1,5 @@ +from moto.core.utils import get_random_hex + + +def get_random_identity_id(region): + return "{0}:{0}".format(region, get_random_hex(length=19)) diff --git a/moto/core/models.py b/moto/core/models.py index c6fb72ffa..92dc2a980 100644 --- a/moto/core/models.py +++ b/moto/core/models.py @@ -9,7 +9,7 @@ import re import six from moto import settings -from moto.packages.responses import responses +import responses from moto.packages.httpretty import HTTPretty from .utils import ( convert_httpretty_response, @@ -124,31 +124,102 @@ RESPONSES_METHODS = [responses.GET, responses.DELETE, responses.HEAD, responses.OPTIONS, responses.PATCH, responses.POST, responses.PUT] -class ResponsesMockAWS(BaseMockAWS): +class CallbackResponse(responses.CallbackResponse): + ''' + Need to subclass so we can change a couple things + ''' + def get_response(self, request): + ''' + Need to override this so we can pass decode_content=False + ''' + headers = self.get_headers() + result = self.callback(request) + if isinstance(result, Exception): + raise result + + status, r_headers, body = result + body = responses._handle_body(body) + headers.update(r_headers) + + return responses.HTTPResponse( + status=status, + reason=six.moves.http_client.responses.get(status), + body=body, + headers=headers, + preload_content=False, + # Need to not decode_content to mimic requests + decode_content=False, + ) + + def _url_matches(self, url, other, match_querystring=False): + ''' + Need to override this so we can fix querystrings breaking regex matching + ''' + if not match_querystring: + other = other.split('?', 1)[0] + + if responses._is_string(url): + if responses._has_unicode(url): + url = responses._clean_unicode(url) + if not isinstance(other, six.text_type): + other = other.encode('ascii').decode('utf8') + return self._url_matches_strict(url, other) + elif isinstance(url, responses.Pattern) and url.match(other): + return True + else: + return False + + +botocore_mock = responses.RequestsMock(assert_all_requests_are_fired=False, target='botocore.vendored.requests.adapters.HTTPAdapter.send') +responses_mock = responses._default_mock + + +class ResponsesMockAWS(BaseMockAWS): def reset(self): - responses.reset() + botocore_mock.reset() + responses_mock.reset() def enable_patching(self): - responses.start() + if not hasattr(botocore_mock, '_patcher') or not hasattr(botocore_mock._patcher, 'target'): + # Check for unactivated patcher + 
botocore_mock.start() + + if not hasattr(responses_mock, '_patcher') or not hasattr(responses_mock._patcher, 'target'): + responses_mock.start() + for method in RESPONSES_METHODS: for backend in self.backends_for_urls.values(): for key, value in backend.urls.items(): - responses.add_callback( - method=method, - url=re.compile(key), - callback=convert_flask_to_responses_response(value), + responses_mock.add( + CallbackResponse( + method=method, + url=re.compile(key), + callback=convert_flask_to_responses_response(value), + stream=True, + match_querystring=False, + ) + ) + botocore_mock.add( + CallbackResponse( + method=method, + url=re.compile(key), + callback=convert_flask_to_responses_response(value), + stream=True, + match_querystring=False, + ) ) - - for pattern in responses.mock._urls: - pattern['stream'] = True def disable_patching(self): try: - responses.stop() - except AttributeError: + botocore_mock.stop() + except RuntimeError: + pass + + try: + responses_mock.stop() + except RuntimeError: pass - responses.reset() MockAWS = ResponsesMockAWS diff --git a/moto/core/responses.py b/moto/core/responses.py index 52be602f6..ed4792083 100644 --- a/moto/core/responses.py +++ b/moto/core/responses.py @@ -108,6 +108,7 @@ class BaseResponse(_TemplateEnvironmentMixin): # to extract region, use [^.] region_regex = re.compile(r'\.(?P[a-z]{2}-[a-z]+-\d{1})\.amazonaws\.com') param_list_regex = re.compile(r'(.*)\.(\d+)\.') + access_key_regex = re.compile(r'AWS.*(?P(? len(param_prefix) and \ + not name[len(param_prefix):].startswith('.'): + continue + match = self.param_list_regex.search(name[len(param_prefix):]) if len(name) > len(param_prefix) else None if match: prefix = param_prefix + match.group(1) @@ -469,6 +492,54 @@ class BaseResponse(_TemplateEnvironmentMixin): return results + def _get_object_map(self, prefix, name='Name', value='Value'): + """ + Given a query dict like + { + Prefix.1.Name: [u'event'], + Prefix.1.Value.StringValue: [u'order_cancelled'], + Prefix.1.Value.DataType: [u'String'], + Prefix.2.Name: [u'store'], + Prefix.2.Value.StringValue: [u'example_corp'], + Prefix.2.Value.DataType [u'String'], + } + + returns + { + 'event': { + 'DataType': 'String', + 'StringValue': 'example_corp' + }, + 'store': { + 'DataType': 'String', + 'StringValue': 'order_cancelled' + } + } + """ + object_map = {} + index = 1 + while True: + # Loop through looking for keys representing object name + name_key = '{0}.{1}.{2}'.format(prefix, index, name) + obj_name = self.querystring.get(name_key) + if not obj_name: + # Found all keys + break + + obj = {} + value_key_prefix = '{0}.{1}.{2}.'.format( + prefix, index, value) + for k, v in self.querystring.items(): + if k.startswith(value_key_prefix): + _, value_key = k.split(value_key_prefix, 1) + obj[value_key] = v[0] + + object_map[obj_name[0]] = obj + + index += 1 + + return object_map + @property def request_json(self): return 'JSON' in self.querystring.get('ContentType', []) @@ -551,7 +622,7 @@ class AWSServiceSpec(object): def __init__(self, path): self.path = resource_filename('botocore', path) - with open(self.path) as f: + with open(self.path, "rb") as f: spec = json.load(f) self.metadata = spec['metadata'] self.operations = spec['operations'] diff --git a/moto/core/utils.py b/moto/core/utils.py index 43f05672e..86e7632b0 100644 --- a/moto/core/utils.py +++ b/moto/core/utils.py @@ -18,6 +18,8 @@ def camelcase_to_underscores(argument): python underscore variable like the_new_attribute''' result = '' prev_char_title = True + if not argument: + return 
argument for index, char in enumerate(argument): try: next_char_title = argument[index + 1].istitle() diff --git a/moto/dynamodb2/comparisons.py b/moto/dynamodb2/comparisons.py index 68051460e..51d62fb83 100644 --- a/moto/dynamodb2/comparisons.py +++ b/moto/dynamodb2/comparisons.py @@ -176,6 +176,8 @@ def get_filter_expression(expr, names, values): next_token = six.next(token_iterator) while next_token != ')': + if next_token in values_map: + next_token = values_map[next_token] function_list.append(next_token) next_token = six.next(token_iterator) diff --git a/moto/dynamodb2/models.py b/moto/dynamodb2/models.py index 0a48c277a..73b09d73c 100644 --- a/moto/dynamodb2/models.py +++ b/moto/dynamodb2/models.py @@ -135,7 +135,9 @@ class Item(BaseModel): assert len(parts) % 2 == 0, "Mismatched operators and values in update expression: '{}'".format(update_expression) for action, valstr in zip(parts[:-1:2], parts[1::2]): action = action.upper() - values = valstr.split(',') + + # "Should" retain arguments in side (...) + values = re.split(r',(?![^(]*\))', valstr) for value in values: # A Real value value = value.lstrip(":").rstrip(",").strip() @@ -145,9 +147,23 @@ class Item(BaseModel): if action == "REMOVE": self.attrs.pop(value, None) elif action == 'SET': - key, value = value.split("=") + key, value = value.split("=", 1) key = key.strip() value = value.strip() + + # If not exists, changes value to a default if needed, else its the same as it was + if value.startswith('if_not_exists'): + # Function signature + match = re.match(r'.*if_not_exists\((?P.+),\s*(?P.+)\).*', value) + if not match: + raise TypeError + + path, value = match.groups() + + # If it already exists, get its value so we dont overwrite it + if path in self.attrs: + value = self.attrs[path].cast_value + if value in expression_attribute_values: value = DynamoType(expression_attribute_values[value]) else: @@ -520,14 +536,6 @@ class Table(BaseModel): else: results.sort(key=lambda item: item.range_key) - if projection_expression: - expressions = [x.strip() for x in projection_expression.split(',')] - results = copy.deepcopy(results) - for result in results: - for attr in list(result.attrs): - if attr not in expressions: - result.attrs.pop(attr) - if scan_index_forward is False: results.reverse() @@ -536,6 +544,14 @@ class Table(BaseModel): if filter_expression is not None: results = [item for item in results if filter_expression.expr(item)] + if projection_expression: + expressions = [x.strip() for x in projection_expression.split(',')] + results = copy.deepcopy(results) + for result in results: + for attr in list(result.attrs): + if attr not in expressions: + result.attrs.pop(attr) + results, last_evaluated_key = self._trim_results(results, limit, exclusive_start_key) return results, scanned_count, last_evaluated_key diff --git a/moto/dynamodb2/responses.py b/moto/dynamodb2/responses.py index 952d33efa..3c7e7ffc2 100644 --- a/moto/dynamodb2/responses.py +++ b/moto/dynamodb2/responses.py @@ -8,6 +8,18 @@ from moto.core.utils import camelcase_to_underscores, amzn_request_id from .models import dynamodb_backends, dynamo_json_dump +def has_empty_keys_or_values(_dict): + if _dict == "": + return True + if not isinstance(_dict, dict): + return False + return any( + key == '' or value == '' or + has_empty_keys_or_values(value) + for key, value in _dict.items() + ) + + class DynamoHandler(BaseResponse): def get_endpoint_name(self, headers): @@ -161,8 +173,7 @@ class DynamoHandler(BaseResponse): name = self.body['TableName'] item = 
self.body['Item'] - res = re.search('\"\"', json.dumps(item)) - if res: + if has_empty_keys_or_values(item): er = 'com.amazonaws.dynamodb.v20111205#ValidationException' return (400, {'server': 'amazon.com'}, diff --git a/moto/ec2/exceptions.py b/moto/ec2/exceptions.py index 5cff527be..f747c9cd5 100644 --- a/moto/ec2/exceptions.py +++ b/moto/ec2/exceptions.py @@ -280,6 +280,15 @@ class InvalidAssociationIdError(EC2ClientError): .format(association_id)) +class InvalidVpcCidrBlockAssociationIdError(EC2ClientError): + + def __init__(self, association_id): + super(InvalidVpcCidrBlockAssociationIdError, self).__init__( + "InvalidVpcCidrBlockAssociationIdError.NotFound", + "The vpc CIDR block association ID '{0}' does not exist" + .format(association_id)) + + class InvalidVPCPeeringConnectionIdError(EC2ClientError): def __init__(self, vpc_peering_connection_id): @@ -392,3 +401,22 @@ class FilterNotImplementedError(MotoNotImplementedError): super(FilterNotImplementedError, self).__init__( "The filter '{0}' for {1}".format( filter_name, method_name)) + + +class CidrLimitExceeded(EC2ClientError): + + def __init__(self, vpc_id, max_cidr_limit): + super(CidrLimitExceeded, self).__init__( + "CidrLimitExceeded", + "This network '{0}' has met its maximum number of allowed CIDRs: {1}".format(vpc_id, max_cidr_limit) + ) + + +class OperationNotPermitted(EC2ClientError): + + def __init__(self, association_id): + super(OperationNotPermitted, self).__init__( + "OperationNotPermitted", + "The vpc CIDR block with association ID {} may not be disassociated. " + "It is the primary IPv4 CIDR block of the VPC".format(association_id) + ) diff --git a/moto/ec2/models.py b/moto/ec2/models.py index 80bbf8439..31bfb4839 100755 --- a/moto/ec2/models.py +++ b/moto/ec2/models.py @@ -24,51 +24,54 @@ from moto.core import BaseBackend from moto.core.models import Model, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, camelcase_to_underscores from .exceptions import ( - EC2ClientError, + CidrLimitExceeded, DependencyViolationError, - MissingParameterError, + EC2ClientError, + FilterNotImplementedError, + GatewayNotAttachedError, + InvalidAddressError, + InvalidAllocationIdError, + InvalidAMIIdError, + InvalidAMIAttributeItemValueError, + InvalidAssociationIdError, + InvalidCIDRSubnetError, + InvalidCustomerGatewayIdError, + InvalidDHCPOptionsIdError, + InvalidDomainError, + InvalidID, + InvalidInstanceIdError, + InvalidInternetGatewayIdError, + InvalidKeyPairDuplicateError, + InvalidKeyPairNameError, + InvalidNetworkAclIdError, + InvalidNetworkAttachmentIdError, + InvalidNetworkInterfaceIdError, InvalidParameterValueError, InvalidParameterValueErrorTagNull, - InvalidDHCPOptionsIdError, - MalformedDHCPOptionsIdError, - InvalidKeyPairNameError, - InvalidKeyPairDuplicateError, - InvalidInternetGatewayIdError, - GatewayNotAttachedError, - ResourceAlreadyAssociatedError, - InvalidVPCIdError, - InvalidSubnetIdError, - InvalidNetworkInterfaceIdError, - InvalidNetworkAttachmentIdError, - InvalidSecurityGroupDuplicateError, - InvalidSecurityGroupNotFoundError, InvalidPermissionNotFoundError, InvalidPermissionDuplicateError, InvalidRouteTableIdError, InvalidRouteError, - InvalidInstanceIdError, - InvalidAMIIdError, - InvalidAMIAttributeItemValueError, + InvalidSecurityGroupDuplicateError, + InvalidSecurityGroupNotFoundError, InvalidSnapshotIdError, + InvalidSubnetIdError, InvalidVolumeIdError, InvalidVolumeAttachmentError, - InvalidDomainError, - InvalidAddressError, - InvalidAllocationIdError, - 
InvalidAssociationIdError, + InvalidVpcCidrBlockAssociationIdError, InvalidVPCPeeringConnectionIdError, InvalidVPCPeeringConnectionStateTransitionError, - TagLimitExceeded, - InvalidID, - InvalidCIDRSubnetError, - InvalidNetworkAclIdError, + InvalidVPCIdError, InvalidVpnGatewayIdError, InvalidVpnConnectionIdError, - InvalidCustomerGatewayIdError, - RulesPerSecurityGroupLimitExceededError, + MalformedAMIIdError, + MalformedDHCPOptionsIdError, + MissingParameterError, MotoNotImplementedError, - FilterNotImplementedError -) + OperationNotPermitted, + ResourceAlreadyAssociatedError, + RulesPerSecurityGroupLimitExceededError, + TagLimitExceeded) from .utils import ( EC2_RESOURCE_TO_PREFIX, EC2_PREFIX_TO_RESOURCE, @@ -81,6 +84,7 @@ from .utils import ( random_instance_id, random_internet_gateway_id, random_ip, + random_ipv6_cidr, random_nat_gateway_id, random_key_pair, random_private_ip, @@ -97,6 +101,7 @@ from .utils import ( random_subnet_association_id, random_volume_id, random_vpc_id, + random_vpc_cidr_association_id, random_vpc_peering_connection_id, generic_filter, is_valid_resource_id, @@ -1031,12 +1036,11 @@ class TagBackend(object): class Ami(TaggedEC2Resource): def __init__(self, ec2_backend, ami_id, instance=None, source_ami=None, - name=None, description=None, owner_id=None, - + name=None, description=None, owner_id=111122223333, public=False, virtualization_type=None, architecture=None, state='available', creation_date=None, platform=None, image_type='machine', image_location=None, hypervisor=None, - root_device_type=None, root_device_name=None, sriov='simple', + root_device_type='standard', root_device_name='/dev/sda1', sriov='simple', region_name='us-east-1a' ): self.ec2_backend = ec2_backend @@ -1089,7 +1093,8 @@ class Ami(TaggedEC2Resource): # AWS auto-creates these, we should reflect the same. volume = self.ec2_backend.create_volume(15, region_name) self.ebs_snapshot = self.ec2_backend.create_snapshot( - volume.id, "Auto-created snapshot for AMI %s" % self.id) + volume.id, "Auto-created snapshot for AMI %s" % self.id, owner_id) + self.ec2_backend.delete_volume(volume.id) @property def is_public(self): @@ -1122,6 +1127,9 @@ class Ami(TaggedEC2Resource): class AmiBackend(object): + + AMI_REGEX = re.compile("ami-[a-z0-9]+") + def __init__(self): self.amis = {} @@ -1134,12 +1142,14 @@ class AmiBackend(object): ami_id = ami['ami_id'] self.amis[ami_id] = Ami(self, **ami) - def create_image(self, instance_id, name=None, description=None, owner_id=None): + def create_image(self, instance_id, name=None, description=None, context=None): # TODO: check that instance exists and pull info from it. 
ami_id = random_ami_id() instance = self.get_instance(instance_id) + ami = Ami(self, ami_id, instance=instance, source_ami=None, - name=name, description=description, owner_id=owner_id) + name=name, description=description, + owner_id=context.get_current_user() if context else '111122223333') self.amis[ami_id] = ami return ami @@ -1152,28 +1162,43 @@ class AmiBackend(object): self.amis[ami_id] = ami return ami - def describe_images(self, ami_ids=(), filters=None, exec_users=None, owners=None): + def describe_images(self, ami_ids=(), filters=None, exec_users=None, owners=None, + context=None): images = self.amis.values() - # Limit images by launch permissions - if exec_users: - tmp_images = [] - for ami in images: - for user_id in exec_users: - if user_id in ami.launch_permission_users: - tmp_images.append(ami) - images = tmp_images + if len(ami_ids): + # boto3 seems to default to just searching based on ami ids if that parameter is passed + # and if no images are found, it raises an errors + malformed_ami_ids = [ami_id for ami_id in ami_ids if not ami_id.startswith('ami-')] + if malformed_ami_ids: + raise MalformedAMIIdError(malformed_ami_ids) - # Limit by owner ids - if owners: - images = [ami for ami in images if ami.owner_id in owners] - - if ami_ids: images = [ami for ami in images if ami.id in ami_ids] + if len(images) == 0: + raise InvalidAMIIdError(ami_ids) + else: + # Limit images by launch permissions + if exec_users: + tmp_images = [] + for ami in images: + for user_id in exec_users: + if user_id in ami.launch_permission_users: + tmp_images.append(ami) + images = tmp_images + + # Limit by owner ids + if owners: + # support filtering by Owners=['self'] + owners = list(map( + lambda o: context.get_current_user() + if context and o == 'self' else o, + owners)) + images = [ami for ami in images if ami.owner_id in owners] + + # Generic filters + if filters: + return generic_filter(filters, images) - # Generic filters - if filters: - return generic_filter(filters, images) return images def deregister_image(self, ami_id): @@ -1251,8 +1276,15 @@ class RegionsAndZonesBackend(object): (region, [Zone(region + c, region) for c in 'abc']) for region in [r.name for r in regions]) - def describe_regions(self): - return self.regions + def describe_regions(self, region_names=[]): + if len(region_names) == 0: + return self.regions + ret = [] + for name in region_names: + for region in self.regions: + if region.name == name: + ret.append(region) + return ret def describe_availability_zones(self): return self.zones[self.region_name] @@ -1683,6 +1715,7 @@ class SecurityGroupIngress(object): group_id = properties.get('GroupId') ip_protocol = properties.get("IpProtocol") cidr_ip = properties.get("CidrIp") + cidr_ipv6 = properties.get("CidrIpv6") from_port = properties.get("FromPort") source_security_group_id = properties.get("SourceSecurityGroupId") source_security_group_name = properties.get("SourceSecurityGroupName") @@ -1691,7 +1724,7 @@ class SecurityGroupIngress(object): to_port = properties.get("ToPort") assert group_id or group_name - assert source_security_group_name or cidr_ip or source_security_group_id + assert source_security_group_name or cidr_ip or cidr_ipv6 or source_security_group_id assert ip_protocol if source_security_group_id: @@ -1807,13 +1840,15 @@ class Volume(TaggedEC2Resource): return self.id elif filter_name == 'encrypted': return str(self.encrypted).lower() + elif filter_name == 'availability-zone': + return self.zone.name else: return super(Volume, self).get_filter_value( 
filter_name, 'DescribeVolumes') class Snapshot(TaggedEC2Resource): - def __init__(self, ec2_backend, snapshot_id, volume, description, encrypted=False): + def __init__(self, ec2_backend, snapshot_id, volume, description, encrypted=False, owner_id='123456789012'): self.id = snapshot_id self.volume = volume self.description = description @@ -1822,6 +1857,7 @@ class Snapshot(TaggedEC2Resource): self.ec2_backend = ec2_backend self.status = 'completed' self.encrypted = encrypted + self.owner_id = owner_id def get_filter_value(self, filter_name): if filter_name == 'description': @@ -1913,11 +1949,13 @@ class EBSBackend(object): volume.attachment = None return old_attachment - def create_snapshot(self, volume_id, description): + def create_snapshot(self, volume_id, description, owner_id=None): snapshot_id = random_snapshot_id() volume = self.get_volume(volume_id) - snapshot = Snapshot(self, snapshot_id, volume, - description, volume.encrypted) + params = [self, snapshot_id, volume, description, volume.encrypted] + if owner_id: + params.append(owner_id) + snapshot = Snapshot(*params) self.snapshots[snapshot_id] = snapshot return snapshot @@ -1933,6 +1971,15 @@ class EBSBackend(object): matches = generic_filter(filters, matches) return matches + def copy_snapshot(self, source_snapshot_id, source_region, description=None): + source_snapshot = ec2_backends[source_region].describe_snapshots( + snapshot_ids=[source_snapshot_id])[0] + snapshot_id = random_snapshot_id() + snapshot = Snapshot(self, snapshot_id, volume=source_snapshot.volume, + description=description, encrypted=source_snapshot.encrypted) + self.snapshots[snapshot_id] = snapshot + return snapshot + def get_snapshot(self, snapshot_id): snapshot = self.snapshots.get(snapshot_id, None) if not snapshot: @@ -1972,10 +2019,13 @@ class EBSBackend(object): class VPC(TaggedEC2Resource): - def __init__(self, ec2_backend, vpc_id, cidr_block, is_default, instance_tenancy='default'): + def __init__(self, ec2_backend, vpc_id, cidr_block, is_default, instance_tenancy='default', + amazon_provided_ipv6_cidr_block=False): + self.ec2_backend = ec2_backend self.id = vpc_id self.cidr_block = cidr_block + self.cidr_block_association_set = {} self.dhcp_options = None self.state = 'available' self.instance_tenancy = instance_tenancy @@ -1985,6 +2035,10 @@ class VPC(TaggedEC2Resource): # or VPCs created using the wizard of the VPC console self.enable_dns_hostnames = 'true' if is_default else 'false' + self.associate_vpc_cidr_block(cidr_block) + if amazon_provided_ipv6_cidr_block: + self.associate_vpc_cidr_block(cidr_block, amazon_provided_ipv6_cidr_block=amazon_provided_ipv6_cidr_block) + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] @@ -1994,6 +2048,11 @@ class VPC(TaggedEC2Resource): cidr_block=properties['CidrBlock'], instance_tenancy=properties.get('InstanceTenancy', 'default') ) + for tag in properties.get("Tags", []): + tag_key = tag["Key"] + tag_value = tag["Value"] + vpc.add_tag(tag_key, tag_value) + return vpc @property @@ -2005,6 +2064,12 @@ class VPC(TaggedEC2Resource): return self.id elif filter_name in ('cidr', 'cidr-block', 'cidrBlock'): return self.cidr_block + elif filter_name in ('cidr-block-association.cidr-block', 'ipv6-cidr-block-association.ipv6-cidr-block'): + return [c['cidr_block'] for c in self.get_cidr_block_association_set(ipv6='ipv6' in filter_name)] + elif filter_name in ('cidr-block-association.association-id', 
'ipv6-cidr-block-association.association-id'): + return self.cidr_block_association_set.keys() + elif filter_name in ('cidr-block-association.state', 'ipv6-cidr-block-association.state'): + return [c['cidr_block_state']['state'] for c in self.get_cidr_block_association_set(ipv6='ipv6' in filter_name)] elif filter_name in ('instance_tenancy', 'InstanceTenancy'): return self.instance_tenancy elif filter_name in ('is-default', 'isDefault'): @@ -2016,8 +2081,37 @@ class VPC(TaggedEC2Resource): return None return self.dhcp_options.id else: - return super(VPC, self).get_filter_value( - filter_name, 'DescribeVpcs') + return super(VPC, self).get_filter_value(filter_name, 'DescribeVpcs') + + def associate_vpc_cidr_block(self, cidr_block, amazon_provided_ipv6_cidr_block=False): + max_associations = 5 if not amazon_provided_ipv6_cidr_block else 1 + + if len(self.get_cidr_block_association_set(amazon_provided_ipv6_cidr_block)) >= max_associations: + raise CidrLimitExceeded(self.id, max_associations) + + association_id = random_vpc_cidr_association_id() + + association_set = { + 'association_id': association_id, + 'cidr_block_state': {'state': 'associated', 'StatusMessage': ''} + } + + association_set['cidr_block'] = random_ipv6_cidr() if amazon_provided_ipv6_cidr_block else cidr_block + self.cidr_block_association_set[association_id] = association_set + return association_set + + def disassociate_vpc_cidr_block(self, association_id): + if self.cidr_block == self.cidr_block_association_set.get(association_id, {}).get('cidr_block'): + raise OperationNotPermitted(association_id) + + response = self.cidr_block_association_set.pop(association_id, {}) + if response: + response['vpc_id'] = self.id + response['cidr_block_state']['state'] = 'disassociating' + return response + + def get_cidr_block_association_set(self, ipv6=False): + return [c for c in self.cidr_block_association_set.values() if ('::/' if ipv6 else '.') in c.get('cidr_block')] class VPCBackend(object): @@ -2025,10 +2119,9 @@ class VPCBackend(object): self.vpcs = {} super(VPCBackend, self).__init__() - def create_vpc(self, cidr_block, instance_tenancy='default'): + def create_vpc(self, cidr_block, instance_tenancy='default', amazon_provided_ipv6_cidr_block=False): vpc_id = random_vpc_id() - vpc = VPC(self, vpc_id, cidr_block, len( - self.vpcs) == 0, instance_tenancy) + vpc = VPC(self, vpc_id, cidr_block, len(self.vpcs) == 0, instance_tenancy, amazon_provided_ipv6_cidr_block) self.vpcs[vpc_id] = vpc # AWS creates a default main route table and security group. 
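Reviewer note: taken together with the VPCBackend wiring and response templates that continue below, the new CIDR-association model methods above should be reachable from boto3 roughly as sketched here. The region, CIDR ranges, and exact response keys are assumptions based on the boto3 EC2 API rather than this PR's test suite.

```python
import boto3
from moto import mock_ec2


@mock_ec2
def test_vpc_secondary_cidr_block():
    ec2 = boto3.client('ec2', region_name='us-east-1')
    vpc_id = ec2.create_vpc(CidrBlock='10.0.0.0/16')['Vpc']['VpcId']

    # Attach a secondary IPv4 CIDR block; associate_vpc_cidr_block stores it
    # in the VPC's cidr_block_association_set alongside the primary block.
    assoc = ec2.associate_vpc_cidr_block(
        VpcId=vpc_id, CidrBlock='10.1.0.0/16'
    )['CidrBlockAssociation']

    vpc = ec2.describe_vpcs(VpcIds=[vpc_id])['Vpcs'][0]
    assert len(vpc['CidrBlockAssociationSet']) == 2

    # Secondary associations can be removed again; trying to disassociate the
    # primary block raises the new OperationNotPermitted error instead.
    ec2.disassociate_vpc_cidr_block(AssociationId=assoc['AssociationId'])
```

The same `associate_vpc_cidr_block` path is what the Amazon-provided IPv6 flag on `create_vpc` goes through, just with a random range generated by `random_ipv6_cidr()`.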
@@ -2101,6 +2194,18 @@ class VPCBackend(object): else: raise InvalidParameterValueError(attr_name) + def disassociate_vpc_cidr_block(self, association_id): + for vpc in self.vpcs.values(): + response = vpc.disassociate_vpc_cidr_block(association_id) + if response: + return response + else: + raise InvalidVpcCidrBlockAssociationIdError(association_id) + + def associate_vpc_cidr_block(self, vpc_id, cidr_block, amazon_provided_ipv6_cidr_block): + vpc = self.get_vpc(vpc_id) + return vpc.associate_vpc_cidr_block(cidr_block, amazon_provided_ipv6_cidr_block) + class VPCPeeringConnectionStatus(object): def __init__(self, code='initiating-request', message=''): @@ -2559,7 +2664,7 @@ class Route(object): ec2_backend = ec2_backends[region_name] route_table = ec2_backend.create_route( route_table_id=route_table_id, - destination_cidr_block=properties['DestinationCidrBlock'], + destination_cidr_block=properties.get('DestinationCidrBlock'), gateway_id=gateway_id, instance_id=instance_id, interface_id=interface_id, @@ -2912,7 +3017,7 @@ class SpotFleetRequest(TaggedEC2Resource): 'Properties']['SpotFleetRequestConfigData'] ec2_backend = ec2_backends[region_name] - spot_price = properties['SpotPrice'] + spot_price = properties.get('SpotPrice') target_capacity = properties['TargetCapacity'] iam_fleet_role = properties['IamFleetRole'] allocation_strategy = properties['AllocationStrategy'] @@ -2946,7 +3051,8 @@ class SpotFleetRequest(TaggedEC2Resource): launch_spec_index += 1 else: # lowestPrice cheapest_spec = sorted( - self.launch_specs, key=lambda spec: float(spec.spot_price))[0] + # FIXME: change `+inf` to the on demand price scaled to weighted capacity when it's not present + self.launch_specs, key=lambda spec: float(spec.spot_price or '+inf'))[0] weight_so_far = weight_to_add + (weight_to_add % cheapest_spec.weighted_capacity) weight_map[cheapest_spec] = int( weight_so_far // cheapest_spec.weighted_capacity) diff --git a/moto/ec2/responses/amis.py b/moto/ec2/responses/amis.py index 19e6d31a1..17e1e228d 100755 --- a/moto/ec2/responses/amis.py +++ b/moto/ec2/responses/amis.py @@ -11,7 +11,7 @@ class AmisResponse(BaseResponse): instance_id = self._get_param('InstanceId') if self.is_not_dryrun('CreateImage'): image = self.ec2_backend.create_image( - instance_id, name, description) + instance_id, name, description, context=self) template = self.response_template(CREATE_IMAGE_RESPONSE) return template.render(image=image) @@ -39,7 +39,8 @@ class AmisResponse(BaseResponse): owners = self._get_multi_param('Owner') exec_users = self._get_multi_param('ExecutableBy') images = self.ec2_backend.describe_images( - ami_ids=ami_ids, filters=filters, exec_users=exec_users, owners=owners) + ami_ids=ami_ids, filters=filters, exec_users=exec_users, + owners=owners, context=self) template = self.response_template(DESCRIBE_IMAGES_RESPONSE) return template.render(images=images) @@ -112,12 +113,12 @@ DESCRIBE_IMAGES_RESPONSE = """ + 59dbff89-35bd-4eac-99ed-be587EXAMPLE + {{ snapshot.id }} +""" + DESCRIBE_SNAPSHOTS_RESPONSE = """ 59dbff89-35bd-4eac-99ed-be587EXAMPLE @@ -232,7 +268,7 @@ DESCRIBE_SNAPSHOTS_RESPONSE = """ + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE {{ vpc.id }} pending {{ vpc.cidr_block }} + {% if doc_date == "2016-11-15" %} + + {% for assoc in vpc.get_cidr_block_association_set() %} + + {{assoc.cidr_block}} + {{ assoc.association_id }} + + {{assoc.cidr_block_state.state}} + + + {% endfor %} + + + {% for assoc in vpc.get_cidr_block_association_set(ipv6=True) %} + + {{assoc.cidr_block}} + {{ assoc.association_id }} + + 
{{assoc.cidr_block_state.state}} + + + {% endfor %} + + {% endif %} {% if vpc.dhcp_options %}{{ vpc.dhcp_options.id }}{% else %}dopt-1a2b3c4d2{% endif %} {{ vpc.instance_tenancy }} @@ -69,14 +122,38 @@ CREATE_VPC_RESPONSE = """ """ DESCRIBE_VPCS_RESPONSE = """ - - 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + + 7a62c442-3484-4f42-9342-6942EXAMPLE {% for vpc in vpcs %} {{ vpc.id }} {{ vpc.state }} {{ vpc.cidr_block }} + {% if doc_date == "2016-11-15" %} + + {% for assoc in vpc.get_cidr_block_association_set() %} + + {{assoc.cidr_block}} + {{ assoc.association_id }} + + {{assoc.cidr_block_state.state}} + + + {% endfor %} + + + {% for assoc in vpc.get_cidr_block_association_set(ipv6=True) %} + + {{assoc.cidr_block}} + {{ assoc.association_id }} + + {{assoc.cidr_block_state.state}} + + + {% endfor %} + + {% endif %} {% if vpc.dhcp_options %}{{ vpc.dhcp_options.id }}{% else %}dopt-7a8b9c2d{% endif %} {{ vpc.instance_tenancy }} {{ vpc.is_default }} @@ -96,14 +173,14 @@ DESCRIBE_VPCS_RESPONSE = """ """ DELETE_VPC_RESPONSE = """ - + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE true """ DESCRIBE_VPC_ATTRIBUTE_RESPONSE = """ - + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE {{ vpc_id }} <{{ attribute }}> @@ -112,7 +189,59 @@ DESCRIBE_VPC_ATTRIBUTE_RESPONSE = """ """ MODIFY_VPC_ATTRIBUTE_RESPONSE = """ - + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE true """ + +ASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """ + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + {{vpc_id}} + + {{association_id}} + {{cidr_block}} + + {{cidr_block_state}} + + +""" + +DISASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """ + + 7a62c49f-347e-4fc4-9331-6e8eEXAMPLE + {{vpc_id}} + + {{association_id}} + {{cidr_block}} + + {{cidr_block_state}} + + +""" + +IPV6_ASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """ + + 33af6c54-1139-4d50-b4f7-15a8example + {{vpc_id}} + + {{association_id}} + {{cidr_block}} + + {{cidr_block_state}} + + +""" + +IPV6_DISASSOCIATE_VPC_CIDR_BLOCK_RESPONSE = """ + + 33af6c54-1139-4d50-b4f7-15a8example + {{vpc_id}} + + {{association_id}} + {{cidr_block}} + + {{cidr_block_state}} + + +""" diff --git a/moto/ec2/utils.py b/moto/ec2/utils.py index 32122c763..f5c9b8512 100644 --- a/moto/ec2/utils.py +++ b/moto/ec2/utils.py @@ -27,6 +27,7 @@ EC2_RESOURCE_TO_PREFIX = { 'reservation': 'r', 'volume': 'vol', 'vpc': 'vpc', + 'vpc-cidr-association-id': 'vpc-cidr-assoc', 'vpc-elastic-ip': 'eipalloc', 'vpc-elastic-ip-association': 'eipassoc', 'vpc-peering-connection': 'pcx', @@ -34,16 +35,17 @@ EC2_RESOURCE_TO_PREFIX = { 'vpn-gateway': 'vgw'} -EC2_PREFIX_TO_RESOURCE = dict((v, k) - for (k, v) in EC2_RESOURCE_TO_PREFIX.items()) +EC2_PREFIX_TO_RESOURCE = dict((v, k) for (k, v) in EC2_RESOURCE_TO_PREFIX.items()) + + +def random_resource_id(size=8): + chars = list(range(10)) + ['a', 'b', 'c', 'd', 'e', 'f'] + resource_id = ''.join(six.text_type(random.choice(chars)) for x in range(size)) + return resource_id def random_id(prefix='', size=8): - chars = list(range(10)) + ['a', 'b', 'c', 'd', 'e', 'f'] - - resource_id = ''.join(six.text_type(random.choice(chars)) - for x in range(size)) - return '{0}-{1}'.format(prefix, resource_id) + return '{0}-{1}'.format(prefix, random_resource_id(size)) def random_ami_id(): @@ -110,6 +112,10 @@ def random_vpc_id(): return random_id(prefix=EC2_RESOURCE_TO_PREFIX['vpc']) +def random_vpc_cidr_association_id(): + return random_id(prefix=EC2_RESOURCE_TO_PREFIX['vpc-cidr-association-id']) + + def random_vpc_peering_connection_id(): return random_id(prefix=EC2_RESOURCE_TO_PREFIX['vpc-peering-connection']) @@ -165,6 +171,10 @@ def random_ip(): ) +def random_ipv6_cidr(): + 
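+    # Produces e.g. "2400:6500:1a2b:3c4d::/56"; each quartet is four random hex characters from random_resource_id(4).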
return "2400:6500:{}:{}::/56".format(random_resource_id(4), random_resource_id(4)) + + def generate_route_id(route_table_id, cidr_block): return "%s~%s" % (route_table_id, cidr_block) diff --git a/moto/ecr/models.py b/moto/ecr/models.py index f5b6f24e4..e20c550c9 100644 --- a/moto/ecr/models.py +++ b/moto/ecr/models.py @@ -1,14 +1,14 @@ from __future__ import unicode_literals -# from datetime import datetime + +import hashlib +from copy import copy from random import random from moto.core import BaseBackend, BaseModel from moto.ec2 import ec2_backends -from copy import copy -import hashlib - from moto.ecr.exceptions import ImageNotFoundException, RepositoryNotFoundException +from botocore.exceptions import ParamValidationError DEFAULT_REGISTRY_ID = '012345678910' @@ -145,6 +145,17 @@ class Image(BaseObject): response_object['imagePushedAt'] = '2017-05-09' return response_object + @property + def response_batch_get_image(self): + response_object = {} + response_object['imageId'] = {} + response_object['imageId']['imageTag'] = self.image_tag + response_object['imageId']['imageDigest'] = self.get_image_digest() + response_object['imageManifest'] = self.image_manifest + response_object['repositoryName'] = self.repository + response_object['registryId'] = self.registry_id + return response_object + class ECRBackend(BaseBackend): @@ -245,6 +256,39 @@ class ECRBackend(BaseBackend): repository.images.append(image) return image + def batch_get_image(self, repository_name, registry_id=None, image_ids=None, accepted_media_types=None): + if repository_name in self.repositories: + repository = self.repositories[repository_name] + else: + raise RepositoryNotFoundException(repository_name, registry_id or DEFAULT_REGISTRY_ID) + + if not image_ids: + raise ParamValidationError(msg='Missing required parameter in input: "imageIds"') + + response = { + 'images': [], + 'failures': [], + } + + for image_id in image_ids: + found = False + for image in repository.images: + if (('imageDigest' in image_id and image.get_image_digest() == image_id['imageDigest']) or + ('imageTag' in image_id and image.image_tag == image_id['imageTag'])): + found = True + response['images'].append(image.response_batch_get_image) + + if not found: + response['failures'].append({ + 'imageId': { + 'imageTag': image_id.get('imageTag', 'null') + }, + 'failureCode': 'ImageNotFound', + 'failureReason': 'Requested image not found' + }) + + return response + ecr_backends = {} for region, ec2_backend in ec2_backends.items(): diff --git a/moto/ecr/responses.py b/moto/ecr/responses.py index 6207de4eb..ca45c63c9 100644 --- a/moto/ecr/responses.py +++ b/moto/ecr/responses.py @@ -89,9 +89,13 @@ class ECRResponse(BaseResponse): 'ECR.batch_delete_image is not yet implemented') def batch_get_image(self): - if self.is_not_dryrun('BatchGetImage'): - raise NotImplementedError( - 'ECR.batch_get_image is not yet implemented') + repository_str = self._get_param('repositoryName') + registry_id = self._get_param('registryId') + image_ids = self._get_param('imageIds') + accepted_media_types = self._get_param('acceptedMediaTypes') + + response = self.ecr_backend.batch_get_image(repository_str, registry_id, image_ids, accepted_media_types) + return json.dumps(response) def can_paginate(self): if self.is_not_dryrun('CanPaginate'): diff --git a/moto/ecs/models.py b/moto/ecs/models.py index e0b29cb01..3c51cd03f 100644 --- a/moto/ecs/models.py +++ b/moto/ecs/models.py @@ -24,7 +24,7 @@ class BaseObject(BaseModel): def gen_response_object(self): response_object = 
copy(self.__dict__) - for key, value in response_object.items(): + for key, value in self.__dict__.items(): if '_' in key: response_object[self.camelCase(key)] = value del response_object[key] @@ -61,7 +61,11 @@ class Cluster(BaseObject): @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): - properties = cloudformation_json['Properties'] + # if properties is not provided, cloudformation will use the default values for all properties + if 'Properties' in cloudformation_json: + properties = cloudformation_json['Properties'] + else: + properties = {} ecs_backend = ecs_backends[region_name] return ecs_backend.create_cluster( @@ -109,6 +113,10 @@ class TaskDefinition(BaseObject): del response_object['arn'] return response_object + @property + def physical_resource_id(self): + return self.arn + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): properties = cloudformation_json['Properties'] @@ -502,10 +510,27 @@ class EC2ContainerServiceBackend(BaseBackend): def _calculate_task_resource_requirements(task_definition): resource_requirements = {"CPU": 0, "MEMORY": 0, "PORTS": [], "PORTS_UDP": []} for container_definition in task_definition.container_definitions: - resource_requirements["CPU"] += container_definition.get('cpu') - resource_requirements["MEMORY"] += container_definition.get("memory") - for port_mapping in container_definition.get("portMappings", []): - resource_requirements["PORTS"].append(port_mapping.get('hostPort')) + # cloudformation uses capitalized properties, while boto uses all lower case + + # CPU is optional + resource_requirements["CPU"] += container_definition.get('cpu', + container_definition.get('Cpu', 0)) + + # either memory or memory reservation must be provided + if 'Memory' in container_definition or 'MemoryReservation' in container_definition: + resource_requirements["MEMORY"] += container_definition.get( + "Memory", container_definition.get('MemoryReservation')) + else: + resource_requirements["MEMORY"] += container_definition.get( + "memory", container_definition.get('memoryReservation')) + + port_mapping_key = 'PortMappings' if 'PortMappings' in container_definition else 'portMappings' + for port_mapping in container_definition.get(port_mapping_key, []): + if 'hostPort' in port_mapping: + resource_requirements["PORTS"].append(port_mapping.get('hostPort')) + elif 'HostPort' in port_mapping: + resource_requirements["PORTS"].append(port_mapping.get('HostPort')) + return resource_requirements @staticmethod diff --git a/moto/elb/models.py b/moto/elb/models.py index 504c68908..8781620f1 100644 --- a/moto/elb/models.py +++ b/moto/elb/models.py @@ -268,7 +268,7 @@ class ELBBackend(BaseBackend): protocol = port['protocol'] instance_port = port['instance_port'] lb_port = port['load_balancer_port'] - ssl_certificate_id = port.get('sslcertificate_id') + ssl_certificate_id = port.get('ssl_certificate_id') for listener in balancer.listeners: if lb_port == listener.load_balancer_port: if protocol != listener.protocol: diff --git a/moto/elb/responses.py b/moto/elb/responses.py index b1980c9b2..40d6ec2f9 100644 --- a/moto/elb/responses.py +++ b/moto/elb/responses.py @@ -61,7 +61,7 @@ class ELBResponse(BaseResponse): start = all_names.index(marker) + 1 else: start = 0 - page_size = self._get_param('PageSize', 50) # the default is 400, but using 50 to make testing easier + page_size = self._get_int_param('PageSize', 50) # the default is 400, but using 50 to make testing easier 
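+        # PageSize arrives as a query-string value; parsing it as an int keeps the slice and length comparison below correct.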
load_balancers_resp = all_load_balancers[start:start + page_size] next_marker = None if len(all_load_balancers) > start + page_size: diff --git a/moto/elbv2/models.py b/moto/elbv2/models.py index 726b1a164..8921581d3 100644 --- a/moto/elbv2/models.py +++ b/moto/elbv2/models.py @@ -486,6 +486,10 @@ class ELBv2Backend(BaseBackend): arn = load_balancer_arn.replace(':loadbalancer/', ':listener/') + "/%s%s" % (port, id(self)) listener = FakeListener(load_balancer_arn, arn, protocol, port, ssl_policy, certificate, default_actions) balancer.listeners[listener.arn] = listener + for action in default_actions: + if action['target_group_arn'] in self.target_groups.keys(): + target_group = self.target_groups[action['target_group_arn']] + target_group.load_balancer_arns.append(load_balancer_arn) return listener def describe_load_balancers(self, arns, names): diff --git a/moto/elbv2/responses.py b/moto/elbv2/responses.py index 7c71ce78a..1814f1273 100644 --- a/moto/elbv2/responses.py +++ b/moto/elbv2/responses.py @@ -242,7 +242,7 @@ class ELBV2Response(BaseResponse): start = all_names.index(marker) + 1 else: start = 0 - page_size = self._get_param('PageSize', 50) # the default is 400, but using 50 to make testing easier + page_size = self._get_int_param('PageSize', 50) # the default is 400, but using 50 to make testing easier load_balancers_resp = all_load_balancers[start:start + page_size] next_marker = None if len(all_load_balancers) > start + page_size: @@ -468,7 +468,7 @@ class ELBV2Response(BaseResponse): def describe_account_limits(self): # Supports paging but not worth implementing yet # marker = self._get_param('Marker') - # page_size = self._get_param('PageSize') + # page_size = self._get_int_param('PageSize') limits = { 'application-load-balancers': 20, @@ -489,7 +489,7 @@ class ELBV2Response(BaseResponse): names = self._get_multi_param('Names.member.') # Supports paging but not worth implementing yet # marker = self._get_param('Marker') - # page_size = self._get_param('PageSize') + # page_size = self._get_int_param('PageSize') policies = SSL_POLICIES if names: diff --git a/moto/emr/responses.py b/moto/emr/responses.py index 8442e4010..49e37ab9a 100644 --- a/moto/emr/responses.py +++ b/moto/emr/responses.py @@ -462,10 +462,10 @@ DESCRIBE_JOB_FLOWS_TEMPLATE = """ self.et: + d["Completed"] = True + d["CompletionDate"] = self.et.strftime("%Y-%m-%dT%H:%M:%S.000Z") + d["InventorySizeInBytes"] = 10000 + d["StatusCode"] = "Succeeded" + return d + + +class InventoryJob(Job): + + def __init__(self, job_id, tier, arn): + self.job_id = job_id + self.tier = tier + self.arn = arn + Job.__init__(self, tier) + + def to_dict(self): + d = { + "Action": "InventoryRetrieval", + "ArchiveSHA256TreeHash": None, + "Completed": False, + "CreationDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.000Z"), + "InventorySizeInBytes": 0, + "JobDescription": None, + "JobId": self.job_id, + "RetrievalByteRange": None, + "SHA256TreeHash": None, + "SNSTopic": None, + "StatusCode": "InProgress", + "StatusMessage": None, + "VaultARN": self.arn, + "Tier": self.tier + } + if datetime.datetime.now() > self.et: + d["Completed"] = True + d["CompletionDate"] = self.et.strftime("%Y-%m-%dT%H:%M:%S.000Z") + d["InventorySizeInBytes"] = 10000 + d["StatusCode"] = "Succeeded" + return d class Vault(BaseModel): def __init__(self, vault_name, region): + self.st = datetime.datetime.now() self.vault_name = vault_name self.region = region self.archives = {} @@ -48,29 +107,57 @@ class Vault(BaseModel): return 
"arn:aws:glacier:{0}:012345678901:vaults/{1}".format(self.region, self.vault_name) def to_dict(self): - return { - "CreationDate": "2013-03-20T17:03:43.221Z", - "LastInventoryDate": "2013-03-20T17:03:43.221Z", - "NumberOfArchives": None, - "SizeInBytes": None, + archives_size = 0 + for k in self.archives: + archives_size += self.archives[k]["size"] + d = { + "CreationDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.000Z"), + "LastInventoryDate": self.st.strftime("%Y-%m-%dT%H:%M:%S.000Z"), + "NumberOfArchives": len(self.archives), + "SizeInBytes": archives_size, "VaultARN": self.arn, "VaultName": self.vault_name, } + return d - def create_archive(self, body): - archive_id = hashlib.sha256(body).hexdigest() - self.archives[archive_id] = body + def create_archive(self, body, description): + archive_id = hashlib.md5(body).hexdigest() + self.archives[archive_id] = {} + self.archives[archive_id]["body"] = body + self.archives[archive_id]["size"] = len(body) + self.archives[archive_id]["sha256"] = hashlib.sha256(body).hexdigest() + self.archives[archive_id]["creation_date"] = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.000Z") + self.archives[archive_id]["description"] = description return archive_id def get_archive_body(self, archive_id): - return self.archives[archive_id] + return self.archives[archive_id]["body"] + + def get_archive_list(self): + archive_list = [] + for a in self.archives: + archive = self.archives[a] + aobj = { + "ArchiveId": a, + "ArchiveDescription": archive["description"], + "CreationDate": archive["creation_date"], + "Size": archive["size"], + "SHA256TreeHash": archive["sha256"] + } + archive_list.append(aobj) + return archive_list def delete_archive(self, archive_id): return self.archives.pop(archive_id) - def initiate_job(self, archive_id): + def initiate_job(self, job_type, tier, archive_id): job_id = get_job_id() - job = ArchiveJob(job_id, archive_id) + + if job_type == "inventory-retrieval": + job = InventoryJob(job_id, tier, self.arn) + elif job_type == "archive-retrieval": + job = ArchiveJob(job_id, tier, self.arn, archive_id) + self.jobs[job_id] = job return job_id @@ -80,10 +167,24 @@ class Vault(BaseModel): def describe_job(self, job_id): return self.jobs.get(job_id) + def job_ready(self, job_id): + job = self.describe_job(job_id) + jobj = job.to_dict() + return jobj["Completed"] + def get_job_output(self, job_id): job = self.describe_job(job_id) - archive_body = self.get_archive_body(job.archive_id) - return archive_body + jobj = job.to_dict() + if jobj["Action"] == "InventoryRetrieval": + archives = self.get_archive_list() + return { + "VaultARN": self.arn, + "InventoryDate": jobj["CompletionDate"], + "ArchiveList": archives + } + else: + archive_body = self.get_archive_body(job.archive_id) + return archive_body class GlacierBackend(BaseBackend): @@ -109,9 +210,9 @@ class GlacierBackend(BaseBackend): def delete_vault(self, vault_name): self.vaults.pop(vault_name) - def initiate_job(self, vault_name, archive_id): + def initiate_job(self, vault_name, job_type, tier, archive_id): vault = self.get_vault(vault_name) - job_id = vault.initiate_job(archive_id) + job_id = vault.initiate_job(job_type, tier, archive_id) return job_id def list_jobs(self, vault_name): diff --git a/moto/glacier/responses.py b/moto/glacier/responses.py index cda859b29..abdf83e4f 100644 --- a/moto/glacier/responses.py +++ b/moto/glacier/responses.py @@ -72,17 +72,25 @@ class GlacierResponse(_TemplateEnvironmentMixin): def _vault_archive_response(self, request, full_url, headers): method = 
request.method - body = request.body + if hasattr(request, 'body'): + body = request.body + else: + body = request.data + description = "" + if 'x-amz-archive-description' in request.headers: + description = request.headers['x-amz-archive-description'] parsed_url = urlparse(full_url) querystring = parse_qs(parsed_url.query, keep_blank_values=True) vault_name = full_url.split("/")[-2] if method == 'POST': - return self._vault_archive_response_post(vault_name, body, querystring, headers) + return self._vault_archive_response_post(vault_name, body, description, querystring, headers) + else: + return 400, headers, "400 Bad Request" - def _vault_archive_response_post(self, vault_name, body, querystring, headers): + def _vault_archive_response_post(self, vault_name, body, description, querystring, headers): vault = self.backend.get_vault(vault_name) - vault_id = vault.create_archive(body) + vault_id = vault.create_archive(body, description) headers['x-amz-archive-id'] = vault_id return 201, headers, "" @@ -110,7 +118,10 @@ class GlacierResponse(_TemplateEnvironmentMixin): def _vault_jobs_response(self, request, full_url, headers): method = request.method - body = request.body + if hasattr(request, 'body'): + body = request.body + else: + body = request.data account_id = full_url.split("/")[1] vault_name = full_url.split("/")[-2] @@ -125,11 +136,17 @@ class GlacierResponse(_TemplateEnvironmentMixin): }) elif method == 'POST': json_body = json.loads(body.decode("utf-8")) - archive_id = json_body['ArchiveId'] - job_id = self.backend.initiate_job(vault_name, archive_id) + job_type = json_body['Type'] + archive_id = None + if 'ArchiveId' in json_body: + archive_id = json_body['ArchiveId'] + if 'Tier' in json_body: + tier = json_body["Tier"] + else: + tier = "Standard" + job_id = self.backend.initiate_job(vault_name, job_type, tier, archive_id) headers['x-amz-job-id'] = job_id - headers[ - 'Location'] = "/{0}/vaults/{1}/jobs/{2}".format(account_id, vault_name, job_id) + headers['Location'] = "/{0}/vaults/{1}/jobs/{2}".format(account_id, vault_name, job_id) return 202, headers, "" @classmethod @@ -155,8 +172,14 @@ class GlacierResponse(_TemplateEnvironmentMixin): def _vault_jobs_output_response(self, request, full_url, headers): vault_name = full_url.split("/")[-4] job_id = full_url.split("/")[-2] - vault = self.backend.get_vault(vault_name) - output = vault.get_job_output(job_id) - headers['content-type'] = 'application/octet-stream' - return 200, headers, output + if vault.job_ready(job_id): + output = vault.get_job_output(job_id) + if isinstance(output, dict): + headers['content-type'] = 'application/json' + return 200, headers, json.dumps(output) + else: + headers['content-type'] = 'application/octet-stream' + return 200, headers, output + else: + return 404, headers, "404 Not Found" diff --git a/moto/iam/models.py b/moto/iam/models.py index 7839d3a74..32ca144c3 100644 --- a/moto/iam/models.py +++ b/moto/iam/models.py @@ -122,7 +122,7 @@ class Role(BaseModel): role = iam_backend.create_role( role_name=resource_name, assume_role_policy_document=properties['AssumeRolePolicyDocument'], - path=properties['Path'], + path=properties.get('Path', '/'), ) policies = properties.get('Policies', []) @@ -173,7 +173,7 @@ class InstanceProfile(BaseModel): role_ids = properties['Roles'] return iam_backend.create_instance_profile( name=resource_name, - path=properties['Path'], + path=properties.get('Path', '/'), role_ids=role_ids, ) @@ -349,6 +349,14 @@ class User(BaseModel): raise IAMNotFoundException( "Key {0} 
not found".format(access_key_id)) + def update_access_key(self, access_key_id, status): + for key in self.access_keys: + if key.access_key_id == access_key_id: + key.status = status + break + else: + raise IAMNotFoundException("The Access Key with id {0} cannot be found".format(access_key_id)) + def get_cfn_attribute(self, attribute_name): from moto.cloudformation.exceptions import UnformattedGetAttTemplateException if attribute_name == 'Arn': @@ -817,6 +825,10 @@ class IAMBackend(BaseBackend): key = user.create_access_key() return key + def update_access_key(self, user_name, access_key_id, status): + user = self.get_user(user_name) + user.update_access_key(access_key_id, status) + def get_all_access_keys(self, user_name, marker=None, max_items=None): user = self.get_user(user_name) keys = user.get_all_access_keys() diff --git a/moto/iam/responses.py b/moto/iam/responses.py index 0e11c09d5..9931cb8d0 100644 --- a/moto/iam/responses.py +++ b/moto/iam/responses.py @@ -440,6 +440,14 @@ class IamResponse(BaseResponse): template = self.response_template(CREATE_ACCESS_KEY_TEMPLATE) return template.render(key=key) + def update_access_key(self): + user_name = self._get_param('UserName') + access_key_id = self._get_param('AccessKeyId') + status = self._get_param('Status') + iam_backend.update_access_key(user_name, access_key_id, status) + template = self.response_template(GENERIC_EMPTY_TEMPLATE) + return template.render(name='UpdateAccessKey') + def list_access_keys(self): user_name = self._get_param('UserName') diff --git a/moto/iot/exceptions.py b/moto/iot/exceptions.py index 4bb01c095..7bbdb706d 100644 --- a/moto/iot/exceptions.py +++ b/moto/iot/exceptions.py @@ -16,9 +16,18 @@ class ResourceNotFoundException(IoTClientError): class InvalidRequestException(IoTClientError): - def __init__(self): + def __init__(self, msg=None): self.code = 400 super(InvalidRequestException, self).__init__( "InvalidRequestException", - "The request is not valid." + msg or "The request is not valid." + ) + + +class VersionConflictException(IoTClientError): + def __init__(self, name): + self.code = 409 + super(VersionConflictException, self).__init__( + 'VersionConflictException', + 'The version for thing %s does not match the expected version.' 
% name ) diff --git a/moto/iot/models.py b/moto/iot/models.py index 1efa6690e..77b0dde08 100644 --- a/moto/iot/models.py +++ b/moto/iot/models.py @@ -9,7 +9,8 @@ from moto.core import BaseBackend, BaseModel from collections import OrderedDict from .exceptions import ( ResourceNotFoundException, - InvalidRequestException + InvalidRequestException, + VersionConflictException ) @@ -44,6 +45,7 @@ class FakeThingType(BaseModel): self.region_name = region_name self.thing_type_name = thing_type_name self.thing_type_properties = thing_type_properties + self.thing_type_id = str(uuid.uuid4()) # I don't know the rule of id t = time.time() self.metadata = { 'deprecated': False, @@ -54,11 +56,37 @@ class FakeThingType(BaseModel): def to_dict(self): return { 'thingTypeName': self.thing_type_name, + 'thingTypeId': self.thing_type_id, 'thingTypeProperties': self.thing_type_properties, 'thingTypeMetadata': self.metadata } +class FakeThingGroup(BaseModel): + def __init__(self, thing_group_name, parent_group_name, thing_group_properties, region_name): + self.region_name = region_name + self.thing_group_name = thing_group_name + self.thing_group_id = str(uuid.uuid4()) # I don't know the rule of id + self.version = 1 # TODO: tmp + self.parent_group_name = parent_group_name + self.thing_group_properties = thing_group_properties or {} + t = time.time() + self.metadata = { + 'creationData': int(t * 1000) / 1000.0 + } + self.arn = 'arn:aws:iot:%s:1:thinggroup/%s' % (self.region_name, thing_group_name) + self.things = OrderedDict() + + def to_dict(self): + return { + 'thingGroupName': self.thing_group_name, + 'thingGroupId': self.thing_group_id, + 'version': self.version, + 'thingGroupProperties': self.thing_group_properties, + 'thingGroupMetadata': self.metadata + } + + class FakeCertificate(BaseModel): def __init__(self, certificate_pem, status, region_name): m = hashlib.sha256() @@ -137,6 +165,7 @@ class IoTBackend(BaseBackend): self.region_name = region_name self.things = OrderedDict() self.thing_types = OrderedDict() + self.thing_groups = OrderedDict() self.certificates = OrderedDict() self.policies = OrderedDict() self.principal_policies = OrderedDict() @@ -359,6 +388,125 @@ class IoTBackend(BaseBackend): principals = [k[0] for k, v in self.principal_things.items() if k[1] == thing_name] return principals + def describe_thing_group(self, thing_group_name): + thing_groups = [_ for _ in self.thing_groups.values() if _.thing_group_name == thing_group_name] + if len(thing_groups) == 0: + raise ResourceNotFoundException() + return thing_groups[0] + + def create_thing_group(self, thing_group_name, parent_group_name, thing_group_properties): + thing_group = FakeThingGroup(thing_group_name, parent_group_name, thing_group_properties, self.region_name) + self.thing_groups[thing_group.arn] = thing_group + return thing_group.thing_group_name, thing_group.arn, thing_group.thing_group_id + + def delete_thing_group(self, thing_group_name, expected_version): + thing_group = self.describe_thing_group(thing_group_name) + del self.thing_groups[thing_group.arn] + + def list_thing_groups(self, parent_group, name_prefix_filter, recursive): + thing_groups = self.thing_groups.values() + return thing_groups + + def update_thing_group(self, thing_group_name, thing_group_properties, expected_version): + thing_group = self.describe_thing_group(thing_group_name) + if expected_version and expected_version != thing_group.version: + raise VersionConflictException(thing_group_name) + attribute_payload = 
thing_group_properties.get('attributePayload', None) + if attribute_payload is not None and 'attributes' in attribute_payload: + do_merge = attribute_payload.get('merge', False) + attributes = attribute_payload['attributes'] + if not do_merge: + thing_group.thing_group_properties['attributePayload']['attributes'] = attributes + else: + thing_group.thing_group_properties['attributePayload']['attributes'].update(attributes) + elif attribute_payload is not None and 'attributes' not in attribute_payload: + thing_group.attributes = {} + thing_group.version = thing_group.version + 1 + return thing_group.version + + def _identify_thing_group(self, thing_group_name, thing_group_arn): + # identify thing group + if thing_group_name is None and thing_group_arn is None: + raise InvalidRequestException( + ' Both thingGroupArn and thingGroupName are empty. Need to specify at least one of them' + ) + if thing_group_name is not None: + thing_group = self.describe_thing_group(thing_group_name) + if thing_group_arn and thing_group.arn != thing_group_arn: + raise InvalidRequestException( + 'ThingGroupName thingGroupArn does not match specified thingGroupName in request' + ) + elif thing_group_arn is not None: + if thing_group_arn not in self.thing_groups: + raise InvalidRequestException() + thing_group = self.thing_groups[thing_group_arn] + return thing_group + + def _identify_thing(self, thing_name, thing_arn): + # identify thing + if thing_name is None and thing_arn is None: + raise InvalidRequestException( + 'Both thingArn and thingName are empty. Need to specify at least one of them' + ) + if thing_name is not None: + thing = self.describe_thing(thing_name) + if thing_arn and thing.arn != thing_arn: + raise InvalidRequestException( + 'ThingName thingArn does not match specified thingName in request' + ) + elif thing_arn is not None: + if thing_arn not in self.things: + raise InvalidRequestException() + thing = self.things[thing_arn] + return thing + + def add_thing_to_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn): + thing_group = self._identify_thing_group(thing_group_name, thing_group_arn) + thing = self._identify_thing(thing_name, thing_arn) + if thing.arn in thing_group.things: + # aws ignores duplicate registration + return + thing_group.things[thing.arn] = thing + + def remove_thing_from_thing_group(self, thing_group_name, thing_group_arn, thing_name, thing_arn): + thing_group = self._identify_thing_group(thing_group_name, thing_group_arn) + thing = self._identify_thing(thing_name, thing_arn) + if thing.arn not in thing_group.things: + # aws ignores non-registered thing + return + del thing_group.things[thing.arn] + + def list_things_in_thing_group(self, thing_group_name, recursive): + thing_group = self.describe_thing_group(thing_group_name) + return thing_group.things.values() + + def list_thing_groups_for_thing(self, thing_name): + thing = self.describe_thing(thing_name) + all_thing_groups = self.list_thing_groups(None, None, None) + ret = [] + for thing_group in all_thing_groups: + if thing.arn in thing_group.things: + ret.append({ + 'groupName': thing_group.thing_group_name, + 'groupArn': thing_group.arn + }) + return ret + + def update_thing_groups_for_thing(self, thing_name, thing_groups_to_add, thing_groups_to_remove): + thing = self.describe_thing(thing_name) + for thing_group_name in thing_groups_to_add: + thing_group = self.describe_thing_group(thing_group_name) + self.add_thing_to_thing_group( + thing_group.thing_group_name, None, + thing.thing_name, None + 
) + for thing_group_name in thing_groups_to_remove: + thing_group = self.describe_thing_group(thing_group_name) + self.remove_thing_from_thing_group( + thing_group.thing_group_name, None, + thing.thing_name, None + ) + available_regions = boto3.session.Session().get_available_regions("iot") iot_backends = {region: IoTBackend(region) for region in available_regions} diff --git a/moto/iot/responses.py b/moto/iot/responses.py index bbe2bb016..4bd35bce4 100644 --- a/moto/iot/responses.py +++ b/moto/iot/responses.py @@ -38,8 +38,7 @@ class IoTResponse(BaseResponse): thing_types = self.iot_backend.list_thing_types( thing_type_name=thing_type_name ) - - # TODO: support next_token and max_results + # TODO: implement pagination in the future next_token = None return json.dumps(dict(thingTypes=[_.to_dict() for _ in thing_types], nextToken=next_token)) @@ -54,7 +53,7 @@ class IoTResponse(BaseResponse): attribute_value=attribute_value, thing_type_name=thing_type_name, ) - # TODO: support next_token and max_results + # TODO: implement pagination in the future next_token = None return json.dumps(dict(things=[_.to_dict() for _ in things], nextToken=next_token)) @@ -63,7 +62,6 @@ class IoTResponse(BaseResponse): thing = self.iot_backend.describe_thing( thing_name=thing_name, ) - print(thing.to_dict(include_default_client_id=True)) return json.dumps(thing.to_dict(include_default_client_id=True)) def describe_thing_type(self): @@ -105,7 +103,7 @@ class IoTResponse(BaseResponse): return json.dumps(dict()) def create_keys_and_certificate(self): - set_as_active = self._get_param("setAsActive") + set_as_active = self._get_bool_param("setAsActive") cert, key_pair = self.iot_backend.create_keys_and_certificate( set_as_active=set_as_active, ) @@ -135,7 +133,7 @@ class IoTResponse(BaseResponse): # marker = self._get_param("marker") # ascending_order = self._get_param("ascendingOrder") certificates = self.iot_backend.list_certificates() - # TODO: handle pagination + # TODO: implement pagination in the future return json.dumps(dict(certificates=[_.to_dict() for _ in certificates])) def update_certificate(self): @@ -162,7 +160,7 @@ class IoTResponse(BaseResponse): # ascending_order = self._get_param("ascendingOrder") policies = self.iot_backend.list_policies() - # TODO: handle pagination + # TODO: implement pagination in the future return json.dumps(dict(policies=[_.to_dict() for _ in policies])) def get_policy(self): @@ -205,7 +203,7 @@ class IoTResponse(BaseResponse): policies = self.iot_backend.list_principal_policies( principal_arn=principal ) - # TODO: handle pagination + # TODO: implement pagination in the future next_marker = None return json.dumps(dict(policies=[_.to_dict() for _ in policies], nextMarker=next_marker)) @@ -217,7 +215,7 @@ class IoTResponse(BaseResponse): principals = self.iot_backend.list_policy_principals( policy_name=policy_name, ) - # TODO: handle pagination + # TODO: implement pagination in the future next_marker = None return json.dumps(dict(principals=principals, nextMarker=next_marker)) @@ -246,7 +244,7 @@ class IoTResponse(BaseResponse): things = self.iot_backend.list_principal_things( principal_arn=principal, ) - # TODO: handle pagination + # TODO: implement pagination in the future next_token = None return json.dumps(dict(things=things, nextToken=next_token)) @@ -256,3 +254,123 @@ class IoTResponse(BaseResponse): thing_name=thing_name, ) return json.dumps(dict(principals=principals)) + + def describe_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + 
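+        # The backend raises ResourceNotFoundException if no thing group with this name exists.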
thing_group = self.iot_backend.describe_thing_group( + thing_group_name=thing_group_name, + ) + return json.dumps(thing_group.to_dict()) + + def create_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + parent_group_name = self._get_param("parentGroupName") + thing_group_properties = self._get_param("thingGroupProperties") + thing_group_name, thing_group_arn, thing_group_id = self.iot_backend.create_thing_group( + thing_group_name=thing_group_name, + parent_group_name=parent_group_name, + thing_group_properties=thing_group_properties, + ) + return json.dumps(dict( + thingGroupName=thing_group_name, + thingGroupArn=thing_group_arn, + thingGroupId=thing_group_id) + ) + + def delete_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + expected_version = self._get_param("expectedVersion") + self.iot_backend.delete_thing_group( + thing_group_name=thing_group_name, + expected_version=expected_version, + ) + return json.dumps(dict()) + + def list_thing_groups(self): + # next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + parent_group = self._get_param("parentGroup") + name_prefix_filter = self._get_param("namePrefixFilter") + recursive = self._get_param("recursive") + thing_groups = self.iot_backend.list_thing_groups( + parent_group=parent_group, + name_prefix_filter=name_prefix_filter, + recursive=recursive, + ) + next_token = None + rets = [{'groupName': _.thing_group_name, 'groupArn': _.arn} for _ in thing_groups] + # TODO: implement pagination in the future + return json.dumps(dict(thingGroups=rets, nextToken=next_token)) + + def update_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_properties = self._get_param("thingGroupProperties") + expected_version = self._get_param("expectedVersion") + version = self.iot_backend.update_thing_group( + thing_group_name=thing_group_name, + thing_group_properties=thing_group_properties, + expected_version=expected_version, + ) + return json.dumps(dict(version=version)) + + def add_thing_to_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_arn = self._get_param("thingGroupArn") + thing_name = self._get_param("thingName") + thing_arn = self._get_param("thingArn") + self.iot_backend.add_thing_to_thing_group( + thing_group_name=thing_group_name, + thing_group_arn=thing_group_arn, + thing_name=thing_name, + thing_arn=thing_arn, + ) + return json.dumps(dict()) + + def remove_thing_from_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + thing_group_arn = self._get_param("thingGroupArn") + thing_name = self._get_param("thingName") + thing_arn = self._get_param("thingArn") + self.iot_backend.remove_thing_from_thing_group( + thing_group_name=thing_group_name, + thing_group_arn=thing_group_arn, + thing_name=thing_name, + thing_arn=thing_arn, + ) + return json.dumps(dict()) + + def list_things_in_thing_group(self): + thing_group_name = self._get_param("thingGroupName") + recursive = self._get_param("recursive") + # next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + things = self.iot_backend.list_things_in_thing_group( + thing_group_name=thing_group_name, + recursive=recursive, + ) + next_token = None + thing_names = [_.thing_name for _ in things] + # TODO: implement pagination in the future + return json.dumps(dict(things=thing_names, nextToken=next_token)) + + def list_thing_groups_for_thing(self): + thing_name = 
self._get_param("thingName") + # next_token = self._get_param("nextToken") + # max_results = self._get_int_param("maxResults") + thing_groups = self.iot_backend.list_thing_groups_for_thing( + thing_name=thing_name + ) + next_token = None + # TODO: implement pagination in the future + return json.dumps(dict(thingGroups=thing_groups, nextToken=next_token)) + + def update_thing_groups_for_thing(self): + thing_name = self._get_param("thingName") + thing_groups_to_add = self._get_param("thingGroupsToAdd") or [] + thing_groups_to_remove = self._get_param("thingGroupsToRemove") or [] + self.iot_backend.update_thing_groups_for_thing( + thing_name=thing_name, + thing_groups_to_add=thing_groups_to_add, + thing_groups_to_remove=thing_groups_to_remove, + ) + return json.dumps(dict()) diff --git a/moto/kinesis/exceptions.py b/moto/kinesis/exceptions.py index e2fe02775..82f796ecc 100644 --- a/moto/kinesis/exceptions.py +++ b/moto/kinesis/exceptions.py @@ -17,7 +17,7 @@ class ResourceNotFoundError(BadRequest): class ResourceInUseError(BadRequest): def __init__(self, message): - super(ResourceNotFoundError, self).__init__() + super(ResourceInUseError, self).__init__() self.description = json.dumps({ "message": message, '__type': 'ResourceInUseException', diff --git a/moto/logs/exceptions.py b/moto/logs/exceptions.py new file mode 100644 index 000000000..cc83452ea --- /dev/null +++ b/moto/logs/exceptions.py @@ -0,0 +1,33 @@ +from __future__ import unicode_literals +from moto.core.exceptions import JsonRESTError + + +class LogsClientError(JsonRESTError): + code = 400 + + +class ResourceNotFoundException(LogsClientError): + def __init__(self): + self.code = 400 + super(ResourceNotFoundException, self).__init__( + "ResourceNotFoundException", + "The specified resource does not exist" + ) + + +class InvalidParameterException(LogsClientError): + def __init__(self, msg=None): + self.code = 400 + super(InvalidParameterException, self).__init__( + "InvalidParameterException", + msg or "A parameter is specified incorrectly." + ) + + +class ResourceAlreadyExistsException(LogsClientError): + def __init__(self): + self.code = 400 + super(ResourceAlreadyExistsException, self).__init__( + 'ResourceAlreadyExistsException', + 'The specified resource already exists.' 
+ ) diff --git a/moto/logs/models.py b/moto/logs/models.py index 09dcb3645..3ae697a27 100644 --- a/moto/logs/models.py +++ b/moto/logs/models.py @@ -1,6 +1,10 @@ from moto.core import BaseBackend import boto.logs from moto.core.utils import unix_time_millis +from .exceptions import ( + ResourceNotFoundException, + ResourceAlreadyExistsException +) class LogEvent: @@ -49,23 +53,29 @@ class LogStream: self.__class__._log_ids += 1 def _update(self): - self.firstEventTimestamp = min([x.timestamp for x in self.events]) - self.lastEventTimestamp = max([x.timestamp for x in self.events]) + # events can be empty when stream is described soon after creation + self.firstEventTimestamp = min([x.timestamp for x in self.events]) if self.events else None + self.lastEventTimestamp = max([x.timestamp for x in self.events]) if self.events else None def to_describe_dict(self): # Compute start and end times self._update() - return { + res = { "arn": self.arn, "creationTime": self.creationTime, - "firstEventTimestamp": self.firstEventTimestamp, - "lastEventTimestamp": self.lastEventTimestamp, - "lastIngestionTime": self.lastIngestionTime, "logStreamName": self.logStreamName, "storedBytes": self.storedBytes, - "uploadSequenceToken": str(self.uploadSequenceToken), } + if self.events: + rest = { + "firstEventTimestamp": self.firstEventTimestamp, + "lastEventTimestamp": self.lastEventTimestamp, + "lastIngestionTime": self.lastIngestionTime, + "uploadSequenceToken": str(self.uploadSequenceToken), + } + res.update(rest) + return res def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token): # TODO: ensure sequence_token @@ -126,18 +136,22 @@ class LogGroup: self.streams = dict() # {name: LogStream} def create_log_stream(self, log_stream_name): - assert log_stream_name not in self.streams + if log_stream_name in self.streams: + raise ResourceAlreadyExistsException() self.streams[log_stream_name] = LogStream(self.region, self.name, log_stream_name) def delete_log_stream(self, log_stream_name): - assert log_stream_name in self.streams + if log_stream_name not in self.streams: + raise ResourceNotFoundException() del self.streams[log_stream_name] def describe_log_streams(self, descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by): + # responses only logStreamName, creationTime, arn, storedBytes when no events are stored. 
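+        # Streams are filtered by name prefix here; the sort below falls back to 0 for streams that have no lastEventTimestamp yet.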
+ log_streams = [(name, stream.to_describe_dict()) for name, stream in self.streams.items() if name.startswith(log_stream_name_prefix)] def sorter(item): - return item[0] if order_by == 'logStreamName' else item[1]['lastEventTimestamp'] + return item[0] if order_by == 'logStreamName' else item[1].get('lastEventTimestamp', 0) if next_token is None: next_token = 0 @@ -151,18 +165,18 @@ class LogGroup: return log_streams_page, new_token def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token): - assert log_stream_name in self.streams + if log_stream_name not in self.streams: + raise ResourceNotFoundException() stream = self.streams[log_stream_name] return stream.put_log_events(log_group_name, log_stream_name, log_events, sequence_token) def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head): - assert log_stream_name in self.streams + if log_stream_name not in self.streams: + raise ResourceNotFoundException() stream = self.streams[log_stream_name] return stream.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head) def filter_log_events(self, log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved): - assert not filter_pattern # TODO: impl - streams = [stream for name, stream in self.streams.items() if not log_stream_names or name in log_stream_names] events = [] @@ -170,7 +184,7 @@ class LogGroup: events += stream.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved) if interleaved: - events = sorted(events, key=lambda event: event.timestamp) + events = sorted(events, key=lambda event: event['timestamp']) if next_token is None: next_token = 0 @@ -195,7 +209,8 @@ class LogsBackend(BaseBackend): self.__init__(region_name) def create_log_group(self, log_group_name, tags): - assert log_group_name not in self.groups + if log_group_name in self.groups: + raise ResourceAlreadyExistsException() self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags) def ensure_log_group(self, log_group_name, tags): @@ -204,37 +219,44 @@ class LogsBackend(BaseBackend): self.groups[log_group_name] = LogGroup(self.region_name, log_group_name, tags) def delete_log_group(self, log_group_name): - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() del self.groups[log_group_name] def create_log_stream(self, log_group_name, log_stream_name): - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() log_group = self.groups[log_group_name] return log_group.create_log_stream(log_stream_name) def delete_log_stream(self, log_group_name, log_stream_name): - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() log_group = self.groups[log_group_name] return log_group.delete_log_stream(log_stream_name) def describe_log_streams(self, descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by): - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() log_group = self.groups[log_group_name] return log_group.describe_log_streams(descending, limit, log_group_name, log_stream_name_prefix, next_token, order_by) def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token): # TODO: add 
support for sequence_tokens - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() log_group = self.groups[log_group_name] return log_group.put_log_events(log_group_name, log_stream_name, log_events, sequence_token) def get_log_events(self, log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head): - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() log_group = self.groups[log_group_name] return log_group.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head) def filter_log_events(self, log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved): - assert log_group_name in self.groups + if log_group_name not in self.groups: + raise ResourceNotFoundException() log_group = self.groups[log_group_name] return log_group.filter_log_events(log_group_name, log_stream_names, start_time, end_time, limit, next_token, filter_pattern, interleaved) diff --git a/moto/logs/responses.py b/moto/logs/responses.py index e0a17f5f8..7bf481908 100644 --- a/moto/logs/responses.py +++ b/moto/logs/responses.py @@ -87,9 +87,8 @@ class LogsResponse(BaseResponse): events, next_backward_token, next_foward_token = \ self.logs_backend.get_log_events(log_group_name, log_stream_name, start_time, end_time, limit, next_token, start_from_head) - return json.dumps({ - "events": [ob.__dict__ for ob in events], + "events": events, "nextBackwardToken": next_backward_token, "nextForwardToken": next_foward_token }) diff --git a/moto/opsworks/models.py b/moto/opsworks/models.py index fe8c882a7..4fe428c65 100644 --- a/moto/opsworks/models.py +++ b/moto/opsworks/models.py @@ -398,11 +398,82 @@ class Stack(BaseModel): return response +class App(BaseModel): + + def __init__(self, stack_id, name, type, + shortname=None, + description=None, + datasources=None, + app_source=None, + domains=None, + enable_ssl=False, + ssl_configuration=None, + attributes=None, + environment=None): + self.stack_id = stack_id + self.name = name + self.type = type + self.shortname = shortname + self.description = description + + self.datasources = datasources + if datasources is None: + self.datasources = [] + + self.app_source = app_source + if app_source is None: + self.app_source = {} + + self.domains = domains + if domains is None: + self.domains = [] + + self.enable_ssl = enable_ssl + + self.ssl_configuration = ssl_configuration + if ssl_configuration is None: + self.ssl_configuration = {} + + self.attributes = attributes + if attributes is None: + self.attributes = {} + + self.environment = environment + if environment is None: + self.environment = {} + + self.id = "{0}".format(uuid.uuid4()) + self.created_at = datetime.datetime.utcnow() + + def __eq__(self, other): + return self.id == other.id + + def to_dict(self): + d = { + "AppId": self.id, + "AppSource": self.app_source, + "Attributes": self.attributes, + "CreatedAt": self.created_at.isoformat(), + "Datasources": self.datasources, + "Description": self.description, + "Domains": self.domains, + "EnableSsl": self.enable_ssl, + "Environment": self.environment, + "Name": self.name, + "Shortname": self.shortname, + "SslConfiguration": self.ssl_configuration, + "StackId": self.stack_id, + "Type": self.type + } + return d + + class OpsWorksBackend(BaseBackend): def __init__(self, ec2_backend): self.stacks = {} self.layers = {} + self.apps = {} self.instances = 
{} self.ec2_backend = ec2_backend @@ -435,6 +506,20 @@ class OpsWorksBackend(BaseBackend): self.stacks[stackid].layers.append(layer) return layer + def create_app(self, **kwargs): + name = kwargs['name'] + stackid = kwargs['stack_id'] + if stackid not in self.stacks: + raise ResourceNotFoundException(stackid) + if name in [a.name for a in self.stacks[stackid].apps]: + raise ValidationException( + 'There is already an app named "{0}" ' + 'for this stack'.format(name)) + app = App(**kwargs) + self.apps[app.id] = app + self.stacks[stackid].apps.append(app) + return app + def create_instance(self, **kwargs): stack_id = kwargs['stack_id'] layer_ids = kwargs['layer_ids'] @@ -502,6 +587,22 @@ class OpsWorksBackend(BaseBackend): raise ResourceNotFoundException(", ".join(unknown_layers)) return [self.layers[id].to_dict() for id in layer_ids] + def describe_apps(self, stack_id, app_ids): + if stack_id is not None and app_ids is not None: + raise ValidationException( + "Please provide one or more app IDs or a stack ID" + ) + if stack_id is not None: + if stack_id not in self.stacks: + raise ResourceNotFoundException( + "Unable to find stack with ID {0}".format(stack_id)) + return [app.to_dict() for app in self.stacks[stack_id].apps] + + unknown_apps = set(app_ids) - set(self.apps.keys()) + if unknown_apps: + raise ResourceNotFoundException(", ".join(unknown_apps)) + return [self.apps[id].to_dict() for id in app_ids] + def describe_instances(self, instance_ids, layer_id, stack_id): if len(list(filter(None, (instance_ids, layer_id, stack_id)))) != 1: raise ValidationException("Please provide either one or more " diff --git a/moto/opsworks/responses.py b/moto/opsworks/responses.py index 42e0f2c5c..c9f8fe125 100644 --- a/moto/opsworks/responses.py +++ b/moto/opsworks/responses.py @@ -75,6 +75,24 @@ class OpsWorksResponse(BaseResponse): layer = self.opsworks_backend.create_layer(**kwargs) return json.dumps({"LayerId": layer.id}, indent=1) + def create_app(self): + kwargs = dict( + stack_id=self.parameters.get('StackId'), + name=self.parameters.get('Name'), + type=self.parameters.get('Type'), + shortname=self.parameters.get('Shortname'), + description=self.parameters.get('Description'), + datasources=self.parameters.get('DataSources'), + app_source=self.parameters.get('AppSource'), + domains=self.parameters.get('Domains'), + enable_ssl=self.parameters.get('EnableSsl'), + ssl_configuration=self.parameters.get('SslConfiguration'), + attributes=self.parameters.get('Attributes'), + environment=self.parameters.get('Environment') + ) + app = self.opsworks_backend.create_app(**kwargs) + return json.dumps({"AppId": app.id}, indent=1) + def create_instance(self): kwargs = dict( stack_id=self.parameters.get("StackId"), @@ -110,6 +128,12 @@ class OpsWorksResponse(BaseResponse): layers = self.opsworks_backend.describe_layers(stack_id, layer_ids) return json.dumps({"Layers": layers}, indent=1) + def describe_apps(self): + stack_id = self.parameters.get("StackId") + app_ids = self.parameters.get("AppIds") + apps = self.opsworks_backend.describe_apps(stack_id, app_ids) + return json.dumps({"Apps": apps}, indent=1) + def describe_instances(self): instance_ids = self.parameters.get("InstanceIds") layer_id = self.parameters.get("LayerId") diff --git a/moto/packages/responses/.gitignore b/moto/packages/responses/.gitignore deleted file mode 100644 index 5d4406b8d..000000000 --- a/moto/packages/responses/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -.arcconfig -.coverage -.DS_Store -.idea -*.db -*.egg-info -*.pyc -/htmlcov 
-/dist -/build -/.cache -/.tox diff --git a/moto/packages/responses/.travis.yml b/moto/packages/responses/.travis.yml deleted file mode 100644 index 9ab219db0..000000000 --- a/moto/packages/responses/.travis.yml +++ /dev/null @@ -1,27 +0,0 @@ -language: python -sudo: false -python: - - "2.6" - - "2.7" - - "3.3" - - "3.4" - - "3.5" -cache: - directories: - - .pip_download_cache -env: - matrix: - - REQUESTS=requests==2.0 - - REQUESTS=-U requests - - REQUESTS="-e git+git://github.com/kennethreitz/requests.git#egg=requests" - global: - - PIP_DOWNLOAD_CACHE=".pip_download_cache" -matrix: - allow_failures: - - env: 'REQUESTS="-e git+git://github.com/kennethreitz/requests.git#egg=requests"' -install: - - "pip install ${REQUESTS}" - - make develop -script: - - if [[ $TRAVIS_PYTHON_VERSION != 2.6 ]]; then make lint; fi - - py.test . --cov responses --cov-report term-missing diff --git a/moto/packages/responses/CHANGES b/moto/packages/responses/CHANGES deleted file mode 100644 index 1bfd7ead8..000000000 --- a/moto/packages/responses/CHANGES +++ /dev/null @@ -1,32 +0,0 @@ -Unreleased ----------- - -- Allow empty list/dict as json object (GH-100) - -0.5.1 ------ - -- Add LICENSE, README and CHANGES to the PyPI distribution (GH-97). - -0.5.0 ------ - -- Allow passing a JSON body to `response.add` (GH-82) -- Improve ConnectionError emulation (GH-73) -- Correct assertion in assert_all_requests_are_fired (GH-71) - -0.4.0 ------ - -- Requests 2.0+ is required -- Mocking now happens on the adapter instead of the session - -0.3.0 ------ - -- Add the ability to mock errors (GH-22) -- Add responses.mock context manager (GH-36) -- Support custom adapters (GH-33) -- Add support for regexp error matching (GH-25) -- Add support for dynamic bodies via `responses.add_callback` (GH-24) -- Preserve argspec when using `responses.activate` decorator (GH-18) diff --git a/moto/packages/responses/LICENSE b/moto/packages/responses/LICENSE deleted file mode 100644 index 52b44b20a..000000000 --- a/moto/packages/responses/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright 2015 David Cramer - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/moto/packages/responses/MANIFEST.in b/moto/packages/responses/MANIFEST.in deleted file mode 100644 index ef901684c..000000000 --- a/moto/packages/responses/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -include README.rst CHANGES LICENSE -global-exclude *~ diff --git a/moto/packages/responses/Makefile b/moto/packages/responses/Makefile deleted file mode 100644 index 9da42c6d1..000000000 --- a/moto/packages/responses/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -develop: - pip install -e . - make install-test-requirements - -install-test-requirements: - pip install "file://`pwd`#egg=responses[tests]" - -test: develop lint - @echo "Running Python tests" - py.test . - @echo "" - -lint: - @echo "Linting Python files" - PYFLAKES_NODOCTEST=1 flake8 . - @echo "" diff --git a/moto/packages/responses/README.rst b/moto/packages/responses/README.rst deleted file mode 100644 index 5f946fcde..000000000 --- a/moto/packages/responses/README.rst +++ /dev/null @@ -1,190 +0,0 @@ -Responses -========= - -.. 
image:: https://travis-ci.org/getsentry/responses.svg?branch=master - :target: https://travis-ci.org/getsentry/responses - -A utility library for mocking out the `requests` Python library. - -.. note:: Responses requires Requests >= 2.0 - -Response body as string ------------------------ - -.. code-block:: python - - import responses - import requests - - @responses.activate - def test_my_api(): - responses.add(responses.GET, 'http://twitter.com/api/1/foobar', - body='{"error": "not found"}', status=404, - content_type='application/json') - - resp = requests.get('http://twitter.com/api/1/foobar') - - assert resp.json() == {"error": "not found"} - - assert len(responses.calls) == 1 - assert responses.calls[0].request.url == 'http://twitter.com/api/1/foobar' - assert responses.calls[0].response.text == '{"error": "not found"}' - -You can also specify a JSON object instead of a body string. - -.. code-block:: python - - import responses - import requests - - @responses.activate - def test_my_api(): - responses.add(responses.GET, 'http://twitter.com/api/1/foobar', - json={"error": "not found"}, status=404) - - resp = requests.get('http://twitter.com/api/1/foobar') - - assert resp.json() == {"error": "not found"} - - assert len(responses.calls) == 1 - assert responses.calls[0].request.url == 'http://twitter.com/api/1/foobar' - assert responses.calls[0].response.text == '{"error": "not found"}' - -Request callback ----------------- - -.. code-block:: python - - import json - - import responses - import requests - - @responses.activate - def test_calc_api(): - - def request_callback(request): - payload = json.loads(request.body) - resp_body = {'value': sum(payload['numbers'])} - headers = {'request-id': '728d329e-0e86-11e4-a748-0c84dc037c13'} - return (200, headers, json.dumps(resp_body)) - - responses.add_callback( - responses.POST, 'http://calc.com/sum', - callback=request_callback, - content_type='application/json', - ) - - resp = requests.post( - 'http://calc.com/sum', - json.dumps({'numbers': [1, 2, 3]}), - headers={'content-type': 'application/json'}, - ) - - assert resp.json() == {'value': 6} - - assert len(responses.calls) == 1 - assert responses.calls[0].request.url == 'http://calc.com/sum' - assert responses.calls[0].response.text == '{"value": 6}' - assert ( - responses.calls[0].response.headers['request-id'] == - '728d329e-0e86-11e4-a748-0c84dc037c13' - ) - -Instead of passing a string URL into `responses.add` or `responses.add_callback` -you can also supply a compiled regular expression. - -.. code-block:: python - - import re - import responses - import requests - - # Instead of - responses.add(responses.GET, 'http://twitter.com/api/1/foobar', - body='{"error": "not found"}', status=404, - content_type='application/json') - - # You can do the following - url_re = re.compile(r'https?://twitter\.com/api/\d+/foobar') - responses.add(responses.GET, url_re, - body='{"error": "not found"}', status=404, - content_type='application/json') - -A response can also throw an exception as follows. - -.. code-block:: python - - import responses - import requests - from requests.exceptions import HTTPError - - exception = HTTPError('Something went wrong') - responses.add(responses.GET, 'http://twitter.com/api/1/foobar', - body=exception) - # All calls to 'http://twitter.com/api/1/foobar' will throw exception. - - -Responses as a context manager ------------------------------- - -.. 
code-block:: python - - import responses - import requests - - - def test_my_api(): - with responses.RequestsMock() as rsps: - rsps.add(responses.GET, 'http://twitter.com/api/1/foobar', - body='{}', status=200, - content_type='application/json') - resp = requests.get('http://twitter.com/api/1/foobar') - - assert resp.status_code == 200 - - # outside the context manager requests will hit the remote server - resp = requests.get('http://twitter.com/api/1/foobar') - resp.status_code == 404 - - -Assertions on declared responses --------------------------------- - -When used as a context manager, Responses will, by default, raise an assertion -error if a url was registered but not accessed. This can be disabled by passing -the ``assert_all_requests_are_fired`` value: - -.. code-block:: python - - import responses - import requests - - - def test_my_api(): - with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps: - rsps.add(responses.GET, 'http://twitter.com/api/1/foobar', - body='{}', status=200, - content_type='application/json') - -Multiple Responses ------------------- -You can also use ``assert_all_requests_are_fired`` to add multiple responses for the same url: - -.. code-block:: python - - import responses - import requests - - - def test_my_api(): - with responses.RequestsMock(assert_all_requests_are_fired=True) as rsps: - rsps.add(responses.GET, 'http://twitter.com/api/1/foobar', status=500) - rsps.add(responses.GET, 'http://twitter.com/api/1/foobar', - body='{}', status=200, - content_type='application/json') - - resp = requests.get('http://twitter.com/api/1/foobar') - assert resp.status_code == 500 - resp = requests.get('http://twitter.com/api/1/foobar') - assert resp.status_code == 200 diff --git a/moto/packages/responses/__init__.py b/moto/packages/responses/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/moto/packages/responses/responses.py b/moto/packages/responses/responses.py deleted file mode 100644 index 3bc437f0b..000000000 --- a/moto/packages/responses/responses.py +++ /dev/null @@ -1,330 +0,0 @@ -from __future__ import ( - absolute_import, print_function, division, unicode_literals -) - -import inspect -import json as json_module -import re -import six - -from collections import namedtuple, Sequence, Sized -from functools import update_wrapper -from cookies import Cookies -from requests.adapters import HTTPAdapter -from requests.utils import cookiejar_from_dict -from requests.exceptions import ConnectionError -from requests.sessions import REDIRECT_STATI - -try: - from requests.packages.urllib3.response import HTTPResponse -except ImportError: - from urllib3.response import HTTPResponse - -if six.PY2: - from urlparse import urlparse, parse_qsl -else: - from urllib.parse import urlparse, parse_qsl - -if six.PY2: - try: - from six import cStringIO as BufferIO - except ImportError: - from six import StringIO as BufferIO -else: - from io import BytesIO as BufferIO - - -Call = namedtuple('Call', ['request', 'response']) - -_wrapper_template = """\ -def wrapper%(signature)s: - with responses: - return func%(funcargs)s -""" - - -def _is_string(s): - return isinstance(s, (six.string_types, six.text_type)) - - -def _is_redirect(response): - try: - # 2.0.0 <= requests <= 2.2 - return response.is_redirect - except AttributeError: - # requests > 2.2 - return ( - # use request.sessions conditional - response.status_code in REDIRECT_STATI and - 'location' in response.headers - ) - - -def get_wrapped(func, wrapper_template, evaldict): - # 
Preserve the argspec for the wrapped function so that testing - # tools such as pytest can continue to use their fixture injection. - args, a, kw, defaults = inspect.getargspec(func) - - signature = inspect.formatargspec(args, a, kw, defaults) - is_bound_method = hasattr(func, '__self__') - if is_bound_method: - args = args[1:] # Omit 'self' - callargs = inspect.formatargspec(args, a, kw, None) - - ctx = {'signature': signature, 'funcargs': callargs} - six.exec_(wrapper_template % ctx, evaldict) - - wrapper = evaldict['wrapper'] - - update_wrapper(wrapper, func) - if is_bound_method: - wrapper = wrapper.__get__(func.__self__, type(func.__self__)) - return wrapper - - -class CallList(Sequence, Sized): - - def __init__(self): - self._calls = [] - - def __iter__(self): - return iter(self._calls) - - def __len__(self): - return len(self._calls) - - def __getitem__(self, idx): - return self._calls[idx] - - def add(self, request, response): - self._calls.append(Call(request, response)) - - def reset(self): - self._calls = [] - - -def _ensure_url_default_path(url, match_querystring): - if _is_string(url) and url.count('/') == 2: - if match_querystring: - return url.replace('?', '/?', 1) - else: - return url + '/' - return url - - -class RequestsMock(object): - DELETE = 'DELETE' - GET = 'GET' - HEAD = 'HEAD' - OPTIONS = 'OPTIONS' - PATCH = 'PATCH' - POST = 'POST' - PUT = 'PUT' - - def __init__(self, assert_all_requests_are_fired=True, pass_through=True): - self._calls = CallList() - self.reset() - self.assert_all_requests_are_fired = assert_all_requests_are_fired - self.pass_through = pass_through - self.original_send = HTTPAdapter.send - - def reset(self): - self._urls = [] - self._calls.reset() - - def add(self, method, url, body='', match_querystring=False, - status=200, adding_headers=None, stream=False, - content_type='text/plain', json=None): - - # if we were passed a `json` argument, - # override the body and content_type - if json is not None: - body = json_module.dumps(json) - content_type = 'application/json' - - # ensure the url has a default path set if the url is a string - url = _ensure_url_default_path(url, match_querystring) - - # body must be bytes - if isinstance(body, six.text_type): - body = body.encode('utf-8') - - self._urls.append({ - 'url': url, - 'method': method, - 'body': body, - 'content_type': content_type, - 'match_querystring': match_querystring, - 'status': status, - 'adding_headers': adding_headers, - 'stream': stream, - }) - - def add_callback(self, method, url, callback, match_querystring=False, - content_type='text/plain'): - # ensure the url has a default path set if the url is a string - # url = _ensure_url_default_path(url, match_querystring) - - self._urls.append({ - 'url': url, - 'method': method, - 'callback': callback, - 'content_type': content_type, - 'match_querystring': match_querystring, - }) - - @property - def calls(self): - return self._calls - - def __enter__(self): - self.start() - return self - - def __exit__(self, type, value, traceback): - success = type is None - self.stop(allow_assert=success) - self.reset() - return success - - def activate(self, func): - evaldict = {'responses': self, 'func': func} - return get_wrapped(func, _wrapper_template, evaldict) - - def _find_match(self, request): - for match in self._urls: - if request.method != match['method']: - continue - - if not self._has_url_match(match, request.url): - continue - - break - else: - return None - if self.assert_all_requests_are_fired: - # for each found match remove the url 
from the stack - self._urls.remove(match) - return match - - def _has_url_match(self, match, request_url): - url = match['url'] - - if not match['match_querystring']: - request_url = request_url.split('?', 1)[0] - - if _is_string(url): - if match['match_querystring']: - return self._has_strict_url_match(url, request_url) - else: - return url == request_url - elif isinstance(url, re._pattern_type) and url.match(request_url): - return True - else: - return False - - def _has_strict_url_match(self, url, other): - url_parsed = urlparse(url) - other_parsed = urlparse(other) - - if url_parsed[:3] != other_parsed[:3]: - return False - - url_qsl = sorted(parse_qsl(url_parsed.query)) - other_qsl = sorted(parse_qsl(other_parsed.query)) - return url_qsl == other_qsl - - def _on_request(self, adapter, request, **kwargs): - match = self._find_match(request) - # TODO(dcramer): find the correct class for this - if match is None: - if self.pass_through: - return self.original_send(adapter, request, **kwargs) - - error_msg = 'Connection refused: {0} {1}'.format(request.method, - request.url) - response = ConnectionError(error_msg) - response.request = request - - self._calls.add(request, response) - raise response - - if 'body' in match and isinstance(match['body'], Exception): - self._calls.add(request, match['body']) - raise match['body'] - - headers = {} - if match['content_type'] is not None: - headers['Content-Type'] = match['content_type'] - - if 'callback' in match: # use callback - status, r_headers, body = match['callback'](request) - if isinstance(body, six.text_type): - body = body.encode('utf-8') - body = BufferIO(body) - headers.update(r_headers) - - elif 'body' in match: - if match['adding_headers']: - headers.update(match['adding_headers']) - status = match['status'] - body = BufferIO(match['body']) - - response = HTTPResponse( - status=status, - reason=six.moves.http_client.responses[status], - body=body, - headers=headers, - preload_content=False, - # Need to not decode_content to mimic requests - decode_content=False, - ) - - response = adapter.build_response(request, response) - if not match.get('stream'): - response.content # NOQA - - try: - resp_cookies = Cookies.from_request(response.headers['set-cookie']) - response.cookies = cookiejar_from_dict(dict( - (v.name, v.value) - for _, v - in resp_cookies.items() - )) - except (KeyError, TypeError): - pass - - self._calls.add(request, response) - - return response - - def start(self): - try: - from unittest import mock - except ImportError: - import mock - - def unbound_on_send(adapter, request, *a, **kwargs): - return self._on_request(adapter, request, *a, **kwargs) - self._patcher1 = mock.patch('botocore.vendored.requests.adapters.HTTPAdapter.send', - unbound_on_send) - self._patcher1.start() - self._patcher2 = mock.patch('requests.adapters.HTTPAdapter.send', - unbound_on_send) - self._patcher2.start() - - def stop(self, allow_assert=True): - self._patcher1.stop() - self._patcher2.stop() - if allow_assert and self.assert_all_requests_are_fired and self._urls: - raise AssertionError( - 'Not all requests have been executed {0!r}'.format( - [(url['method'], url['url']) for url in self._urls])) - - -# expose default mock namespace -mock = _default_mock = RequestsMock(assert_all_requests_are_fired=False, pass_through=False) -__all__ = [] -for __attr in (a for a in dir(_default_mock) if not a.startswith('_')): - __all__.append(__attr) - globals()[__attr] = getattr(_default_mock, __attr) diff --git a/moto/packages/responses/setup.cfg 
b/moto/packages/responses/setup.cfg deleted file mode 100644 index 9b6594f2e..000000000 --- a/moto/packages/responses/setup.cfg +++ /dev/null @@ -1,5 +0,0 @@ -[pytest] -addopts=--tb=short - -[bdist_wheel] -universal=1 diff --git a/moto/packages/responses/setup.py b/moto/packages/responses/setup.py deleted file mode 100644 index 911c07da4..000000000 --- a/moto/packages/responses/setup.py +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env python -""" -responses -========= - -A utility library for mocking out the `requests` Python library. - -:copyright: (c) 2015 David Cramer -:license: Apache 2.0 -""" - -import sys -import logging - -from setuptools import setup -from setuptools.command.test import test as TestCommand -import pkg_resources - - -setup_requires = [] - -if 'test' in sys.argv: - setup_requires.append('pytest') - -install_requires = [ - 'requests>=2.0', - 'cookies', - 'six', -] - -tests_require = [ - 'pytest', - 'coverage >= 3.7.1, < 5.0.0', - 'pytest-cov', - 'flake8', -] - - -extras_require = { - ':python_version in "2.6, 2.7, 3.2"': ['mock'], - 'tests': tests_require, -} - -try: - if 'bdist_wheel' not in sys.argv: - for key, value in extras_require.items(): - if key.startswith(':') and pkg_resources.evaluate_marker(key[1:]): - install_requires.extend(value) -except Exception: - logging.getLogger(__name__).exception( - 'Something went wrong calculating platform specific dependencies, so ' - "you're getting them all!" - ) - for key, value in extras_require.items(): - if key.startswith(':'): - install_requires.extend(value) - - -class PyTest(TestCommand): - - def finalize_options(self): - TestCommand.finalize_options(self) - self.test_args = ['test_responses.py'] - self.test_suite = True - - def run_tests(self): - # import here, cause outside the eggs aren't loaded - import pytest - errno = pytest.main(self.test_args) - sys.exit(errno) - - -setup( - name='responses', - version='0.6.0', - author='David Cramer', - description=( - 'A utility library for mocking out the `requests` Python library.' 
- ), - url='https://github.com/getsentry/responses', - license='Apache 2.0', - long_description=open('README.rst').read(), - py_modules=['responses', 'test_responses'], - zip_safe=False, - install_requires=install_requires, - extras_require=extras_require, - tests_require=tests_require, - setup_requires=setup_requires, - cmdclass={'test': PyTest}, - include_package_data=True, - classifiers=[ - 'Intended Audience :: Developers', - 'Intended Audience :: System Administrators', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 2', - 'Programming Language :: Python :: 3', - 'Topic :: Software Development' - ], -) diff --git a/moto/packages/responses/test_responses.py b/moto/packages/responses/test_responses.py deleted file mode 100644 index 967a535cf..000000000 --- a/moto/packages/responses/test_responses.py +++ /dev/null @@ -1,444 +0,0 @@ -from __future__ import ( - absolute_import, print_function, division, unicode_literals -) - -import re -import requests -import responses -import pytest - -from inspect import getargspec -from requests.exceptions import ConnectionError, HTTPError - - -def assert_reset(): - assert len(responses._default_mock._urls) == 0 - assert len(responses.calls) == 0 - - -def assert_response(resp, body=None, content_type='text/plain'): - assert resp.status_code == 200 - assert resp.reason == 'OK' - if content_type is not None: - assert resp.headers['Content-Type'] == content_type - else: - assert 'Content-Type' not in resp.headers - assert resp.text == body - - -def test_response(): - @responses.activate - def run(): - responses.add(responses.GET, 'http://example.com', body=b'test') - resp = requests.get('http://example.com') - assert_response(resp, 'test') - assert len(responses.calls) == 1 - assert responses.calls[0].request.url == 'http://example.com/' - assert responses.calls[0].response.content == b'test' - - resp = requests.get('http://example.com?foo=bar') - assert_response(resp, 'test') - assert len(responses.calls) == 2 - assert responses.calls[1].request.url == 'http://example.com/?foo=bar' - assert responses.calls[1].response.content == b'test' - - run() - assert_reset() - - -def test_connection_error(): - @responses.activate - def run(): - responses.add(responses.GET, 'http://example.com') - - with pytest.raises(ConnectionError): - requests.get('http://example.com/foo') - - assert len(responses.calls) == 1 - assert responses.calls[0].request.url == 'http://example.com/foo' - assert type(responses.calls[0].response) is ConnectionError - assert responses.calls[0].response.request - - run() - assert_reset() - - -def test_match_querystring(): - @responses.activate - def run(): - url = 'http://example.com?test=1&foo=bar' - responses.add( - responses.GET, url, - match_querystring=True, body=b'test') - resp = requests.get('http://example.com?test=1&foo=bar') - assert_response(resp, 'test') - resp = requests.get('http://example.com?foo=bar&test=1') - assert_response(resp, 'test') - - run() - assert_reset() - - -def test_match_querystring_error(): - @responses.activate - def run(): - responses.add( - responses.GET, 'http://example.com/?test=1', - match_querystring=True) - - with pytest.raises(ConnectionError): - requests.get('http://example.com/foo/?test=2') - - run() - assert_reset() - - -def test_match_querystring_regex(): - @responses.activate - def run(): - """Note that `match_querystring` value shouldn't matter when passing a - regular expression""" - - responses.add( - responses.GET, re.compile(r'http://example\.com/foo/\?test=1'), - 
body='test1', match_querystring=True) - - resp = requests.get('http://example.com/foo/?test=1') - assert_response(resp, 'test1') - - responses.add( - responses.GET, re.compile(r'http://example\.com/foo/\?test=2'), - body='test2', match_querystring=False) - - resp = requests.get('http://example.com/foo/?test=2') - assert_response(resp, 'test2') - - run() - assert_reset() - - -def test_match_querystring_error_regex(): - @responses.activate - def run(): - """Note that `match_querystring` value shouldn't matter when passing a - regular expression""" - - responses.add( - responses.GET, re.compile(r'http://example\.com/foo/\?test=1'), - match_querystring=True) - - with pytest.raises(ConnectionError): - requests.get('http://example.com/foo/?test=3') - - responses.add( - responses.GET, re.compile(r'http://example\.com/foo/\?test=2'), - match_querystring=False) - - with pytest.raises(ConnectionError): - requests.get('http://example.com/foo/?test=4') - - run() - assert_reset() - - -def test_accept_string_body(): - @responses.activate - def run(): - url = 'http://example.com/' - responses.add( - responses.GET, url, body='test') - resp = requests.get(url) - assert_response(resp, 'test') - - run() - assert_reset() - - -def test_accept_json_body(): - @responses.activate - def run(): - content_type = 'application/json' - - url = 'http://example.com/' - responses.add( - responses.GET, url, json={"message": "success"}) - resp = requests.get(url) - assert_response(resp, '{"message": "success"}', content_type) - - url = 'http://example.com/1/' - responses.add(responses.GET, url, json=[]) - resp = requests.get(url) - assert_response(resp, '[]', content_type) - - run() - assert_reset() - - -def test_no_content_type(): - @responses.activate - def run(): - url = 'http://example.com/' - responses.add( - responses.GET, url, body='test', content_type=None) - resp = requests.get(url) - assert_response(resp, 'test', content_type=None) - - run() - assert_reset() - - -def test_throw_connection_error_explicit(): - @responses.activate - def run(): - url = 'http://example.com' - exception = HTTPError('HTTP Error') - responses.add( - responses.GET, url, exception) - - with pytest.raises(HTTPError) as HE: - requests.get(url) - - assert str(HE.value) == 'HTTP Error' - - run() - assert_reset() - - -def test_callback(): - body = b'test callback' - status = 400 - reason = 'Bad Request' - headers = {'foo': 'bar'} - url = 'http://example.com/' - - def request_callback(request): - return (status, headers, body) - - @responses.activate - def run(): - responses.add_callback(responses.GET, url, request_callback) - resp = requests.get(url) - assert resp.text == "test callback" - assert resp.status_code == status - assert resp.reason == reason - assert 'foo' in resp.headers - assert resp.headers['foo'] == 'bar' - - run() - assert_reset() - - -def test_callback_no_content_type(): - body = b'test callback' - status = 400 - reason = 'Bad Request' - headers = {'foo': 'bar'} - url = 'http://example.com/' - - def request_callback(request): - return (status, headers, body) - - @responses.activate - def run(): - responses.add_callback( - responses.GET, url, request_callback, content_type=None) - resp = requests.get(url) - assert resp.text == "test callback" - assert resp.status_code == status - assert resp.reason == reason - assert 'foo' in resp.headers - assert 'Content-Type' not in resp.headers - - run() - assert_reset() - - -def test_regular_expression_url(): - @responses.activate - def run(): - url = 
re.compile(r'https?://(.*\.)?example.com') - responses.add(responses.GET, url, body=b'test') - - resp = requests.get('http://example.com') - assert_response(resp, 'test') - - resp = requests.get('https://example.com') - assert_response(resp, 'test') - - resp = requests.get('https://uk.example.com') - assert_response(resp, 'test') - - with pytest.raises(ConnectionError): - requests.get('https://uk.exaaample.com') - - run() - assert_reset() - - -def test_custom_adapter(): - @responses.activate - def run(): - url = "http://example.com" - responses.add(responses.GET, url, body=b'test') - - calls = [0] - - class DummyAdapter(requests.adapters.HTTPAdapter): - - def send(self, *a, **k): - calls[0] += 1 - return super(DummyAdapter, self).send(*a, **k) - - # Test that the adapter is actually used - session = requests.Session() - session.mount("http://", DummyAdapter()) - - resp = session.get(url, allow_redirects=False) - assert calls[0] == 1 - - # Test that the response is still correctly emulated - session = requests.Session() - session.mount("http://", DummyAdapter()) - - resp = session.get(url) - assert_response(resp, 'test') - - run() - - -def test_responses_as_context_manager(): - def run(): - with responses.mock: - responses.add(responses.GET, 'http://example.com', body=b'test') - resp = requests.get('http://example.com') - assert_response(resp, 'test') - assert len(responses.calls) == 1 - assert responses.calls[0].request.url == 'http://example.com/' - assert responses.calls[0].response.content == b'test' - - resp = requests.get('http://example.com?foo=bar') - assert_response(resp, 'test') - assert len(responses.calls) == 2 - assert (responses.calls[1].request.url == - 'http://example.com/?foo=bar') - assert responses.calls[1].response.content == b'test' - - run() - assert_reset() - - -def test_activate_doesnt_change_signature(): - def test_function(a, b=None): - return (a, b) - - decorated_test_function = responses.activate(test_function) - assert getargspec(test_function) == getargspec(decorated_test_function) - assert decorated_test_function(1, 2) == test_function(1, 2) - assert decorated_test_function(3) == test_function(3) - - -def test_activate_doesnt_change_signature_for_method(): - class TestCase(object): - - def test_function(self, a, b=None): - return (self, a, b) - - test_case = TestCase() - argspec = getargspec(test_case.test_function) - decorated_test_function = responses.activate(test_case.test_function) - assert argspec == getargspec(decorated_test_function) - assert decorated_test_function(1, 2) == test_case.test_function(1, 2) - assert decorated_test_function(3) == test_case.test_function(3) - - -def test_response_cookies(): - body = b'test callback' - status = 200 - headers = {'set-cookie': 'session_id=12345; a=b; c=d'} - url = 'http://example.com/' - - def request_callback(request): - return (status, headers, body) - - @responses.activate - def run(): - responses.add_callback(responses.GET, url, request_callback) - resp = requests.get(url) - assert resp.text == "test callback" - assert resp.status_code == status - assert 'session_id' in resp.cookies - assert resp.cookies['session_id'] == '12345' - assert resp.cookies['a'] == 'b' - assert resp.cookies['c'] == 'd' - run() - assert_reset() - - -def test_assert_all_requests_are_fired(): - def run(): - with pytest.raises(AssertionError) as excinfo: - with responses.RequestsMock( - assert_all_requests_are_fired=True) as m: - m.add(responses.GET, 'http://example.com', body=b'test') - assert 'http://example.com' in 
str(excinfo.value) - assert responses.GET in str(excinfo) - - # check that assert_all_requests_are_fired default to True - with pytest.raises(AssertionError): - with responses.RequestsMock() as m: - m.add(responses.GET, 'http://example.com', body=b'test') - - # check that assert_all_requests_are_fired doesn't swallow exceptions - with pytest.raises(ValueError): - with responses.RequestsMock() as m: - m.add(responses.GET, 'http://example.com', body=b'test') - raise ValueError() - - run() - assert_reset() - - -def test_allow_redirects_samehost(): - redirecting_url = 'http://example.com' - final_url_path = '/1' - final_url = '{0}{1}'.format(redirecting_url, final_url_path) - url_re = re.compile(r'^http://example.com(/)?(\d+)?$') - - def request_callback(request): - # endpoint of chained redirect - if request.url.endswith(final_url_path): - return 200, (), b'test' - # otherwise redirect to an integer path - else: - if request.url.endswith('/0'): - n = 1 - else: - n = 0 - redirect_headers = {'location': '/{0!s}'.format(n)} - return 301, redirect_headers, None - - def run(): - # setup redirect - with responses.mock: - responses.add_callback(responses.GET, url_re, request_callback) - resp_no_redirects = requests.get(redirecting_url, - allow_redirects=False) - assert resp_no_redirects.status_code == 301 - assert len(responses.calls) == 1 # 1x300 - assert responses.calls[0][1].status_code == 301 - assert_reset() - - with responses.mock: - responses.add_callback(responses.GET, url_re, request_callback) - resp_yes_redirects = requests.get(redirecting_url, - allow_redirects=True) - assert len(responses.calls) == 3 # 2x300 + 1x200 - assert len(resp_yes_redirects.history) == 2 - assert resp_yes_redirects.status_code == 200 - assert final_url == resp_yes_redirects.url - status_codes = [call[1].status_code for call in responses.calls] - assert status_codes == [301, 301, 200] - assert_reset() - - run() - assert_reset() diff --git a/moto/packages/responses/tox.ini b/moto/packages/responses/tox.ini deleted file mode 100644 index 0a31c03ab..000000000 --- a/moto/packages/responses/tox.ini +++ /dev/null @@ -1,11 +0,0 @@ - -[tox] -envlist = {py26,py27,py32,py33,py34,py35} - -[testenv] -deps = - pytest - pytest-cov - pytest-flakes -commands = - py.test . 
--cov responses --cov-report term-missing --flakes diff --git a/moto/rds2/models.py b/moto/rds2/models.py index 268ae5af2..29fa95959 100644 --- a/moto/rds2/models.py +++ b/moto/rds2/models.py @@ -103,6 +103,8 @@ class Database(BaseModel): if not self.option_group_name and self.engine in self.default_option_groups: self.option_group_name = self.default_option_groups[self.engine] self.character_set_name = kwargs.get('character_set_name', None) + self.iam_database_authentication_enabled = False + self.dbi_resource_id = "db-M5ENSHXFPU6XHZ4G4ZEI5QIO2U" self.tags = kwargs.get('tags', []) @property @@ -142,6 +144,7 @@ class Database(BaseModel): {{ database.multi_az }} {{ database.db_instance_identifier }} + {{ database.dbi_resource_id }} 03:50-04:20 wed:06:38-wed:07:08 @@ -163,6 +166,7 @@ class Database(BaseModel): {{ database.source_db_identifier }} {% endif %} {{ database.engine }} + {{database.iam_database_authentication_enabled }} {{ database.license_model }} {{ database.engine_version }} diff --git a/moto/rds2/responses.py b/moto/rds2/responses.py index 3e093221d..eddb0042b 100644 --- a/moto/rds2/responses.py +++ b/moto/rds2/responses.py @@ -123,7 +123,7 @@ class RDS2Response(BaseResponse): start = all_ids.index(marker) + 1 else: start = 0 - page_size = self._get_param('MaxRecords', 50) # the default is 100, but using 50 to make testing easier + page_size = self._get_int_param('MaxRecords', 50) # the default is 100, but using 50 to make testing easier instances_resp = all_instances[start:start + page_size] next_marker = None if len(all_instances) > start + page_size: diff --git a/moto/redshift/exceptions.py b/moto/redshift/exceptions.py index a89ed5a04..b0cef57ad 100644 --- a/moto/redshift/exceptions.py +++ b/moto/redshift/exceptions.py @@ -58,6 +58,21 @@ class InvalidSubnetError(RedshiftClientError): "Subnet {0} not found.".format(subnet_identifier)) +class SnapshotCopyGrantAlreadyExistsFaultError(RedshiftClientError): + def __init__(self, snapshot_copy_grant_name): + super(SnapshotCopyGrantAlreadyExistsFaultError, self).__init__( + 'SnapshotCopyGrantAlreadyExistsFault', + "Cannot create the snapshot copy grant because a grant " + "with the identifier '{0}' already exists".format(snapshot_copy_grant_name)) + + +class SnapshotCopyGrantNotFoundFaultError(RedshiftClientError): + def __init__(self, snapshot_copy_grant_name): + super(SnapshotCopyGrantNotFoundFaultError, self).__init__( + 'SnapshotCopyGrantNotFoundFault', + "Snapshot copy grant not found: {0}".format(snapshot_copy_grant_name)) + + class ClusterSnapshotNotFoundError(RedshiftClientError): def __init__(self, snapshot_identifier): super(ClusterSnapshotNotFoundError, self).__init__( @@ -93,3 +108,24 @@ class ResourceNotFoundFaultError(RedshiftClientError): msg = message super(ResourceNotFoundFaultError, self).__init__( 'ResourceNotFoundFault', msg) + + +class SnapshotCopyDisabledFaultError(RedshiftClientError): + def __init__(self, cluster_identifier): + super(SnapshotCopyDisabledFaultError, self).__init__( + 'SnapshotCopyDisabledFault', + "Cannot modify retention period because snapshot copy is disabled on Cluster {0}.".format(cluster_identifier)) + + +class SnapshotCopyAlreadyDisabledFaultError(RedshiftClientError): + def __init__(self, cluster_identifier): + super(SnapshotCopyAlreadyDisabledFaultError, self).__init__( + 'SnapshotCopyAlreadyDisabledFault', + "Snapshot Copy is already disabled on Cluster {0}.".format(cluster_identifier)) + + +class SnapshotCopyAlreadyEnabledFaultError(RedshiftClientError): + def __init__(self, 
cluster_identifier): + super(SnapshotCopyAlreadyEnabledFaultError, self).__init__( + 'SnapshotCopyAlreadyEnabledFault', + "Snapshot Copy is already enabled on Cluster {0}.".format(cluster_identifier)) diff --git a/moto/redshift/models.py b/moto/redshift/models.py index fa642ef01..4eb9d6b5c 100644 --- a/moto/redshift/models.py +++ b/moto/redshift/models.py @@ -4,6 +4,7 @@ import copy import datetime import boto.redshift +from botocore.exceptions import ClientError from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds @@ -17,7 +18,12 @@ from .exceptions import ( ClusterSubnetGroupNotFoundError, InvalidParameterValueError, InvalidSubnetError, - ResourceNotFoundFaultError + ResourceNotFoundFaultError, + SnapshotCopyAlreadyDisabledFaultError, + SnapshotCopyAlreadyEnabledFaultError, + SnapshotCopyDisabledFaultError, + SnapshotCopyGrantAlreadyExistsFaultError, + SnapshotCopyGrantNotFoundFaultError, ) @@ -67,7 +73,7 @@ class Cluster(TaggableResourceMixin, BaseModel): preferred_maintenance_window, cluster_parameter_group_name, automated_snapshot_retention_period, port, cluster_version, allow_version_upgrade, number_of_nodes, publicly_accessible, - encrypted, region_name, tags=None): + encrypted, region_name, tags=None, iam_roles_arn=None): super(Cluster, self).__init__(region_name, tags) self.redshift_backend = redshift_backend self.cluster_identifier = cluster_identifier @@ -112,6 +118,8 @@ class Cluster(TaggableResourceMixin, BaseModel): else: self.number_of_nodes = 1 + self.iam_roles_arn = iam_roles_arn or [] + @classmethod def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name): redshift_backend = redshift_backends[region_name] @@ -194,7 +202,7 @@ class Cluster(TaggableResourceMixin, BaseModel): return self.cluster_identifier def to_json(self): - return { + json_response = { "MasterUsername": self.master_username, "MasterUserPassword": "****", "ClusterVersion": self.cluster_version, @@ -228,7 +236,32 @@ class Cluster(TaggableResourceMixin, BaseModel): "Port": self.port }, "PendingModifiedValues": [], - "Tags": self.tags + "Tags": self.tags, + "IamRoles": [{ + "ApplyStatus": "in-sync", + "IamRoleArn": iam_role_arn + } for iam_role_arn in self.iam_roles_arn] + } + + try: + json_response['ClusterSnapshotCopyStatus'] = self.cluster_snapshot_copy_status + except AttributeError: + pass + return json_response + + +class SnapshotCopyGrant(TaggableResourceMixin, BaseModel): + + resource_type = 'snapshotcopygrant' + + def __init__(self, snapshot_copy_grant_name, kms_key_id): + self.snapshot_copy_grant_name = snapshot_copy_grant_name + self.kms_key_id = kms_key_id + + def to_json(self): + return { + "SnapshotCopyGrantName": self.snapshot_copy_grant_name, + "KmsKeyId": self.kms_key_id } @@ -351,7 +384,7 @@ class Snapshot(TaggableResourceMixin, BaseModel): resource_type = 'snapshot' - def __init__(self, cluster, snapshot_identifier, region_name, tags=None): + def __init__(self, cluster, snapshot_identifier, region_name, tags=None, iam_roles_arn=None): super(Snapshot, self).__init__(region_name, tags) self.cluster = copy.copy(cluster) self.snapshot_identifier = snapshot_identifier @@ -359,6 +392,7 @@ class Snapshot(TaggableResourceMixin, BaseModel): self.status = 'available' self.create_time = iso_8601_datetime_with_milliseconds( datetime.datetime.now()) + self.iam_roles_arn = iam_roles_arn or [] @property def resource_id(self): @@ -380,7 +414,11 @@ class 
Snapshot(TaggableResourceMixin, BaseModel): 'NodeType': self.cluster.node_type, 'NumberOfNodes': self.cluster.number_of_nodes, 'DBName': self.cluster.db_name, - 'Tags': self.tags + 'Tags': self.tags, + "IamRoles": [{ + "ApplyStatus": "in-sync", + "IamRoleArn": iam_role_arn + } for iam_role_arn in self.iam_roles_arn] } @@ -410,6 +448,7 @@ class RedshiftBackend(BaseBackend): 'snapshot': self.snapshots, 'subnetgroup': self.subnet_groups } + self.snapshot_copy_grants = {} def reset(self): ec2_backend = self.ec2_backend @@ -417,6 +456,43 @@ class RedshiftBackend(BaseBackend): self.__dict__ = {} self.__init__(ec2_backend, region_name) + def enable_snapshot_copy(self, **kwargs): + cluster_identifier = kwargs['cluster_identifier'] + cluster = self.clusters[cluster_identifier] + if not hasattr(cluster, 'cluster_snapshot_copy_status'): + if cluster.encrypted == 'true' and kwargs['snapshot_copy_grant_name'] is None: + raise ClientError( + 'InvalidParameterValue', + 'SnapshotCopyGrantName is required for Snapshot Copy ' + 'on KMS encrypted clusters.' + ) + status = { + 'DestinationRegion': kwargs['destination_region'], + 'RetentionPeriod': kwargs['retention_period'], + 'SnapshotCopyGrantName': kwargs['snapshot_copy_grant_name'], + } + cluster.cluster_snapshot_copy_status = status + return cluster + else: + raise SnapshotCopyAlreadyEnabledFaultError(cluster_identifier) + + def disable_snapshot_copy(self, **kwargs): + cluster_identifier = kwargs['cluster_identifier'] + cluster = self.clusters[cluster_identifier] + if hasattr(cluster, 'cluster_snapshot_copy_status'): + del cluster.cluster_snapshot_copy_status + return cluster + else: + raise SnapshotCopyAlreadyDisabledFaultError(cluster_identifier) + + def modify_snapshot_copy_retention_period(self, cluster_identifier, retention_period): + cluster = self.clusters[cluster_identifier] + if hasattr(cluster, 'cluster_snapshot_copy_status'): + cluster.cluster_snapshot_copy_status['RetentionPeriod'] = retention_period + return cluster + else: + raise SnapshotCopyDisabledFaultError(cluster_identifier) + def create_cluster(self, **cluster_kwargs): cluster_identifier = cluster_kwargs['cluster_identifier'] cluster = Cluster(self, **cluster_kwargs) @@ -568,6 +644,31 @@ class RedshiftBackend(BaseBackend): create_kwargs.update(kwargs) return self.create_cluster(**create_kwargs) + def create_snapshot_copy_grant(self, **kwargs): + snapshot_copy_grant_name = kwargs['snapshot_copy_grant_name'] + kms_key_id = kwargs['kms_key_id'] + if snapshot_copy_grant_name not in self.snapshot_copy_grants: + snapshot_copy_grant = SnapshotCopyGrant(snapshot_copy_grant_name, kms_key_id) + self.snapshot_copy_grants[snapshot_copy_grant_name] = snapshot_copy_grant + return snapshot_copy_grant + raise SnapshotCopyGrantAlreadyExistsFaultError(snapshot_copy_grant_name) + + def delete_snapshot_copy_grant(self, **kwargs): + snapshot_copy_grant_name = kwargs['snapshot_copy_grant_name'] + if snapshot_copy_grant_name in self.snapshot_copy_grants: + return self.snapshot_copy_grants.pop(snapshot_copy_grant_name) + raise SnapshotCopyGrantNotFoundFaultError(snapshot_copy_grant_name) + + def describe_snapshot_copy_grants(self, **kwargs): + copy_grants = self.snapshot_copy_grants.values() + snapshot_copy_grant_name = kwargs['snapshot_copy_grant_name'] + if snapshot_copy_grant_name: + if snapshot_copy_grant_name in self.snapshot_copy_grants: + return [self.snapshot_copy_grants[snapshot_copy_grant_name]] + else: + raise SnapshotCopyGrantNotFoundFaultError(snapshot_copy_grant_name) + return copy_grants + 
def _get_resource_from_arn(self, arn): try: arn_breakdown = arn.split(':') diff --git a/moto/redshift/responses.py b/moto/redshift/responses.py index a320f9cae..69fbac7c1 100644 --- a/moto/redshift/responses.py +++ b/moto/redshift/responses.py @@ -99,6 +99,12 @@ class RedshiftResponse(BaseResponse): vpc_security_group_ids = self._get_multi_param('VpcSecurityGroupIds.VpcSecurityGroupId') return vpc_security_group_ids + def _get_iam_roles(self): + iam_roles = self._get_multi_param('IamRoles.member') + if not iam_roles: + iam_roles = self._get_multi_param('IamRoles.IamRoleArn') + return iam_roles + def _get_subnet_ids(self): subnet_ids = self._get_multi_param('SubnetIds.member') if not subnet_ids: @@ -127,7 +133,8 @@ class RedshiftResponse(BaseResponse): "publicly_accessible": self._get_param("PubliclyAccessible"), "encrypted": self._get_param("Encrypted"), "region_name": self.region, - "tags": self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')) + "tags": self.unpack_complex_list_params('Tags.Tag', ('Key', 'Value')), + "iam_roles_arn": self._get_iam_roles(), } cluster = self.redshift_backend.create_cluster(**cluster_kwargs).to_json() cluster['ClusterStatus'] = 'creating' @@ -162,6 +169,7 @@ class RedshiftResponse(BaseResponse): "automated_snapshot_retention_period": self._get_int_param( 'AutomatedSnapshotRetentionPeriod'), "region_name": self.region, + "iam_roles_arn": self._get_iam_roles(), } cluster = self.redshift_backend.restore_from_cluster_snapshot(**restore_kwargs).to_json() cluster['ClusterStatus'] = 'creating' @@ -209,6 +217,7 @@ class RedshiftResponse(BaseResponse): "number_of_nodes": self._get_int_param('NumberOfNodes'), "publicly_accessible": self._get_param("PubliclyAccessible"), "encrypted": self._get_param("Encrypted"), + "iam_roles_arn": self._get_iam_roles(), } cluster_kwargs = {} # We only want parameters that were actually passed in, otherwise @@ -457,6 +466,55 @@ class RedshiftResponse(BaseResponse): } }) + def create_snapshot_copy_grant(self): + copy_grant_kwargs = { + 'snapshot_copy_grant_name': self._get_param('SnapshotCopyGrantName'), + 'kms_key_id': self._get_param('KmsKeyId'), + 'region_name': self._get_param('Region'), + } + + copy_grant = self.redshift_backend.create_snapshot_copy_grant(**copy_grant_kwargs) + return self.get_response({ + "CreateSnapshotCopyGrantResponse": { + "CreateSnapshotCopyGrantResult": { + "SnapshotCopyGrant": copy_grant.to_json() + }, + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) + + def delete_snapshot_copy_grant(self): + copy_grant_kwargs = { + 'snapshot_copy_grant_name': self._get_param('SnapshotCopyGrantName'), + } + self.redshift_backend.delete_snapshot_copy_grant(**copy_grant_kwargs) + return self.get_response({ + "DeleteSnapshotCopyGrantResponse": { + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) + + def describe_snapshot_copy_grants(self): + copy_grant_kwargs = { + 'snapshot_copy_grant_name': self._get_param('SnapshotCopyGrantName'), + } + + copy_grants = self.redshift_backend.describe_snapshot_copy_grants(**copy_grant_kwargs) + return self.get_response({ + "DescribeSnapshotCopyGrantsResponse": { + "DescribeSnapshotCopyGrantsResult": { + "SnapshotCopyGrants": [copy_grant.to_json() for copy_grant in copy_grants] + }, + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) + def create_tags(self): resource_name = self._get_param('ResourceName') tags = self.unpack_complex_list_params('Tags.Tag', ('Key', 
'Value')) @@ -501,3 +559,58 @@ class RedshiftResponse(BaseResponse): } } }) + + def enable_snapshot_copy(self): + snapshot_copy_kwargs = { + 'cluster_identifier': self._get_param('ClusterIdentifier'), + 'destination_region': self._get_param('DestinationRegion'), + 'retention_period': self._get_param('RetentionPeriod', 7), + 'snapshot_copy_grant_name': self._get_param('SnapshotCopyGrantName'), + } + cluster = self.redshift_backend.enable_snapshot_copy(**snapshot_copy_kwargs) + + return self.get_response({ + "EnableSnapshotCopyResponse": { + "EnableSnapshotCopyResult": { + "Cluster": cluster.to_json() + }, + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) + + def disable_snapshot_copy(self): + snapshot_copy_kwargs = { + 'cluster_identifier': self._get_param('ClusterIdentifier'), + } + cluster = self.redshift_backend.disable_snapshot_copy(**snapshot_copy_kwargs) + + return self.get_response({ + "DisableSnapshotCopyResponse": { + "DisableSnapshotCopyResult": { + "Cluster": cluster.to_json() + }, + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) + + def modify_snapshot_copy_retention_period(self): + snapshot_copy_kwargs = { + 'cluster_identifier': self._get_param('ClusterIdentifier'), + 'retention_period': self._get_param('RetentionPeriod'), + } + cluster = self.redshift_backend.modify_snapshot_copy_retention_period(**snapshot_copy_kwargs) + + return self.get_response({ + "ModifySnapshotCopyRetentionPeriodResponse": { + "ModifySnapshotCopyRetentionPeriodResult": { + "Clusters": [cluster.to_json()] + }, + "ResponseMetadata": { + "RequestId": "384ac68d-3775-11df-8963-01868b7c937a", + } + } + }) diff --git a/moto/resourcegroupstaggingapi/models.py b/moto/resourcegroupstaggingapi/models.py index fbc54454b..4aec63aa6 100644 --- a/moto/resourcegroupstaggingapi/models.py +++ b/moto/resourcegroupstaggingapi/models.py @@ -119,15 +119,17 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend): def tag_filter(tag_list): result = [] + if tag_filters: + for tag in tag_list: + temp_result = [] + for f in filters: + f_result = f(tag['Key'], tag['Value']) + temp_result.append(f_result) + result.append(all(temp_result)) - for tag in tag_list: - temp_result = [] - for f in filters: - f_result = f(tag['Key'], tag['Value']) - temp_result.append(f_result) - result.append(all(temp_result)) - - return any(result) + return any(result) + else: + return True # Do S3, resource type s3 if not resource_type_filters or 's3' in resource_type_filters: @@ -210,6 +212,23 @@ class ResourceGroupsTaggingAPIBackend(BaseBackend): # TODO add these to the keys and values functions / combine functions # ELB + def get_elbv2_tags(arn): + result = [] + for key, value in self.elbv2_backend.load_balancers[elb.arn].tags.items(): + result.append({'Key': key, 'Value': value}) + return result + + if not resource_type_filters or 'elasticloadbalancer' in resource_type_filters or 'elasticloadbalancer:loadbalancer' in resource_type_filters: + for elb in self.elbv2_backend.load_balancers.values(): + tags = get_elbv2_tags(elb.arn) + # if 'elasticloadbalancer:loadbalancer' in resource_type_filters: + # from IPython import embed + # embed() + if not tag_filter(tags): # Skip if no tags, or invalid filter + continue + + yield {'ResourceARN': '{0}'.format(elb.arn), 'Tags': tags} + # EMR Cluster # Glacier Vault diff --git a/moto/route53/models.py b/moto/route53/models.py index af8bb690a..d483d22e2 100644 --- a/moto/route53/models.py +++ b/moto/route53/models.py @@ -140,7 
+140,9 @@ class RecordSet(BaseModel): {% if record_set.region %} {{ record_set.region }} {% endif %} - {{ record_set.ttl }} + {% if record_set.ttl %} + {{ record_set.ttl }} + {% endif %} {% for record in record_set.records %} diff --git a/moto/route53/responses.py b/moto/route53/responses.py index 00e5c60a5..6679e7945 100644 --- a/moto/route53/responses.py +++ b/moto/route53/responses.py @@ -150,7 +150,7 @@ class Route53(BaseResponse): elif method == "GET": querystring = parse_qs(parsed_url.query) - template = Template(LIST_RRSET_REPONSE) + template = Template(LIST_RRSET_RESPONSE) start_type = querystring.get("type", [None])[0] start_name = querystring.get("name", [None])[0] record_sets = the_zone.get_record_sets(start_type, start_name) @@ -182,9 +182,9 @@ class Route53(BaseResponse): elif method == "DELETE": health_check_id = parsed_url.path.split("/")[-1] route53_backend.delete_health_check(health_check_id) - return 200, headers, DELETE_HEALTH_CHECK_REPONSE + return 200, headers, DELETE_HEALTH_CHECK_RESPONSE elif method == "GET": - template = Template(LIST_HEALTH_CHECKS_REPONSE) + template = Template(LIST_HEALTH_CHECKS_RESPONSE) health_checks = route53_backend.get_health_checks() return 200, headers, template.render(health_checks=health_checks) @@ -248,7 +248,7 @@ CHANGE_TAGS_FOR_RESOURCE_RESPONSE = """ +LIST_RRSET_RESPONSE = """ {% for record_set in record_sets %} {{ record_set.to_xml() }} @@ -350,7 +350,7 @@ CREATE_HEALTH_CHECK_RESPONSE = """ {{ health_check.to_xml() }} """ -LIST_HEALTH_CHECKS_REPONSE = """ +LIST_HEALTH_CHECKS_RESPONSE = """ {% for health_check in health_checks %} @@ -361,6 +361,6 @@ LIST_HEALTH_CHECKS_REPONSE = """ {{ health_checks|length }} """ -DELETE_HEALTH_CHECK_REPONSE = """ +DELETE_HEALTH_CHECK_RESPONSE = """ """ diff --git a/moto/s3/exceptions.py b/moto/s3/exceptions.py index 24704e7ef..8c6e291ef 100644 --- a/moto/s3/exceptions.py +++ b/moto/s3/exceptions.py @@ -111,3 +111,60 @@ class MalformedXML(S3ClientError): "MalformedXML", "The XML you provided was not well-formed or did not validate against our published schema", *args, **kwargs) + + +class MalformedACLError(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(MalformedACLError, self).__init__( + "MalformedACLError", + "The XML you provided was not well-formed or did not validate against our published schema", + *args, **kwargs) + + +class InvalidTargetBucketForLogging(S3ClientError): + code = 400 + + def __init__(self, msg): + super(InvalidTargetBucketForLogging, self).__init__("InvalidTargetBucketForLogging", msg) + + +class CrossLocationLoggingProhibitted(S3ClientError): + code = 403 + + def __init__(self): + super(CrossLocationLoggingProhibitted, self).__init__( + "CrossLocationLoggingProhibitted", + "Cross S3 location logging not allowed." 
+ ) + + +class InvalidNotificationARN(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(InvalidNotificationARN, self).__init__( + "InvalidArgument", + "The ARN is not well formed", + *args, **kwargs) + + +class InvalidNotificationDestination(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(InvalidNotificationDestination, self).__init__( + "InvalidArgument", + "The notification destination service region is not valid for the bucket location constraint", + *args, **kwargs) + + +class InvalidNotificationEvent(S3ClientError): + code = 400 + + def __init__(self, *args, **kwargs): + super(InvalidNotificationEvent, self).__init__( + "InvalidArgument", + "The event is not supported for notifications", + *args, **kwargs) diff --git a/moto/s3/models.py b/moto/s3/models.py index 91d3c1e2d..3b4623d61 100644 --- a/moto/s3/models.py +++ b/moto/s3/models.py @@ -6,12 +6,16 @@ import hashlib import copy import itertools import codecs +import random +import string + import six from bisect import insort from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds, rfc_1123_datetime -from .exceptions import BucketAlreadyExists, MissingBucket, InvalidPart, EntityTooSmall, MissingKey +from .exceptions import BucketAlreadyExists, MissingBucket, InvalidPart, EntityTooSmall, MissingKey, \ + InvalidNotificationDestination, MalformedXML from .utils import clean_key_name, _VersionedKeyStore UPLOAD_ID_BYTES = 43 @@ -270,7 +274,7 @@ def get_canned_acl(acl): grants.append(FakeGrant([ALL_USERS_GRANTEE], [PERMISSION_READ])) elif acl == 'public-read-write': grants.append(FakeGrant([ALL_USERS_GRANTEE], [ - PERMISSION_READ, PERMISSION_WRITE])) + PERMISSION_READ, PERMISSION_WRITE])) elif acl == 'authenticated-read': grants.append( FakeGrant([AUTHENTICATED_USERS_GRANTEE], [PERMISSION_READ])) @@ -282,7 +286,7 @@ def get_canned_acl(acl): pass # TODO: bucket owner, EC2 Read elif acl == 'log-delivery-write': grants.append(FakeGrant([LOG_DELIVERY_GRANTEE], [ - PERMISSION_READ_ACP, PERMISSION_WRITE])) + PERMISSION_READ_ACP, PERMISSION_WRITE])) else: assert False, 'Unknown canned acl: %s' % (acl,) return FakeAcl(grants=grants) @@ -307,18 +311,35 @@ class FakeTag(BaseModel): self.value = value +class LifecycleFilter(BaseModel): + + def __init__(self, prefix=None, tag=None, and_filter=None): + self.prefix = prefix or '' + self.tag = tag + self.and_filter = and_filter + + +class LifecycleAndFilter(BaseModel): + + def __init__(self, prefix=None, tags=None): + self.prefix = prefix or '' + self.tags = tags + + class LifecycleRule(BaseModel): - def __init__(self, id=None, prefix=None, status=None, expiration_days=None, - expiration_date=None, transition_days=None, + def __init__(self, id=None, prefix=None, lc_filter=None, status=None, expiration_days=None, + expiration_date=None, transition_days=None, expired_object_delete_marker=None, transition_date=None, storage_class=None): self.id = id self.prefix = prefix + self.filter = lc_filter self.status = status self.expiration_days = expiration_days self.expiration_date = expiration_date self.transition_days = transition_days self.transition_date = transition_date + self.expired_object_delete_marker = expired_object_delete_marker self.storage_class = storage_class @@ -333,6 +354,26 @@ class CorsRule(BaseModel): self.max_age_seconds = max_age_seconds +class Notification(BaseModel): + + def __init__(self, arn, events, filters=None, id=None): + self.id = id if id else 
''.join(random.choice(string.ascii_letters + string.digits) for _ in range(50)) + self.arn = arn + self.events = events + self.filters = filters if filters else {} + + +class NotificationConfiguration(BaseModel): + + def __init__(self, topic=None, queue=None, cloud_function=None): + self.topic = [Notification(t["Topic"], t["Event"], filters=t.get("Filter"), id=t.get("Id")) for t in topic] \ + if topic else [] + self.queue = [Notification(q["Queue"], q["Event"], filters=q.get("Filter"), id=q.get("Id")) for q in queue] \ + if queue else [] + self.cloud_function = [Notification(c["CloudFunction"], c["Event"], filters=c.get("Filter"), id=c.get("Id")) + for c in cloud_function] if cloud_function else [] + + class FakeBucket(BaseModel): def __init__(self, name, region_name): @@ -347,6 +388,8 @@ class FakeBucket(BaseModel): self.acl = get_canned_acl('private') self.tags = FakeTagging() self.cors = [] + self.logging = {} + self.notification_configuration = None @property def location(self): @@ -361,12 +404,50 @@ class FakeBucket(BaseModel): for rule in rules: expiration = rule.get('Expiration') transition = rule.get('Transition') + + eodm = None + if expiration and expiration.get("ExpiredObjectDeleteMarker") is not None: + # This cannot be set if Date or Days is set: + if expiration.get("Days") or expiration.get("Date"): + raise MalformedXML() + eodm = expiration["ExpiredObjectDeleteMarker"] + + # Pull out the filter: + lc_filter = None + if rule.get("Filter"): + # Can't have both `Filter` and `Prefix` (need to check for the presence of the key): + try: + if rule["Prefix"] or not rule["Prefix"]: + raise MalformedXML() + except KeyError: + pass + + and_filter = None + if rule["Filter"].get("And"): + and_tags = [] + if rule["Filter"]["And"].get("Tag"): + if not isinstance(rule["Filter"]["And"]["Tag"], list): + rule["Filter"]["And"]["Tag"] = [rule["Filter"]["And"]["Tag"]] + + for t in rule["Filter"]["And"]["Tag"]: + and_tags.append(FakeTag(t["Key"], t.get("Value", ''))) + + and_filter = LifecycleAndFilter(prefix=rule["Filter"]["And"]["Prefix"], tags=and_tags) + + filter_tag = None + if rule["Filter"].get("Tag"): + filter_tag = FakeTag(rule["Filter"]["Tag"]["Key"], rule["Filter"]["Tag"].get("Value", '')) + + lc_filter = LifecycleFilter(prefix=rule["Filter"]["Prefix"], tag=filter_tag, and_filter=and_filter) + self.rules.append(LifecycleRule( id=rule.get('ID'), prefix=rule.get('Prefix'), + lc_filter=lc_filter, status=rule['Status'], expiration_days=expiration.get('Days') if expiration else None, expiration_date=expiration.get('Date') if expiration else None, + expired_object_delete_marker=eodm, transition_days=transition.get('Days') if transition else None, transition_date=transition.get('Date') if transition else None, storage_class=transition[ @@ -422,6 +503,59 @@ class FakeBucket(BaseModel): def tagging(self): return self.tags + def set_logging(self, logging_config, bucket_backend): + if not logging_config: + self.logging = {} + return + + from moto.s3.exceptions import InvalidTargetBucketForLogging, CrossLocationLoggingProhibitted + # Target bucket must exist in the same account (assuming all moto buckets are in the same account): + if not bucket_backend.buckets.get(logging_config["TargetBucket"]): + raise InvalidTargetBucketForLogging("The target bucket for logging does not exist.") + + # Does the target bucket have the log-delivery WRITE and READ_ACP permissions? 
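# Illustrative usage sketch, not part of this patch: one way the new bucket-logging
# validation in set_logging() could be exercised through boto3 under the S3 mock.
# The bucket names and prefix below are made-up placeholders.
import boto3
from moto import mock_s3

@mock_s3
def example_put_bucket_logging():
    client = boto3.client("s3", region_name="us-east-1")
    client.create_bucket(Bucket="source-bucket")
    client.create_bucket(Bucket="log-bucket")
    # The target bucket must grant the log-delivery group WRITE and READ_ACP,
    # otherwise set_logging() raises InvalidTargetBucketForLogging.
    client.put_bucket_acl(
        Bucket="log-bucket",
        GrantWrite='uri="http://acs.amazonaws.com/groups/s3/LogDelivery"',
        GrantReadACP='uri="http://acs.amazonaws.com/groups/s3/LogDelivery"',
    )
    client.put_bucket_logging(
        Bucket="source-bucket",
        BucketLoggingStatus={
            "LoggingEnabled": {"TargetBucket": "log-bucket", "TargetPrefix": "access-logs/"},
        },
    )
    return client.get_bucket_logging(Bucket="source-bucket")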
+ write = read_acp = False + for grant in bucket_backend.buckets[logging_config["TargetBucket"]].acl.grants: + # Must be granted to: http://acs.amazonaws.com/groups/s3/LogDelivery + for grantee in grant.grantees: + if grantee.uri == "http://acs.amazonaws.com/groups/s3/LogDelivery": + if "WRITE" in grant.permissions or "FULL_CONTROL" in grant.permissions: + write = True + + if "READ_ACP" in grant.permissions or "FULL_CONTROL" in grant.permissions: + read_acp = True + + break + + if not write or not read_acp: + raise InvalidTargetBucketForLogging("You must give the log-delivery group WRITE and READ_ACP" + " permissions to the target bucket") + + # Buckets must also exist within the same region: + if bucket_backend.buckets[logging_config["TargetBucket"]].region_name != self.region_name: + raise CrossLocationLoggingProhibitted() + + # Checks pass -- set the logging config: + self.logging = logging_config + + def set_notification_configuration(self, notification_config): + if not notification_config: + self.notification_configuration = None + return + + self.notification_configuration = NotificationConfiguration( + topic=notification_config.get("TopicConfiguration"), + queue=notification_config.get("QueueConfiguration"), + cloud_function=notification_config.get("CloudFunctionConfiguration") + ) + + # Validate that the region is correct: + for thing in ["topic", "queue", "cloud_function"]: + for t in getattr(self.notification_configuration, thing): + region = t.arn.split(":")[3] + if region != self.region_name: + raise InvalidNotificationDestination() + def set_website_configuration(self, website_configuration): self.website_configuration = website_configuration @@ -608,10 +742,18 @@ class S3Backend(BaseBackend): bucket = self.get_bucket(bucket_name) bucket.set_cors(cors_rules) + def put_bucket_logging(self, bucket_name, logging_config): + bucket = self.get_bucket(bucket_name) + bucket.set_logging(logging_config, self) + def delete_bucket_cors(self, bucket_name): bucket = self.get_bucket(bucket_name) bucket.delete_cors() + def put_bucket_notification_configuration(self, bucket_name, notification_config): + bucket = self.get_bucket(bucket_name) + bucket.set_notification_configuration(notification_config) + def initiate_multipart(self, bucket_name, key_name, metadata): bucket = self.get_bucket(bucket_name) new_multipart = FakeMultipart(key_name, metadata) @@ -683,6 +825,7 @@ class S3Backend(BaseBackend): else: key_results.add(key) + key_results = filter(lambda key: not isinstance(key, FakeDeleteMarker), key_results) key_results = sorted(key_results, key=lambda key: key.name) folder_results = [folder_name for folder_name in sorted( folder_results, key=lambda key: key)] diff --git a/moto/s3/responses.py b/moto/s3/responses.py index 6abb4f2d1..02a9ac40e 100755 --- a/moto/s3/responses.py +++ b/moto/s3/responses.py @@ -4,22 +4,24 @@ import re import six from moto.core.utils import str_to_rfc_1123_datetime -from six.moves.urllib.parse import parse_qs, urlparse +from six.moves.urllib.parse import parse_qs, urlparse, unquote import xmltodict from moto.packages.httpretty.core import HTTPrettyRequest from moto.core.responses import _TemplateEnvironmentMixin -from moto.s3bucket_path.utils import bucket_name_from_url as bucketpath_bucket_name_from_url, parse_key_name as bucketpath_parse_key_name, is_delete_keys as bucketpath_is_delete_keys +from moto.s3bucket_path.utils import bucket_name_from_url as bucketpath_bucket_name_from_url, \ + parse_key_name as bucketpath_parse_key_name, is_delete_keys as 
bucketpath_is_delete_keys - -from .exceptions import BucketAlreadyExists, S3ClientError, MissingBucket, MissingKey, InvalidPartOrder -from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey, FakeTagging, FakeTagSet, FakeTag -from .utils import bucket_name_from_url, metadata_from_headers +from .exceptions import BucketAlreadyExists, S3ClientError, MissingBucket, MissingKey, InvalidPartOrder, MalformedXML, \ + MalformedACLError, InvalidNotificationARN, InvalidNotificationEvent +from .models import s3_backend, get_canned_acl, FakeGrantee, FakeGrant, FakeAcl, FakeKey, FakeTagging, FakeTagSet, \ + FakeTag +from .utils import bucket_name_from_url, metadata_from_headers, parse_region_from_url from xml.dom import minidom -REGION_URL_REGEX = r'\.s3-(.+?)\.amazonaws\.com' + DEFAULT_REGION_NAME = 'us-east-1' @@ -55,10 +57,11 @@ class ResponseObject(_TemplateEnvironmentMixin): if not host: host = urlparse(request.url).netloc - if (not host or host.startswith('localhost') or + if (not host or host.startswith('localhost') or host.startswith('localstack') or re.match(r'^[^.]+$', host) or re.match(r'^.*\.svc\.cluster\.local$', host)): - # Default to path-based buckets for (1) localhost, (2) local host names that do not - # contain a "." (e.g., Docker container host names), or (3) kubernetes host names + # Default to path-based buckets for (1) localhost, (2) localstack hosts (e.g. localstack.dev), + # (3) local host names that do not contain a "." (e.g., Docker container host names), or + # (4) kubernetes host names return False match = re.match(r'^([^\[\]:]+)(:\d+)?$', host) @@ -70,8 +73,9 @@ class ResponseObject(_TemplateEnvironmentMixin): match = re.match(r'^\[(.+)\](:\d+)?$', host) if match: - match = re.match(r'^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)([\dA-F]{1,4}(\3|:\b)|\2){5}(([\dA-F]{1,4}(\3|:\b|$)|\2){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})\Z', - match.groups()[0], re.IGNORECASE) + match = re.match( + r'^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)([\dA-F]{1,4}(\3|:\b)|\2){5}(([\dA-F]{1,4}(\3|:\b|$)|\2){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})\Z', + match.groups()[0], re.IGNORECASE) if match: return False @@ -125,10 +129,7 @@ class ResponseObject(_TemplateEnvironmentMixin): parsed_url = urlparse(full_url) querystring = parse_qs(parsed_url.query, keep_blank_values=True) method = request.method - region_name = DEFAULT_REGION_NAME - region_match = re.search(REGION_URL_REGEX, full_url) - if region_match: - region_name = region_match.groups()[0] + region_name = parse_region_from_url(full_url) bucket_name = self.parse_bucket_name_from_url(request, full_url) if not bucket_name: @@ -169,7 +170,7 @@ class ResponseObject(_TemplateEnvironmentMixin): # HEAD (which the real API responds with), and instead # raises NoSuchBucket, leading to inconsistency in # error response between real and mocked responses. 
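# Illustrative sketch, not part of this patch: the HEAD-bucket change below returns
# a 404 with an empty body for a missing bucket, which botocore surfaces as a
# ClientError whose error code is "404". The bucket name is a placeholder.
import boto3
from botocore.exceptions import ClientError
from moto import mock_s3

@mock_s3
def example_head_missing_bucket():
    client = boto3.client("s3", region_name="us-east-1")
    try:
        client.head_bucket(Bucket="bucket-that-does-not-exist")
    except ClientError as err:
        return err.response["Error"]["Code"]  # expected to be "404"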
- return 404, {}, "Not Found" + return 404, {}, "" return 200, {}, "" def _bucket_response_get(self, bucket_name, querystring, headers): @@ -229,6 +230,13 @@ class ResponseObject(_TemplateEnvironmentMixin): return 404, {}, template.render(bucket_name=bucket_name) template = self.response_template(S3_BUCKET_TAGGING_RESPONSE) return template.render(bucket=bucket) + elif 'logging' in querystring: + bucket = self.backend.get_bucket(bucket_name) + if not bucket.logging: + template = self.response_template(S3_NO_LOGGING_CONFIG) + return 200, {}, template.render() + template = self.response_template(S3_LOGGING_CONFIG) + return 200, {}, template.render(logging=bucket.logging) elif "cors" in querystring: bucket = self.backend.get_bucket(bucket_name) if len(bucket.cors) == 0: @@ -236,6 +244,13 @@ class ResponseObject(_TemplateEnvironmentMixin): return 404, {}, template.render(bucket_name=bucket_name) template = self.response_template(S3_BUCKET_CORS_RESPONSE) return template.render(bucket=bucket) + elif "notification" in querystring: + bucket = self.backend.get_bucket(bucket_name) + if not bucket.notification_configuration: + return 200, {}, "" + template = self.response_template(S3_GET_BUCKET_NOTIFICATION_CONFIG) + return template.render(bucket=bucket) + elif 'versions' in querystring: delimiter = querystring.get('delimiter', [None])[0] encoding_type = querystring.get('encoding-type', [None])[0] @@ -324,8 +339,7 @@ class ResponseObject(_TemplateEnvironmentMixin): limit = continuation_token or start_after result_keys = self._get_results_from_token(result_keys, limit) - result_keys, is_truncated, \ - next_continuation_token = self._truncate_result(result_keys, max_keys) + result_keys, is_truncated, next_continuation_token = self._truncate_result(result_keys, max_keys) return template.render( bucket=bucket, @@ -380,8 +394,11 @@ class ResponseObject(_TemplateEnvironmentMixin): self.backend.set_bucket_policy(bucket_name, body) return 'True' elif 'acl' in querystring: - # TODO: Support the XML-based ACL format - self.backend.set_bucket_acl(bucket_name, self._acl_from_headers(request.headers)) + # Headers are first. 
If not set, then look at the body (consistent with the documentation): + acls = self._acl_from_headers(request.headers) + if not acls: + acls = self._acl_from_xml(body) + self.backend.set_bucket_acl(bucket_name, acls) return "" elif "tagging" in querystring: tagging = self._bucket_tagging_from_xml(body) @@ -391,12 +408,27 @@ class ResponseObject(_TemplateEnvironmentMixin): self.backend.set_bucket_website_configuration(bucket_name, body) return "" elif "cors" in querystring: - from moto.s3.exceptions import MalformedXML try: self.backend.put_bucket_cors(bucket_name, self._cors_from_xml(body)) return "" except KeyError: raise MalformedXML() + elif "logging" in querystring: + try: + self.backend.put_bucket_logging(bucket_name, self._logging_from_xml(body)) + return "" + except KeyError: + raise MalformedXML() + elif "notification" in querystring: + try: + self.backend.put_bucket_notification_configuration(bucket_name, + self._notification_config_from_xml(body)) + return "" + except KeyError: + raise MalformedXML() + except Exception as e: + raise e + else: if body: try: @@ -515,6 +547,7 @@ class ResponseObject(_TemplateEnvironmentMixin): def toint(i): return int(i) if i else None + begin, end = map(toint, rspec.split('-')) if begin is not None: # byte range end = last if end is None else min(end, last) @@ -631,7 +664,7 @@ class ResponseObject(_TemplateEnvironmentMixin): upload_id = query['uploadId'][0] part_number = int(query['partNumber'][0]) if 'x-amz-copy-source' in request.headers: - src = request.headers.get("x-amz-copy-source").lstrip("/") + src = unquote(request.headers.get("x-amz-copy-source")).lstrip("/") src_bucket, src_key = src.split("/", 1) src_range = request.headers.get( 'x-amz-copy-source-range', '').split("bytes=")[-1] @@ -673,7 +706,7 @@ class ResponseObject(_TemplateEnvironmentMixin): if 'x-amz-copy-source' in request.headers: # Copy key - src_key_parsed = urlparse(request.headers.get("x-amz-copy-source")) + src_key_parsed = urlparse(unquote(request.headers.get("x-amz-copy-source"))) src_bucket, src_key = src_key_parsed.path.lstrip("/").split("/", 1) src_version_id = parse_qs(src_key_parsed.query).get( 'versionId', [None])[0] @@ -731,6 +764,58 @@ class ResponseObject(_TemplateEnvironmentMixin): else: return 404, response_headers, "" + def _acl_from_xml(self, xml): + parsed_xml = xmltodict.parse(xml) + if not parsed_xml.get("AccessControlPolicy"): + raise MalformedACLError() + + # The owner is needed for some reason... + if not parsed_xml["AccessControlPolicy"].get("Owner"): + # TODO: Validate that the Owner is actually correct. 
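# Illustrative sketch, not part of this patch: the XML ACL path handled by
# _acl_from_xml() here can be driven from boto3, which serialises the
# AccessControlPolicy argument into the request body. The bucket name and the
# grant below are placeholders.
import boto3
from moto import mock_s3

@mock_s3
def example_put_bucket_acl_body():
    client = boto3.client("s3", region_name="us-east-1")
    client.create_bucket(Bucket="acl-example-bucket")
    owner = client.get_bucket_acl(Bucket="acl-example-bucket")["Owner"]
    client.put_bucket_acl(
        Bucket="acl-example-bucket",
        AccessControlPolicy={
            "Owner": owner,  # the parser requires an Owner element
            "Grants": [
                {
                    "Grantee": {
                        "Type": "Group",
                        "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery",
                    },
                    "Permission": "WRITE",
                },
            ],
        },
    )
    return client.get_bucket_acl(Bucket="acl-example-bucket")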
+ raise MalformedACLError() + + # If empty, then no ACLs: + if parsed_xml["AccessControlPolicy"].get("AccessControlList") is None: + return [] + + if not parsed_xml["AccessControlPolicy"]["AccessControlList"].get("Grant"): + raise MalformedACLError() + + permissions = [ + "READ", + "WRITE", + "READ_ACP", + "WRITE_ACP", + "FULL_CONTROL" + ] + + if not isinstance(parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"], list): + parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"] = \ + [parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"]] + + grants = self._get_grants_from_xml(parsed_xml["AccessControlPolicy"]["AccessControlList"]["Grant"], + MalformedACLError, permissions) + return FakeAcl(grants) + + def _get_grants_from_xml(self, grant_list, exception_type, permissions): + grants = [] + for grant in grant_list: + if grant.get("Permission", "") not in permissions: + raise exception_type() + + if grant["Grantee"].get("@xsi:type", "") not in ["CanonicalUser", "AmazonCustomerByEmail", "Group"]: + raise exception_type() + + # TODO: Verify that the proper grantee data is supplied based on the type. + + grants.append(FakeGrant( + [FakeGrantee(id=grant["Grantee"].get("ID", ""), display_name=grant["Grantee"].get("DisplayName", ""), + uri=grant["Grantee"].get("URI", ""))], + [grant["Permission"]]) + ) + + return grants + def _acl_from_headers(self, headers): canned_acl = headers.get('x-amz-acl', '') if canned_acl: @@ -814,6 +899,110 @@ class ResponseObject(_TemplateEnvironmentMixin): return [parsed_xml["CORSConfiguration"]["CORSRule"]] + def _logging_from_xml(self, xml): + parsed_xml = xmltodict.parse(xml) + + if not parsed_xml["BucketLoggingStatus"].get("LoggingEnabled"): + return {} + + if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetBucket"): + raise MalformedXML() + + if not parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetPrefix"): + parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetPrefix"] = "" + + # Get the ACLs: + if parsed_xml["BucketLoggingStatus"]["LoggingEnabled"].get("TargetGrants"): + permissions = [ + "READ", + "WRITE", + "FULL_CONTROL" + ] + if not isinstance(parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"]["Grant"], list): + target_grants = self._get_grants_from_xml( + [parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"]["Grant"]], + MalformedXML, + permissions + ) + else: + target_grants = self._get_grants_from_xml( + parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"]["Grant"], + MalformedXML, + permissions + ) + + parsed_xml["BucketLoggingStatus"]["LoggingEnabled"]["TargetGrants"] = target_grants + + return parsed_xml["BucketLoggingStatus"]["LoggingEnabled"] + + def _notification_config_from_xml(self, xml): + parsed_xml = xmltodict.parse(xml) + + if not len(parsed_xml["NotificationConfiguration"]): + return {} + + # The types of notifications, and their required fields (apparently lambda is categorized by the API as + # "CloudFunction"): + notification_fields = [ + ("Topic", "sns"), + ("Queue", "sqs"), + ("CloudFunction", "lambda") + ] + + event_names = [ + 's3:ReducedRedundancyLostObject', + 's3:ObjectCreated:*', + 's3:ObjectCreated:Put', + 's3:ObjectCreated:Post', + 's3:ObjectCreated:Copy', + 's3:ObjectCreated:CompleteMultipartUpload', + 's3:ObjectRemoved:*', + 's3:ObjectRemoved:Delete', + 's3:ObjectRemoved:DeleteMarkerCreated' + ] + + found_notifications = 0 # Tripwire -- if this is not ever set, then there were no notifications + for name, 
arn_string in notification_fields: + # 1st verify that the proper notification configuration has been passed in (with an ARN that is close + # to being correct -- nothing too complex in the ARN logic): + the_notification = parsed_xml["NotificationConfiguration"].get("{}Configuration".format(name)) + if the_notification: + found_notifications += 1 + if not isinstance(the_notification, list): + the_notification = parsed_xml["NotificationConfiguration"]["{}Configuration".format(name)] \ + = [the_notification] + + for n in the_notification: + if not n[name].startswith("arn:aws:{}:".format(arn_string)): + raise InvalidNotificationARN() + + # 2nd, verify that the Events list is correct: + assert n["Event"] + if not isinstance(n["Event"], list): + n["Event"] = [n["Event"]] + + for event in n["Event"]: + if event not in event_names: + raise InvalidNotificationEvent() + + # Parse out the filters: + if n.get("Filter"): + # Error if S3Key is blank: + if not n["Filter"]["S3Key"]: + raise KeyError() + + if not isinstance(n["Filter"]["S3Key"]["FilterRule"], list): + n["Filter"]["S3Key"]["FilterRule"] = [n["Filter"]["S3Key"]["FilterRule"]] + + for filter_rule in n["Filter"]["S3Key"]["FilterRule"]: + assert filter_rule["Name"] in ["suffix", "prefix"] + assert filter_rule["Value"] + + if not found_notifications: + return {} + + return parsed_xml["NotificationConfiguration"] + def _key_response_delete(self, bucket_name, query, key_name, headers): if query.get('uploadId'): upload_id = query['uploadId'][0] @@ -987,7 +1176,30 @@ S3_BUCKET_LIFECYCLE_CONFIGURATION = """ {% for rule in rules %} {{ rule.id }} + {% if rule.filter %} + + {{ rule.filter.prefix }} + {% if rule.filter.tag %} + + {{ rule.filter.tag.key }} + {{ rule.filter.tag.value }} + + {% endif %} + {% if rule.filter.and_filter %} + + {{ rule.filter.and_filter.prefix }} + {% for tag in rule.filter.and_filter.tags %} + + {{ tag.key }} + {{ tag.value }} + + {% endfor %} + + {% endif %} + + {% else %} {{ rule.prefix if rule.prefix != None }} + {% endif %} {{ rule.status }} {% if rule.storage_class %} @@ -1000,7 +1212,7 @@ S3_BUCKET_LIFECYCLE_CONFIGURATION = """ {{ rule.storage_class }} {% endif %} - {% if rule.expiration_days or rule.expiration_date %} + {% if rule.expiration_days or rule.expiration_date or rule.expired_object_delete_marker %} {% if rule.expiration_days %} {{ rule.expiration_days }} @@ -1008,6 +1220,9 @@ S3_BUCKET_LIFECYCLE_CONFIGURATION = """ {% if rule.expiration_date %} {{ rule.expiration_date }} {% endif %} + {% if rule.expired_object_delete_marker %} + {{ rule.expired_object_delete_marker }} + {% endif %} {% endif %} @@ -1322,3 +1537,105 @@ S3_NO_CORS_CONFIG = """ 9Gjjt1m+cjU4OPvX9O9/8RuvnG41MRb/18Oux2o5H5MY7ISNTlXN+Dz9IG62/ILVxhAGI0qyPfg= """ + +S3_LOGGING_CONFIG = """ + + + {{ logging["TargetBucket"] }} + {{ logging["TargetPrefix"] }} + {% if logging.get("TargetGrants") %} + + {% for grant in logging["TargetGrants"] %} + + + {% if grant.grantees[0].uri %} + {{ grant.grantees[0].uri }} + {% endif %} + {% if grant.grantees[0].id %} + {{ grant.grantees[0].id }} + {% endif %} + {% if grant.grantees[0].display_name %} + {{ grant.grantees[0].display_name }} + {% endif %} + + {{ grant.permissions[0] }} + + {% endfor %} + + {% endif %} + + +""" + +S3_NO_LOGGING_CONFIG = """ + +""" + +S3_GET_BUCKET_NOTIFICATION_CONFIG = """ + + {% for topic in bucket.notification_configuration.topic %} + + {{ topic.id }} + {{ topic.arn }} + {% for event in topic.events %} + {{ event }} + {% endfor %} + {% if topic.filters %} + + + {% for rule in 
topic.filters["S3Key"]["FilterRule"] %} + + {{ rule["Name"] }} + {{ rule["Value"] }} + + {% endfor %} + + + {% endif %} + + {% endfor %} + {% for queue in bucket.notification_configuration.queue %} + + {{ queue.id }} + {{ queue.arn }} + {% for event in queue.events %} + {{ event }} + {% endfor %} + {% if queue.filters %} + + + {% for rule in queue.filters["S3Key"]["FilterRule"] %} + + {{ rule["Name"] }} + {{ rule["Value"] }} + + {% endfor %} + + + {% endif %} + + {% endfor %} + {% for cf in bucket.notification_configuration.cloud_function %} + + {{ cf.id }} + {{ cf.arn }} + {% for event in cf.events %} + {{ event }} + {% endfor %} + {% if cf.filters %} + + + {% for rule in cf.filters["S3Key"]["FilterRule"] %} + + {{ rule["Name"] }} + {{ rule["Value"] }} + + {% endfor %} + + + {% endif %} + + {% endfor %} + +""" diff --git a/moto/s3/utils.py b/moto/s3/utils.py index a121eae3a..85a812aad 100644 --- a/moto/s3/utils.py +++ b/moto/s3/utils.py @@ -1,4 +1,6 @@ from __future__ import unicode_literals +import logging +import os from boto.s3.key import Key import re @@ -6,10 +8,16 @@ import six from six.moves.urllib.parse import urlparse, unquote import sys + +log = logging.getLogger(__name__) + + bucket_name_regex = re.compile("(.+).s3(.*).amazonaws.com") def bucket_name_from_url(url): + if os.environ.get('S3_IGNORE_SUBDOMAIN_BUCKETNAME', '') in ['1', 'true']: + return None domain = urlparse(url).netloc if domain.startswith('www.'): @@ -27,6 +35,20 @@ def bucket_name_from_url(url): return None +REGION_URL_REGEX = re.compile( + r'^https?://(s3[-\.](?P.+)\.amazonaws\.com/(.+)|' + r'(.+)\.s3-(?P.+)\.amazonaws\.com)/?') + + +def parse_region_from_url(url): + match = REGION_URL_REGEX.search(url) + if match: + region = match.group('region1') or match.group('region2') + else: + region = 'us-east-1' + return region + + def metadata_from_headers(headers): metadata = {} meta_regex = re.compile( diff --git a/moto/sns/models.py b/moto/sns/models.py index 3d6f6507e..1c1be6680 100644 --- a/moto/sns/models.py +++ b/moto/sns/models.py @@ -4,11 +4,12 @@ import datetime import uuid import json -import boto.sns import requests import six import re +from boto3 import Session + from moto.compat import OrderedDict from moto.core import BaseBackend, BaseModel from moto.core.utils import iso_8601_datetime_with_milliseconds @@ -42,11 +43,12 @@ class Topic(BaseModel): self.subscriptions_confimed = 0 self.subscriptions_deleted = 0 - def publish(self, message, subject=None): + def publish(self, message, subject=None, message_attributes=None): message_id = six.text_type(uuid.uuid4()) subscriptions, _ = self.sns_backend.list_subscriptions(self.arn) for subscription in subscriptions: - subscription.publish(message, message_id, subject=subject) + subscription.publish(message, message_id, subject=subject, + message_attributes=message_attributes) return message_id def get_cfn_attribute(self, attribute_name): @@ -81,25 +83,65 @@ class Subscription(BaseModel): self.protocol = protocol self.arn = make_arn_for_subscription(self.topic.arn) self.attributes = {} + self._filter_policy = None # filter policy as a dict, not json. 
self.confirmed = False - def publish(self, message, message_id, subject=None): + def publish(self, message, message_id, subject=None, + message_attributes=None): + if not self._matches_filter_policy(message_attributes): + return + if self.protocol == 'sqs': queue_name = self.endpoint.split(":")[-1] region = self.endpoint.split(":")[3] - enveloped_message = json.dumps(self.get_post_data(message, message_id, subject), sort_keys=True, indent=2, separators=(',', ': ')) + enveloped_message = json.dumps(self.get_post_data(message, message_id, subject, message_attributes=message_attributes), sort_keys=True, indent=2, separators=(',', ': ')) sqs_backends[region].send_message(queue_name, enveloped_message) elif self.protocol in ['http', 'https']: post_data = self.get_post_data(message, message_id, subject) requests.post(self.endpoint, json=post_data) elif self.protocol == 'lambda': # TODO: support bad function name - function_name = self.endpoint.split(":")[-1] - region = self.arn.split(':')[3] - lambda_backends[region].send_message(function_name, message, subject=subject) + # http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html + arr = self.endpoint.split(":") + region = arr[3] + qualifier = None + if len(arr) == 7: + assert arr[5] == 'function' + function_name = arr[-1] + elif len(arr) == 8: + assert arr[5] == 'function' + qualifier = arr[-1] + function_name = arr[-2] + else: + assert False - def get_post_data(self, message, message_id, subject): - return { + lambda_backends[region].send_message(function_name, message, subject=subject, qualifier=qualifier) + + def _matches_filter_policy(self, message_attributes): + # TODO: support Anything-but matching, prefix matching and + # numeric value matching. + if not self._filter_policy: + return True + + if message_attributes is None: + message_attributes = {} + + def _field_match(field, rules, message_attributes): + if field not in message_attributes: + return False + for rule in rules: + if isinstance(rule, six.string_types): + # only string value matching is supported + if message_attributes[field]['Value'] == rule: + return True + return False + + return all(_field_match(field, rules, message_attributes) + for field, rules in six.iteritems(self._filter_policy)) + + def get_post_data( + self, message, message_id, subject, message_attributes=None): + post_data = { "Type": "Notification", "MessageId": message_id, "TopicArn": self.topic.arn, @@ -111,6 +153,9 @@ class Subscription(BaseModel): "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem", "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55" } + if message_attributes: + post_data["MessageAttributes"] = message_attributes + return post_data class PlatformApplication(BaseModel): @@ -247,11 +292,21 @@ class SNSBackend(BaseBackend): setattr(topic, attribute_name, attribute_value) def subscribe(self, topic_arn, endpoint, protocol): + # AWS doesn't create duplicates + old_subscription = self._find_subscription(topic_arn, endpoint, protocol) + if old_subscription: + return old_subscription topic = self.get_topic(topic_arn) subscription = Subscription(topic, endpoint, protocol) self.subscriptions[subscription.arn] = subscription return subscription + def _find_subscription(self, topic_arn, endpoint, protocol): + for subscription in self.subscriptions.values(): + if subscription.topic.arn == topic_arn 
and subscription.endpoint == endpoint and subscription.protocol == protocol: + return subscription + return None + def unsubscribe(self, subscription_arn): self.subscriptions.pop(subscription_arn) @@ -264,13 +319,15 @@ class SNSBackend(BaseBackend): else: return self._get_values_nexttoken(self.subscriptions, next_token) - def publish(self, arn, message, subject=None): - if subject is not None and len(subject) >= 100: + def publish(self, arn, message, subject=None, message_attributes=None): + if subject is not None and len(subject) > 100: + # Note that the AWS docs around length are wrong: https://github.com/spulec/moto/issues/1503 raise ValueError('Subject must be less than 100 characters') try: topic = self.get_topic(arn) - message_id = topic.publish(message, subject=subject) + message_id = topic.publish(message, subject=subject, + message_attributes=message_attributes) except SNSNotFoundError: endpoint = self.get_endpoint(arn) message_id = endpoint.publish(message) @@ -342,7 +399,7 @@ class SNSBackend(BaseBackend): return subscription.attributes def set_subscription_attributes(self, arn, name, value): - if name not in ['RawMessageDelivery', 'DeliveryPolicy']: + if name not in ['RawMessageDelivery', 'DeliveryPolicy', 'FilterPolicy']: raise SNSInvalidParameter('AttributeName') # TODO: should do validation @@ -353,10 +410,13 @@ class SNSBackend(BaseBackend): subscription.attributes[name] = value + if name == 'FilterPolicy': + subscription._filter_policy = json.loads(value) + sns_backends = {} -for region in boto.sns.regions(): - sns_backends[region.name] = SNSBackend(region.name) +for region in Session().get_available_regions('sns'): + sns_backends[region] = SNSBackend(region) DEFAULT_TOPIC_POLICY = { diff --git a/moto/sns/responses.py b/moto/sns/responses.py index 3b4aade80..035d56584 100644 --- a/moto/sns/responses.py +++ b/moto/sns/responses.py @@ -6,7 +6,7 @@ from collections import defaultdict from moto.core.responses import BaseResponse from moto.core.utils import camelcase_to_underscores from .models import sns_backends -from .exceptions import SNSNotFoundError +from .exceptions import SNSNotFoundError, InvalidParameterValue from .utils import is_e164 @@ -30,6 +30,49 @@ class SNSResponse(BaseResponse): in attributes ) + def _parse_message_attributes(self, prefix='', value_namespace='Value.'): + message_attributes = self._get_object_map( + 'MessageAttributes.entry', + name='Name', + value='Value' + ) + # SNS converts some key names before forwarding messages + # DataType -> Type, StringValue -> Value, BinaryValue -> Value + transformed_message_attributes = {} + for name, value in message_attributes.items(): + # validation + data_type = value['DataType'] + if not data_type: + raise InvalidParameterValue( + "The message attribute '{0}' must contain non-empty " + "message attribute value.".format(name)) + + data_type_parts = data_type.split('.') + if (len(data_type_parts) > 2 or + data_type_parts[0] not in ['String', 'Binary', 'Number']): + raise InvalidParameterValue( + "The message attribute '{0}' has an invalid message " + "attribute type, the set of supported type prefixes is " + "Binary, Number, and String.".format(name)) + + transform_value = None + if 'StringValue' in value: + transform_value = value['StringValue'] + elif 'BinaryValue' in value: + transform_value = value['BinaryValue'] + if not transform_value: + raise InvalidParameterValue( + "The message attribute '{0}' must contain non-empty " + "message attribute value for message attribute " + "type '{1}'.".format(name, 
data_type[0])) + + # transformation + transformed_message_attributes[name] = { + 'Type': data_type, 'Value': transform_value + } + + return transformed_message_attributes + def create_topic(self): name = self._get_param('Name') topic = self.backend.create_topic(name) @@ -241,6 +284,8 @@ class SNSResponse(BaseResponse): phone_number = self._get_param('PhoneNumber') subject = self._get_param('Subject') + message_attributes = self._parse_message_attributes() + if phone_number is not None: # Check phone is correct syntax (e164) if not is_e164(phone_number): @@ -265,7 +310,9 @@ class SNSResponse(BaseResponse): message = self._get_param('Message') try: - message_id = self.backend.publish(arn, message, subject=subject) + message_id = self.backend.publish( + arn, message, subject=subject, + message_attributes=message_attributes) except ValueError as err: error_response = self._error('InvalidParameter', str(err)) return error_response, dict(status=400) diff --git a/moto/sqs/models.py b/moto/sqs/models.py index 85b69ab0e..9c8858bc0 100644 --- a/moto/sqs/models.py +++ b/moto/sqs/models.py @@ -38,6 +38,8 @@ class Message(BaseModel): self.sent_timestamp = None self.approximate_first_receive_timestamp = None self.approximate_receive_count = 0 + self.deduplication_id = None + self.group_id = None self.visible_at = 0 self.delayed_until = 0 @@ -152,63 +154,86 @@ class Message(BaseModel): class Queue(BaseModel): - camelcase_attributes = ['ApproximateNumberOfMessages', - 'ApproximateNumberOfMessagesDelayed', - 'ApproximateNumberOfMessagesNotVisible', - 'ContentBasedDeduplication', - 'CreatedTimestamp', - 'DelaySeconds', - 'FifoQueue', - 'KmsDataKeyReusePeriodSeconds', - 'KmsMasterKeyId', - 'LastModifiedTimestamp', - 'MaximumMessageSize', - 'MessageRetentionPeriod', - 'QueueArn', - 'ReceiveMessageWaitTimeSeconds', - 'VisibilityTimeout', - 'WaitTimeSeconds'] - ALLOWED_PERMISSIONS = ('*', 'ChangeMessageVisibility', 'DeleteMessage', 'GetQueueAttributes', - 'GetQueueUrl', 'ReceiveMessage', 'SendMessage') + base_attributes = ['ApproximateNumberOfMessages', + 'ApproximateNumberOfMessagesDelayed', + 'ApproximateNumberOfMessagesNotVisible', + 'CreatedTimestamp', + 'DelaySeconds', + 'LastModifiedTimestamp', + 'MaximumMessageSize', + 'MessageRetentionPeriod', + 'QueueArn', + 'ReceiveMessageWaitTimeSeconds', + 'VisibilityTimeout'] + fifo_attributes = ['FifoQueue', + 'ContentBasedDeduplication'] + kms_attributes = ['KmsDataKeyReusePeriodSeconds', + 'KmsMasterKeyId'] + ALLOWED_PERMISSIONS = ('*', 'ChangeMessageVisibility', 'DeleteMessage', + 'GetQueueAttributes', 'GetQueueUrl', + 'ReceiveMessage', 'SendMessage') def __init__(self, name, region, **kwargs): self.name = name - self.visibility_timeout = int(kwargs.get('VisibilityTimeout', 30)) self.region = region self.tags = {} + self.permissions = {} self._messages = [] now = unix_time() - - # kwargs can also have: - # [Policy, RedrivePolicy] - self.fifo_queue = kwargs.get('FifoQueue', 'false') == 'true' - self.content_based_deduplication = kwargs.get('ContentBasedDeduplication', 'false') == 'true' - self.kms_master_key_id = kwargs.get('KmsMasterKeyId', 'alias/aws/sqs') - self.kms_data_key_reuse_period_seconds = int(kwargs.get('KmsDataKeyReusePeriodSeconds', 300)) self.created_timestamp = now - self.delay_seconds = int(kwargs.get('DelaySeconds', 0)) - self.last_modified_timestamp = now - self.maximum_message_size = int(kwargs.get('MaximumMessageSize', 64 << 10)) - self.message_retention_period = int(kwargs.get('MessageRetentionPeriod', 86400 * 4)) # four days - 
self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format(self.region, self.name) - self.receive_message_wait_time_seconds = int(kwargs.get('ReceiveMessageWaitTimeSeconds', 0)) - self.permissions = {} - - # wait_time_seconds will be set to immediate return messages - self.wait_time_seconds = int(kwargs.get('WaitTimeSeconds', 0)) - - self.redrive_policy = {} + self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format(self.region, + self.name) self.dead_letter_queue = None - if 'RedrivePolicy' in kwargs: - self._setup_dlq(kwargs['RedrivePolicy']) + # default settings for a non fifo queue + defaults = { + 'ContentBasedDeduplication': 'false', + 'DelaySeconds': 0, + 'FifoQueue': 'false', + 'KmsDataKeyReusePeriodSeconds': 300, # five minutes + 'KmsMasterKeyId': None, + 'MaximumMessageSize': int(64 << 10), + 'MessageRetentionPeriod': 86400 * 4, # four days + 'Policy': None, + 'ReceiveMessageWaitTimeSeconds': 0, + 'RedrivePolicy': None, + 'VisibilityTimeout': 30, + } + + defaults.update(kwargs) + self._set_attributes(defaults, now) # Check some conditions if self.fifo_queue and not self.name.endswith('.fifo'): raise MessageAttributesInvalid('Queue name must end in .fifo for FIFO queues') + def _set_attributes(self, attributes, now=None): + if not now: + now = unix_time() + + integer_fields = ('DelaySeconds', 'KmsDataKeyreusePeriodSeconds', + 'MaximumMessageSize', 'MessageRetentionPeriod', + 'ReceiveMessageWaitTime', 'VisibilityTimeout') + bool_fields = ('ContentBasedDeduplication', 'FifoQueue') + + for key, value in six.iteritems(attributes): + if key in integer_fields: + value = int(value) + if key in bool_fields: + value = value == "true" + + if key == 'RedrivePolicy' and value is not None: + continue + + setattr(self, camelcase_to_underscores(key), value) + + if attributes.get('RedrivePolicy', None): + self._setup_dlq(attributes['RedrivePolicy']) + + self.last_modified_timestamp = now + def _setup_dlq(self, policy_json): try: self.redrive_policy = json.loads(policy_json) @@ -251,8 +276,8 @@ class Queue(BaseModel): if 'VisibilityTimeout' in properties: queue.visibility_timeout = int(properties['VisibilityTimeout']) - if 'WaitTimeSeconds' in properties: - queue.wait_time_seconds = int(properties['WaitTimeSeconds']) + if 'ReceiveMessageWaitTimeSeconds' in properties: + queue.receive_message_wait_time_seconds = int(properties['ReceiveMessageWaitTimeSeconds']) return queue @classmethod @@ -281,11 +306,31 @@ class Queue(BaseModel): @property def attributes(self): result = {} - for attribute in self.camelcase_attributes: + + for attribute in self.base_attributes: attr = getattr(self, camelcase_to_underscores(attribute)) - if isinstance(attr, bool): - attr = str(attr).lower() result[attribute] = attr + + if self.fifo_queue: + for attribute in self.fifo_attributes: + attr = getattr(self, camelcase_to_underscores(attribute)) + result[attribute] = attr + + if self.kms_master_key_id: + for attribute in self.kms_attributes: + attr = getattr(self, camelcase_to_underscores(attribute)) + result[attribute] = attr + + if self.policy: + result['Policy'] = self.policy + + if self.redrive_policy: + result['RedrivePolicy'] = json.dumps(self.redrive_policy) + + for key in result: + if isinstance(result[key], bool): + result[key] = str(result[key]).lower() + return result def url(self, request_url): @@ -352,12 +397,12 @@ class SQSBackend(BaseBackend): return self.queues.pop(queue_name) return False - def set_queue_attribute(self, queue_name, key, value): + def set_queue_attributes(self, queue_name, attributes): 
queue = self.get_queue(queue_name) - setattr(queue, key, value) + queue._set_attributes(attributes) return queue - def send_message(self, queue_name, message_body, message_attributes=None, delay_seconds=None): + def send_message(self, queue_name, message_body, message_attributes=None, delay_seconds=None, deduplication_id=None, group_id=None): queue = self.get_queue(queue_name) @@ -369,6 +414,12 @@ class SQSBackend(BaseBackend): message_id = get_random_message_id() message = Message(message_id, message_body) + # Attributes, but not *message* attributes + if deduplication_id is not None: + message.deduplication_id = deduplication_id + if group_id is not None: + message.group_id = group_id + if message_attributes: message.message_attributes = message_attributes diff --git a/moto/sqs/responses.py b/moto/sqs/responses.py index b31681f16..c489d7118 100644 --- a/moto/sqs/responses.py +++ b/moto/sqs/responses.py @@ -4,7 +4,7 @@ import re from six.moves.urllib.parse import urlparse from moto.core.responses import BaseResponse -from moto.core.utils import camelcase_to_underscores, amz_crc32, amzn_request_id +from moto.core.utils import amz_crc32, amzn_request_id from .utils import parse_message_attributes from .models import sqs_backends from .exceptions import ( @@ -30,7 +30,7 @@ class SQSResponse(BaseResponse): @property def attribute(self): if not hasattr(self, '_attribute'): - self._attribute = self._get_map_prefix('Attribute', key_end='Name', value_end='Value') + self._attribute = self._get_map_prefix('Attribute', key_end='.Name', value_end='.Value') return self._attribute def _get_queue_name(self): @@ -87,7 +87,8 @@ class SQSResponse(BaseResponse): try: queue = self.sqs_backend.get_queue(queue_name) except QueueDoesNotExist as e: - return self._error('QueueDoesNotExist', e.description) + return self._error('AWS.SimpleQueueService.NonExistentQueue', + e.description) if queue: template = self.response_template(GET_QUEUE_URL_RESPONSE) @@ -171,7 +172,8 @@ class SQSResponse(BaseResponse): try: queue = self.sqs_backend.get_queue(queue_name) except QueueDoesNotExist as e: - return self._error('QueueDoesNotExist', e.description) + return self._error('AWS.SimpleQueueService.NonExistentQueue', + e.description) template = self.response_template(GET_QUEUE_ATTRIBUTES_RESPONSE) return template.render(queue=queue) @@ -179,9 +181,8 @@ class SQSResponse(BaseResponse): def set_queue_attributes(self): # TODO validate self.get_param('QueueUrl') queue_name = self._get_queue_name() - for key, value in self.attribute.items(): - key = camelcase_to_underscores(key) - self.sqs_backend.set_queue_attribute(queue_name, key, value) + self.sqs_backend.set_queue_attributes(queue_name, self.attribute) + return SET_QUEUE_ATTRIBUTE_RESPONSE def delete_queue(self): @@ -197,6 +198,8 @@ class SQSResponse(BaseResponse): def send_message(self): message = self._get_param('MessageBody') delay_seconds = int(self._get_param('DelaySeconds', 0)) + message_group_id = self._get_param("MessageGroupId") + message_dedupe_id = self._get_param("MessageDeduplicationId") if len(message) > MAXIMUM_MESSAGE_LENGTH: return ERROR_TOO_LONG_RESPONSE, dict(status=400) @@ -212,7 +215,9 @@ class SQSResponse(BaseResponse): queue_name, message, message_attributes=message_attributes, - delay_seconds=delay_seconds + delay_seconds=delay_seconds, + deduplication_id=message_dedupe_id, + group_id=message_group_id ) template = self.response_template(SEND_MESSAGE_RESPONSE) return template.render(message=message, message_attributes=message_attributes) @@ -320,10 
+325,26 @@ class SQSResponse(BaseResponse): except TypeError: message_count = DEFAULT_RECEIVED_MESSAGES + if message_count < 1 or message_count > 10: + return self._error( + "InvalidParameterValue", + "An error occurred (InvalidParameterValue) when calling " + "the ReceiveMessage operation: Value %s for parameter " + "MaxNumberOfMessages is invalid. Reason: must be between " + "1 and 10, if provided." % message_count) + try: wait_time = int(self.querystring.get("WaitTimeSeconds")[0]) except TypeError: - wait_time = queue.wait_time_seconds + wait_time = queue.receive_message_wait_time_seconds + + if wait_time < 0 or wait_time > 20: + return self._error( + "InvalidParameterValue", + "An error occurred (InvalidParameterValue) when calling " + "the ReceiveMessage operation: Value %s for parameter " + "WaitTimeSeconds is invalid. Reason: must be <= 0 and " + ">= 20 if provided." % wait_time) try: visibility_timeout = self._get_validated_visibility_timeout() @@ -490,6 +511,18 @@ RECEIVE_MESSAGE_RESPONSE = """ ApproximateFirstReceiveTimestamp {{ message.approximate_first_receive_timestamp }} + {% if message.deduplication_id is not none %} + + MessageDeduplicationId + {{ message.deduplication_id }} + + {% endif %} + {% if message.group_id is not none %} + + MessageGroupId + {{ message.group_id }} + + {% endif %} {% if message.message_attributes.items()|count > 0 %} {{- message.attribute_md5 -}} {% endif %} diff --git a/moto/ssm/models.py b/moto/ssm/models.py index d8dc10a4b..aaeccc887 100644 --- a/moto/ssm/models.py +++ b/moto/ssm/models.py @@ -5,7 +5,9 @@ from collections import defaultdict from moto.core import BaseBackend, BaseModel from moto.ec2 import ec2_backends +import datetime import time +import uuid class Parameter(BaseModel): @@ -91,7 +93,7 @@ class SimpleSystemManagerBackend(BaseBackend): result.append(self._parameters[name]) return result - def get_parameters_by_path(self, path, with_decryption, recursive): + def get_parameters_by_path(self, path, with_decryption, recursive, filters=None): """Implement the get-parameters-by-path-API in the backend.""" result = [] # path could be with or without a trailing /. 
we handle this @@ -102,10 +104,35 @@ class SimpleSystemManagerBackend(BaseBackend): continue if '/' in param[len(path) + 1:] and not recursive: continue + if not self._match_filters(self._parameters[param], filters): + continue result.append(self._parameters[param]) return result + @staticmethod + def _match_filters(parameter, filters=None): + """Return True if the given parameter matches all the filters""" + for filter_obj in (filters or []): + key = filter_obj['Key'] + option = filter_obj.get('Option', 'Equals') + values = filter_obj.get('Values', []) + + what = None + if key == 'Type': + what = parameter.type + elif key == 'KeyId': + what = parameter.keyid + + if option == 'Equals'\ + and not any(what == value for value in values): + return False + elif option == 'BeginsWith'\ + and not any(what.startswith(value) for value in values): + return False + # True if no false match (or no filters at all) + return True + def get_parameter(self, name, with_decryption): if name in self._parameters: return self._parameters[name] @@ -124,6 +151,7 @@ class SimpleSystemManagerBackend(BaseBackend): last_modified_date = time.time() self._parameters[name] = Parameter( name, value, type, description, keyid, last_modified_date, version) + return version def add_tags_to_resource(self, resource_type, resource_id, tags): for key, value in tags.items(): @@ -138,6 +166,39 @@ class SimpleSystemManagerBackend(BaseBackend): def list_tags_for_resource(self, resource_type, resource_id): return self._resource_tags[resource_type][resource_id] + def send_command(self, **kwargs): + instances = kwargs.get('InstanceIds', []) + now = datetime.datetime.now() + expires_after = now + datetime.timedelta(0, int(kwargs.get('TimeoutSeconds', 3600))) + return { + 'Command': { + 'CommandId': str(uuid.uuid4()), + 'DocumentName': kwargs['DocumentName'], + 'Comment': kwargs.get('Comment'), + 'ExpiresAfter': expires_after.isoformat(), + 'Parameters': kwargs['Parameters'], + 'InstanceIds': kwargs['InstanceIds'], + 'Targets': kwargs.get('targets'), + 'RequestedDateTime': now.isoformat(), + 'Status': 'Success', + 'StatusDetails': 'string', + 'OutputS3Region': kwargs.get('OutputS3Region'), + 'OutputS3BucketName': kwargs.get('OutputS3BucketName'), + 'OutputS3KeyPrefix': kwargs.get('OutputS3KeyPrefix'), + 'MaxConcurrency': 'string', + 'MaxErrors': 'string', + 'TargetCount': len(instances), + 'CompletedCount': len(instances), + 'ErrorCount': 0, + 'ServiceRole': kwargs.get('ServiceRoleArn'), + 'NotificationConfig': { + 'NotificationArn': 'string', + 'NotificationEvents': ['Success'], + 'NotificationType': 'Command' + } + } + } + ssm_backends = {} for region, ec2_backend in ec2_backends.items(): diff --git a/moto/ssm/responses.py b/moto/ssm/responses.py index 0b4ca3b65..e35eca5ee 100644 --- a/moto/ssm/responses.py +++ b/moto/ssm/responses.py @@ -85,9 +85,10 @@ class SimpleSystemManagerResponse(BaseResponse): path = self._get_param('Path') with_decryption = self._get_param('WithDecryption') recursive = self._get_param('Recursive', False) + filters = self._get_param('ParameterFilters') result = self.ssm_backend.get_parameters_by_path( - path, with_decryption, recursive + path, with_decryption, recursive, filters ) response = { @@ -162,9 +163,18 @@ class SimpleSystemManagerResponse(BaseResponse): keyid = self._get_param('KeyId') overwrite = self._get_param('Overwrite', False) - self.ssm_backend.put_parameter( + result = self.ssm_backend.put_parameter( name, description, value, type_, keyid, overwrite) - return json.dumps({}) + + if result is 
None: + error = { + '__type': 'ParameterAlreadyExists', + 'message': 'Parameter {0} already exists.'.format(name) + } + return json.dumps(error), dict(status=400) + + response = {'Version': result} + return json.dumps(response) def add_tags_to_resource(self): resource_id = self._get_param('ResourceId') @@ -190,3 +200,8 @@ class SimpleSystemManagerResponse(BaseResponse): tag_list = [{'Key': k, 'Value': v} for (k, v) in tags.items()] response = {'TagList': tag_list} return json.dumps(response) + + def send_command(self): + return json.dumps( + self.ssm_backend.send_command(**self.request_params) + ) diff --git a/moto/swf/models/__init__.py b/moto/swf/models/__init__.py index 833596a23..a8bc57f40 100644 --- a/moto/swf/models/__init__.py +++ b/moto/swf/models/__init__.py @@ -21,7 +21,7 @@ from .history_event import HistoryEvent # flake8: noqa from .timeout import Timeout # flake8: noqa from .workflow_type import WorkflowType # flake8: noqa from .workflow_execution import WorkflowExecution # flake8: noqa - +from time import sleep KNOWN_SWF_TYPES = { "activity": ActivityType, @@ -198,6 +198,9 @@ class SWFBackend(BaseBackend): wfe.start_decision_task(task.task_token, identity=identity) return task else: + # Sleeping here will prevent clients that rely on the timeout from + # entering in a busy waiting loop. + sleep(1) return None def count_pending_decision_tasks(self, domain_name, task_list): @@ -293,6 +296,9 @@ class SWFBackend(BaseBackend): wfe.start_activity_task(task.task_token, identity=identity) return task else: + # Sleeping here will prevent clients that rely on the timeout from + # entering in a busy waiting loop. + sleep(1) return None def count_pending_activity_tasks(self, domain_name, task_list): @@ -379,6 +385,14 @@ class SWFBackend(BaseBackend): if details: activity_task.details = details + def signal_workflow_execution(self, domain_name, signal_name, workflow_id, input=None, run_id=None): + # process timeouts on all objects + self._process_timeouts() + domain = self._get_domain(domain_name) + wfe = domain.get_workflow_execution( + workflow_id, run_id=run_id, raise_if_closed=True) + wfe.signal(signal_name, input) + swf_backends = {} for region in boto.swf.regions(): diff --git a/moto/swf/models/history_event.py b/moto/swf/models/history_event.py index 0dc21a09a..e7ddfd924 100644 --- a/moto/swf/models/history_event.py +++ b/moto/swf/models/history_event.py @@ -25,6 +25,7 @@ SUPPORTED_HISTORY_EVENT_TYPES = ( "ActivityTaskTimedOut", "DecisionTaskTimedOut", "WorkflowExecutionTimedOut", + "WorkflowExecutionSignaled" ) diff --git a/moto/swf/models/workflow_execution.py b/moto/swf/models/workflow_execution.py index 2f41c287f..3d01f9192 100644 --- a/moto/swf/models/workflow_execution.py +++ b/moto/swf/models/workflow_execution.py @@ -599,6 +599,14 @@ class WorkflowExecution(BaseModel): self.close_status = "TERMINATED" self.close_cause = "OPERATOR_INITIATED" + def signal(self, signal_name, input): + self._add_event( + "WorkflowExecutionSignaled", + signal_name=signal_name, + input=input, + ) + self.schedule_decision_task() + def first_timeout(self): if not self.open or not self.start_timestamp: return None diff --git a/moto/swf/responses.py b/moto/swf/responses.py index 1ee89bfc1..6f002d3d4 100644 --- a/moto/swf/responses.py +++ b/moto/swf/responses.py @@ -326,9 +326,9 @@ class SWFResponse(BaseResponse): _workflow_type = self._params["workflowType"] workflow_name = _workflow_type["name"] workflow_version = _workflow_type["version"] - _default_task_list = self._params.get("defaultTaskList") - 
if _default_task_list: - task_list = _default_task_list.get("name") + _task_list = self._params.get("taskList") + if _task_list: + task_list = _task_list.get("name") else: task_list = None child_policy = self._params.get("childPolicy") @@ -507,3 +507,20 @@ class SWFResponse(BaseResponse): ) # TODO: make it dynamic when we implement activity tasks cancellation return json.dumps({"cancelRequested": False}) + + def signal_workflow_execution(self): + domain_name = self._params["domain"] + signal_name = self._params["signalName"] + workflow_id = self._params["workflowId"] + _input = self._params["input"] + run_id = self._params["runId"] + + self._check_string(domain_name) + self._check_string(signal_name) + self._check_string(workflow_id) + self._check_none_or_string(_input) + self._check_none_or_string(run_id) + + self.swf_backend.signal_workflow_execution( + domain_name, signal_name, workflow_id, _input, run_id) + return "" diff --git a/moto/xray/mock_client.py b/moto/xray/mock_client.py index 6e2164d63..135796054 100644 --- a/moto/xray/mock_client.py +++ b/moto/xray/mock_client.py @@ -51,7 +51,7 @@ def mock_xray_client(f): aws_xray_sdk.core.xray_recorder._emitter = MockEmitter() try: - f(*args, **kwargs) + return f(*args, **kwargs) finally: if old_xray_context_var is None: diff --git a/requirements-dev.txt b/requirements-dev.txt index 1b151eb29..655be0616 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -8,7 +8,7 @@ freezegun flask boto>=2.45.0 boto3>=1.4.4 -botocore>=1.5.77 +botocore>=1.8.36 six>=1.9 prompt-toolkit==1.0.14 click==6.7 diff --git a/scripts/bump_version b/scripts/bump_version new file mode 100755 index 000000000..d1af3a84b --- /dev/null +++ b/scripts/bump_version @@ -0,0 +1,27 @@ +#!/bin/bash + +main() { + set -euo pipefail # Bash safemode + + local version=$1 + if [[ -z "${version}" ]]; then + echo "USAGE: $0 1.3.2" + echo "Provide a new version number as an argument to bump the version" + echo -n "Current:" + grep version= setup.py + return 1 + fi + + &>/dev/null which bumpversion || pip install bumpversion + bumpversion --new-version ${version} patch + + git checkout -b version-${version} + # Commit the new version + git commit -a -m "bumping to version ${version}" + # Commit an updated IMPLEMENTATION_COVERAGE.md + make implementation_coverage || true + # Open a PR + open https://github.com/spulec/moto/compare/master...version-${version} +} + +main $@ diff --git a/setup.cfg b/setup.cfg index 3c6e79cf3..fb04c16a8 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,2 +1,8 @@ +[nosetests] +verbosity=1 +detailed-errors=1 +with-coverage=1 +cover-package=moto + [bdist_wheel] universal=1 diff --git a/setup.py b/setup.py index 201622627..ebbf6f0cd 100755 --- a/setup.py +++ b/setup.py @@ -6,10 +6,10 @@ import sys install_requires = [ - "Jinja2>=2.8", + "Jinja2>=2.7.3", "boto>=2.36.0", - "boto3>=1.2.1", - "botocore>=1.7.12", + "boto3>=1.6.16", + "botocore>=1.9.16", "cookies", "cryptography>=2.0.0", "requests>=2.5", @@ -22,7 +22,8 @@ install_requires = [ "mock", "docker>=2.5.1", "jsondiff==1.1.1", - "aws-xray-sdk>=0.93", + "aws-xray-sdk<0.96,>=0.93", + "responses", ] extras_require = { @@ -39,7 +40,7 @@ else: setup( name='moto', - version='1.1.25', + version='1.3.3', description='A library that allows your python tests to easily' ' mock out the boto library', author='Steve Pulec', diff --git a/tests/test_apigateway/test_apigateway.py b/tests/test_apigateway/test_apigateway.py index 11230658b..3f75b3ebd 100644 --- a/tests/test_apigateway/test_apigateway.py +++ 
b/tests/test_apigateway/test_apigateway.py @@ -1,15 +1,13 @@ from __future__ import unicode_literals -from datetime import datetime -from dateutil.tz import tzutc import boto3 from freezegun import freeze_time import requests import sure # noqa from botocore.exceptions import ClientError -from moto.packages.responses import responses +import responses from moto import mock_apigateway, settings @@ -965,3 +963,35 @@ def test_http_proxying_integration(): if not settings.TEST_SERVER_MODE: requests.get(deploy_url).content.should.equal(b"a fake response") + + +@mock_apigateway +def test_api_keys(): + region_name = 'us-west-2' + client = boto3.client('apigateway', region_name=region_name) + response = client.get_api_keys() + len(response['items']).should.equal(0) + + apikey_value = '12345' + apikey_name = 'TESTKEY1' + payload = {'value': apikey_value, 'name': apikey_name} + response = client.create_api_key(**payload) + apikey = client.get_api_key(apiKey=response['id']) + apikey['name'].should.equal(apikey_name) + apikey['value'].should.equal(apikey_value) + + apikey_name = 'TESTKEY2' + payload = {'name': apikey_name, 'generateDistinctId': True} + response = client.create_api_key(**payload) + apikey_id = response['id'] + apikey = client.get_api_key(apiKey=apikey_id) + apikey['name'].should.equal(apikey_name) + len(apikey['value']).should.equal(40) + + response = client.get_api_keys() + len(response['items']).should.equal(2) + + client.delete_api_key(apiKey=apikey_id) + + response = client.get_api_keys() + len(response['items']).should.equal(1) diff --git a/tests/test_autoscaling/test_autoscaling.py b/tests/test_autoscaling/test_autoscaling.py index 5ed6c3aa5..f86ca2b81 100644 --- a/tests/test_autoscaling/test_autoscaling.py +++ b/tests/test_autoscaling/test_autoscaling.py @@ -11,10 +11,13 @@ import sure # noqa from moto import mock_autoscaling, mock_ec2_deprecated, mock_elb_deprecated, mock_elb, mock_autoscaling_deprecated, mock_ec2 from tests.helpers import requires_boto_gte +from utils import setup_networking, setup_networking_deprecated + @mock_autoscaling_deprecated @mock_elb_deprecated def test_create_autoscaling_group(): + mocked_networking = setup_networking_deprecated() elb_conn = boto.ec2.elb.connect_to_region('us-east-1') elb_conn.create_load_balancer( 'test_lb', zones=[], listeners=[(80, 8080, 'http')]) @@ -39,7 +42,7 @@ def test_create_autoscaling_group(): launch_config=config, load_balancers=["test_lb"], placement_group="test_placement", - vpc_zone_identifier='subnet-1234abcd', + vpc_zone_identifier=mocked_networking['subnet1'], termination_policies=["OldestInstance", "NewestInstance"], tags=[Tag( resource_id='tester_group', @@ -59,7 +62,7 @@ def test_create_autoscaling_group(): group.max_size.should.equal(2) group.min_size.should.equal(2) group.instances.should.have.length_of(2) - group.vpc_zone_identifier.should.equal('subnet-1234abcd') + group.vpc_zone_identifier.should.equal(mocked_networking['subnet1']) group.launch_config_name.should.equal('tester') group.default_cooldown.should.equal(60) group.health_check_period.should.equal(100) @@ -80,6 +83,8 @@ def test_create_autoscaling_group(): def test_create_autoscaling_groups_defaults(): """ Test with the minimum inputs and check that all of the proper defaults are assigned for the other attributes """ + + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -93,6 +98,7 @@ def test_create_autoscaling_groups_defaults(): max_size=2, min_size=2, 
launch_config=config, + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) @@ -105,7 +111,7 @@ def test_create_autoscaling_groups_defaults(): # Defaults list(group.availability_zones).should.equal([]) group.desired_capacity.should.equal(2) - group.vpc_zone_identifier.should.equal('') + group.vpc_zone_identifier.should.equal(mocked_networking['subnet1']) group.default_cooldown.should.equal(300) group.health_check_period.should.equal(300) group.health_check_type.should.equal("EC2") @@ -117,6 +123,7 @@ def test_create_autoscaling_groups_defaults(): @mock_autoscaling def test_list_many_autoscaling_groups(): + mocked_networking = setup_networking() conn = boto3.client('autoscaling', region_name='us-east-1') conn.create_launch_configuration(LaunchConfigurationName='TestLC') @@ -124,7 +131,8 @@ def test_list_many_autoscaling_groups(): conn.create_auto_scaling_group(AutoScalingGroupName='TestGroup%d' % i, MinSize=1, MaxSize=2, - LaunchConfigurationName='TestLC') + LaunchConfigurationName='TestLC', + VPCZoneIdentifier=mocked_networking['subnet1']) response = conn.describe_auto_scaling_groups() groups = response["AutoScalingGroups"] @@ -142,6 +150,7 @@ def test_list_many_autoscaling_groups(): @mock_autoscaling @mock_ec2 def test_list_many_autoscaling_groups(): + mocked_networking = setup_networking() conn = boto3.client('autoscaling', region_name='us-east-1') conn.create_launch_configuration(LaunchConfigurationName='TestLC') @@ -155,7 +164,8 @@ def test_list_many_autoscaling_groups(): "PropagateAtLaunch": True, "Key": 'TestTagKey1', "Value": 'TestTagValue1' - }]) + }], + VPCZoneIdentifier=mocked_networking['subnet1']) ec2 = boto3.client('ec2', region_name='us-east-1') instances = ec2.describe_instances() @@ -167,6 +177,7 @@ def test_list_many_autoscaling_groups(): @mock_autoscaling_deprecated def test_autoscaling_group_describe_filter(): + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -180,6 +191,7 @@ def test_autoscaling_group_describe_filter(): max_size=2, min_size=2, launch_config=config, + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) group.name = 'tester_group2' @@ -194,6 +206,7 @@ def test_autoscaling_group_describe_filter(): @mock_autoscaling_deprecated def test_autoscaling_update(): + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -209,12 +222,12 @@ def test_autoscaling_update(): max_size=2, min_size=2, launch_config=config, - vpc_zone_identifier='subnet-1234abcd', + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) group = conn.get_all_groups()[0] - group.vpc_zone_identifier.should.equal('subnet-1234abcd') + group.vpc_zone_identifier.should.equal(mocked_networking['subnet1']) group.vpc_zone_identifier = 'subnet-5678efgh' group.update() @@ -225,6 +238,7 @@ def test_autoscaling_update(): @mock_autoscaling_deprecated def test_autoscaling_tags_update(): + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -240,13 +254,13 @@ def test_autoscaling_tags_update(): max_size=2, min_size=2, launch_config=config, - vpc_zone_identifier='subnet-1234abcd', tags=[Tag( resource_id='tester_group', key='test_key', value='test_value', propagate_at_launch=True )], + vpc_zone_identifier=mocked_networking['subnet1'], ) 
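# The `mocked_networking` dict used throughout these tests comes from the new
# setup_networking()/setup_networking_deprecated() helpers in
# tests/test_autoscaling/utils.py (added later in this change). A minimal
# sketch of what they return, assuming the 10.11.0.0/16 VPC they create:
#
#     mocked_networking = setup_networking_deprecated()
#     # -> {'vpc': 'vpc-...', 'subnet1': 'subnet-...', 'subnet2': 'subnet-...'}
#
# so vpc_zone_identifier / VPCZoneIdentifier now reference a subnet that
# actually exists in the mocked EC2 backend instead of the hard-coded
# 'subnet-1234abcd'.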
conn.create_auto_scaling_group(group) @@ -267,6 +281,7 @@ def test_autoscaling_tags_update(): @mock_autoscaling_deprecated def test_autoscaling_group_delete(): + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -280,6 +295,7 @@ def test_autoscaling_group_delete(): max_size=2, min_size=2, launch_config=config, + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) @@ -292,6 +308,7 @@ def test_autoscaling_group_delete(): @mock_ec2_deprecated @mock_autoscaling_deprecated def test_autoscaling_group_describe_instances(): + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -305,6 +322,7 @@ def test_autoscaling_group_describe_instances(): max_size=2, min_size=2, launch_config=config, + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) @@ -326,6 +344,7 @@ def test_autoscaling_group_describe_instances(): @requires_boto_gte("2.8") @mock_autoscaling_deprecated def test_set_desired_capacity_up(): + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -341,7 +360,7 @@ def test_set_desired_capacity_up(): max_size=2, min_size=2, launch_config=config, - vpc_zone_identifier='subnet-1234abcd', + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) @@ -361,6 +380,7 @@ def test_set_desired_capacity_up(): @requires_boto_gte("2.8") @mock_autoscaling_deprecated def test_set_desired_capacity_down(): + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -376,7 +396,7 @@ def test_set_desired_capacity_down(): max_size=2, min_size=2, launch_config=config, - vpc_zone_identifier='subnet-1234abcd', + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) @@ -396,6 +416,7 @@ def test_set_desired_capacity_down(): @requires_boto_gte("2.8") @mock_autoscaling_deprecated def test_set_desired_capacity_the_same(): + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -411,7 +432,7 @@ def test_set_desired_capacity_the_same(): max_size=2, min_size=2, launch_config=config, - vpc_zone_identifier='subnet-1234abcd', + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) @@ -431,6 +452,7 @@ def test_set_desired_capacity_the_same(): @mock_autoscaling_deprecated @mock_elb_deprecated def test_autoscaling_group_with_elb(): + mocked_networking = setup_networking_deprecated() elb_conn = boto.connect_elb() zones = ['us-east-1a', 'us-east-1b'] ports = [(80, 8080, 'http'), (443, 8443, 'tcp')] @@ -451,6 +473,7 @@ def test_autoscaling_group_with_elb(): min_size=2, launch_config=config, load_balancers=["my-lb"], + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) group = conn.get_all_groups()[0] @@ -488,6 +511,7 @@ Boto3 @mock_autoscaling @mock_elb def test_describe_load_balancers(): + mocked_networking = setup_networking() INSTANCE_COUNT = 2 elb_client = boto3.client('elb', region_name='us-east-1') @@ -514,7 +538,8 @@ def test_describe_load_balancers(): "Key": 'test_key', "Value": 'test_value', "PropagateAtLaunch": True - }] + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = 
client.describe_load_balancers(AutoScalingGroupName='test_asg') @@ -524,6 +549,7 @@ def test_describe_load_balancers(): @mock_autoscaling @mock_elb def test_create_elb_and_autoscaling_group_no_relationship(): + mocked_networking = setup_networking() INSTANCE_COUNT = 2 ELB_NAME = 'my-elb' @@ -546,6 +572,7 @@ def test_create_elb_and_autoscaling_group_no_relationship(): MinSize=0, MaxSize=INSTANCE_COUNT, DesiredCapacity=INSTANCE_COUNT, + VPCZoneIdentifier=mocked_networking['subnet1'], ) # autoscaling group and elb should have no relationship @@ -562,6 +589,7 @@ def test_create_elb_and_autoscaling_group_no_relationship(): @mock_autoscaling @mock_elb def test_attach_load_balancer(): + mocked_networking = setup_networking() INSTANCE_COUNT = 2 elb_client = boto3.client('elb', region_name='us-east-1') @@ -587,7 +615,8 @@ def test_attach_load_balancer(): "Key": 'test_key', "Value": 'test_value', "PropagateAtLaunch": True - }] + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.attach_load_balancers( @@ -609,6 +638,7 @@ def test_attach_load_balancer(): @mock_autoscaling @mock_elb def test_detach_load_balancer(): + mocked_networking = setup_networking() INSTANCE_COUNT = 2 elb_client = boto3.client('elb', region_name='us-east-1') @@ -635,7 +665,8 @@ def test_detach_load_balancer(): "Key": 'test_key', "Value": 'test_value', "PropagateAtLaunch": True - }] + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.detach_load_balancers( @@ -654,6 +685,7 @@ def test_detach_load_balancer(): @mock_autoscaling def test_create_autoscaling_group_boto3(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -676,13 +708,15 @@ def test_create_autoscaling_group_boto3(): 'Key': 'not-propogated-tag-key', 'Value': 'not-propogate-tag-value', 'PropagateAtLaunch': False - }] + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) response['ResponseMetadata']['HTTPStatusCode'].should.equal(200) @mock_autoscaling def test_describe_autoscaling_groups_boto3(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -692,7 +726,8 @@ def test_describe_autoscaling_groups_boto3(): LaunchConfigurationName='test_launch_configuration', MinSize=0, MaxSize=20, - DesiredCapacity=5 + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.describe_auto_scaling_groups( AutoScalingGroupNames=["test_asg"] @@ -704,6 +739,7 @@ def test_describe_autoscaling_groups_boto3(): @mock_autoscaling def test_update_autoscaling_group_boto3(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -713,7 +749,8 @@ def test_update_autoscaling_group_boto3(): LaunchConfigurationName='test_launch_configuration', MinSize=0, MaxSize=20, - DesiredCapacity=5 + DesiredCapacity=5, + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.update_auto_scaling_group( @@ -729,6 +766,7 @@ def test_update_autoscaling_group_boto3(): @mock_autoscaling def test_autoscaling_taqs_update_boto3(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( 
LaunchConfigurationName='test_launch_configuration' @@ -739,14 +777,13 @@ def test_autoscaling_taqs_update_boto3(): MinSize=0, MaxSize=20, DesiredCapacity=5, - Tags=[ - { - "ResourceId": 'test_asg', - "Key": 'test_key', - "Value": 'test_value', - "PropagateAtLaunch": True - }, - ] + Tags=[{ + "ResourceId": 'test_asg', + "Key": 'test_key', + "Value": 'test_value', + "PropagateAtLaunch": True + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) client.create_or_update_tags(Tags=[{ @@ -769,6 +806,7 @@ def test_autoscaling_taqs_update_boto3(): @mock_autoscaling def test_autoscaling_describe_policies_boto3(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -784,7 +822,8 @@ def test_autoscaling_describe_policies_boto3(): "Key": 'test_key', "Value": 'test_value', "PropagateAtLaunch": True - }] + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) client.put_scaling_policy( @@ -825,6 +864,7 @@ def test_autoscaling_describe_policies_boto3(): @mock_autoscaling @mock_ec2 def test_detach_one_instance_decrement(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -835,13 +875,14 @@ def test_detach_one_instance_decrement(): MinSize=0, MaxSize=2, DesiredCapacity=2, - Tags=[ - {'ResourceId': 'test_asg', - 'ResourceType': 'auto-scaling-group', - 'Key': 'propogated-tag-key', - 'Value': 'propogate-tag-value', - 'PropagateAtLaunch': True - }] + Tags=[{ + 'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'propogated-tag-key', + 'Value': 'propogate-tag-value', + 'PropagateAtLaunch': True + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.describe_auto_scaling_groups( AutoScalingGroupNames=['test_asg'] @@ -878,6 +919,7 @@ def test_detach_one_instance_decrement(): @mock_autoscaling @mock_ec2 def test_detach_one_instance(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -888,13 +930,14 @@ def test_detach_one_instance(): MinSize=0, MaxSize=2, DesiredCapacity=2, - Tags=[ - {'ResourceId': 'test_asg', - 'ResourceType': 'auto-scaling-group', - 'Key': 'propogated-tag-key', - 'Value': 'propogate-tag-value', - 'PropagateAtLaunch': True - }] + Tags=[{ + 'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'propogated-tag-key', + 'Value': 'propogate-tag-value', + 'PropagateAtLaunch': True + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.describe_auto_scaling_groups( AutoScalingGroupNames=['test_asg'] @@ -930,6 +973,7 @@ def test_detach_one_instance(): @mock_autoscaling @mock_ec2 def test_attach_one_instance(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -940,13 +984,14 @@ def test_attach_one_instance(): MinSize=0, MaxSize=4, DesiredCapacity=2, - Tags=[ - {'ResourceId': 'test_asg', - 'ResourceType': 'auto-scaling-group', - 'Key': 'propogated-tag-key', - 'Value': 'propogate-tag-value', - 'PropagateAtLaunch': True - }] + Tags=[{ + 'ResourceId': 'test_asg', + 'ResourceType': 'auto-scaling-group', + 'Key': 'propogated-tag-key', + 
'Value': 'propogate-tag-value', + 'PropagateAtLaunch': True + }], + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.describe_auto_scaling_groups( AutoScalingGroupNames=['test_asg'] @@ -969,6 +1014,7 @@ def test_attach_one_instance(): @mock_autoscaling @mock_ec2 def test_describe_instance_health(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -979,6 +1025,7 @@ def test_describe_instance_health(): MinSize=2, MaxSize=4, DesiredCapacity=2, + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.describe_auto_scaling_groups( @@ -991,6 +1038,7 @@ def test_describe_instance_health(): @mock_autoscaling @mock_ec2 def test_set_instance_health(): + mocked_networking = setup_networking() client = boto3.client('autoscaling', region_name='us-east-1') _ = client.create_launch_configuration( LaunchConfigurationName='test_launch_configuration' @@ -1001,6 +1049,7 @@ def test_set_instance_health(): MinSize=2, MaxSize=4, DesiredCapacity=2, + VPCZoneIdentifier=mocked_networking['subnet1'], ) response = client.describe_auto_scaling_groups( @@ -1018,3 +1067,36 @@ def test_set_instance_health(): instance1 = response['AutoScalingGroups'][0]['Instances'][0] instance1['HealthStatus'].should.equal('Unhealthy') + +@mock_autoscaling +def test_suspend_processes(): + mocked_networking = setup_networking() + client = boto3.client('autoscaling', region_name='us-east-1') + client.create_launch_configuration( + LaunchConfigurationName='lc', + ) + client.create_auto_scaling_group( + LaunchConfigurationName='lc', + AutoScalingGroupName='test-asg', + MinSize=1, + MaxSize=1, + VPCZoneIdentifier=mocked_networking['subnet1'], + ) + + # When we suspend the 'Launch' process on the ASG client + client.suspend_processes( + AutoScalingGroupName='test-asg', + ScalingProcesses=['Launch'] + ) + + res = client.describe_auto_scaling_groups( + AutoScalingGroupNames=['test-asg'] + ) + + # The 'Launch' process should, in fact, be suspended + launch_suspended = False + for proc in res['AutoScalingGroups'][0]['SuspendedProcesses']: + if proc.get('ProcessName') == 'Launch': + launch_suspended = True + + assert launch_suspended is True diff --git a/tests/test_autoscaling/test_elbv2.py b/tests/test_autoscaling/test_elbv2.py index 89ec4a399..3a50484c1 100644 --- a/tests/test_autoscaling/test_elbv2.py +++ b/tests/test_autoscaling/test_elbv2.py @@ -1,24 +1,24 @@ from __future__ import unicode_literals import boto3 +import sure # noqa from moto import mock_autoscaling, mock_ec2, mock_elbv2 +from utils import setup_networking + @mock_elbv2 -@mock_ec2 @mock_autoscaling def test_attach_detach_target_groups(): + mocked_networking = setup_networking() INSTANCE_COUNT = 2 client = boto3.client('autoscaling', region_name='us-east-1') elbv2_client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') response = elbv2_client.create_target_group( Name='a-target', Protocol='HTTP', Port=8080, - VpcId=vpc.id, + VpcId=mocked_networking['vpc'], HealthCheckProtocol='HTTP', HealthCheckPort='8080', HealthCheckPath='/', @@ -40,7 +40,7 @@ def test_attach_detach_target_groups(): MaxSize=INSTANCE_COUNT, DesiredCapacity=INSTANCE_COUNT, TargetGroupARNs=[target_group_arn], - VPCZoneIdentifier=vpc.id) + VPCZoneIdentifier=mocked_networking['subnet1']) # create asg 
without attaching to target group client.create_auto_scaling_group( AutoScalingGroupName='test_asg2', @@ -48,7 +48,7 @@ def test_attach_detach_target_groups(): MinSize=0, MaxSize=INSTANCE_COUNT, DesiredCapacity=INSTANCE_COUNT, - VPCZoneIdentifier=vpc.id) + VPCZoneIdentifier=mocked_networking['subnet2']) response = client.describe_load_balancer_target_groups( AutoScalingGroupName='test_asg') @@ -74,21 +74,18 @@ def test_attach_detach_target_groups(): list(response['TargetHealthDescriptions']).should.have.length_of(INSTANCE_COUNT) @mock_elbv2 -@mock_ec2 @mock_autoscaling def test_detach_all_target_groups(): + mocked_networking = setup_networking() INSTANCE_COUNT = 2 client = boto3.client('autoscaling', region_name='us-east-1') elbv2_client = boto3.client('elbv2', region_name='us-east-1') - ec2 = boto3.resource('ec2', region_name='us-east-1') - - vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') response = elbv2_client.create_target_group( Name='a-target', Protocol='HTTP', Port=8080, - VpcId=vpc.id, + VpcId=mocked_networking['vpc'], HealthCheckProtocol='HTTP', HealthCheckPort='8080', HealthCheckPath='/', @@ -109,7 +106,7 @@ def test_detach_all_target_groups(): MaxSize=INSTANCE_COUNT, DesiredCapacity=INSTANCE_COUNT, TargetGroupARNs=[target_group_arn], - VPCZoneIdentifier=vpc.id) + VPCZoneIdentifier=mocked_networking['vpc']) response = client.describe_load_balancer_target_groups( AutoScalingGroupName='test_asg') diff --git a/tests/test_autoscaling/test_policies.py b/tests/test_autoscaling/test_policies.py index 54c64b749..e6b01163f 100644 --- a/tests/test_autoscaling/test_policies.py +++ b/tests/test_autoscaling/test_policies.py @@ -7,8 +7,11 @@ import sure # noqa from moto import mock_autoscaling_deprecated +from utils import setup_networking_deprecated + def setup_autoscale_group(): + mocked_networking = setup_networking_deprecated() conn = boto.connect_autoscale() config = LaunchConfiguration( name='tester', @@ -22,6 +25,7 @@ def setup_autoscale_group(): max_size=2, min_size=2, launch_config=config, + vpc_zone_identifier=mocked_networking['subnet1'], ) conn.create_auto_scaling_group(group) return group diff --git a/tests/test_autoscaling/utils.py b/tests/test_autoscaling/utils.py new file mode 100644 index 000000000..b167ba5f5 --- /dev/null +++ b/tests/test_autoscaling/utils.py @@ -0,0 +1,27 @@ +import boto +import boto3 +from moto import mock_ec2, mock_ec2_deprecated + + +@mock_ec2 +def setup_networking(): + ec2 = boto3.resource('ec2', region_name='us-east-1') + vpc = ec2.create_vpc(CidrBlock='10.11.0.0/16') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='10.11.1.0/24', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='10.11.2.0/24', + AvailabilityZone='us-east-1b') + return {'vpc': vpc.id, 'subnet1': subnet1.id, 'subnet2': subnet2.id} + +@mock_ec2_deprecated +def setup_networking_deprecated(): + conn = boto.connect_vpc() + vpc = conn.create_vpc("10.11.0.0/16") + subnet1 = conn.create_subnet(vpc.id, "10.11.1.0/24") + subnet2 = conn.create_subnet(vpc.id, "10.11.2.0/24") + return {'vpc': vpc.id, 'subnet1': subnet1.id, 'subnet2': subnet2.id} + diff --git a/tests/test_awslambda/test_lambda.py b/tests/test_awslambda/test_lambda.py index e7a9f9174..8ea9cc6fd 100644 --- a/tests/test_awslambda/test_lambda.py +++ b/tests/test_awslambda/test_lambda.py @@ -6,11 +6,12 @@ import boto3 import hashlib import io import json +import time import zipfile import sure # noqa from freezegun import freeze_time -from moto import 
mock_lambda, mock_s3, mock_ec2, settings +from moto import mock_lambda, mock_s3, mock_ec2, mock_sns, mock_logs, settings _lambda_region = 'us-west-2' @@ -48,6 +49,15 @@ def lambda_handler(event, context): return _process_lambda(func_str) +def get_test_zip_file3(): + pfunc = """ +def lambda_handler(event, context): + print("get_test_zip_file3 success") + return event +""" + return _process_lambda(pfunc) + + @mock_lambda def test_list_functions(): conn = boto3.client('lambda', 'us-west-2') @@ -160,6 +170,56 @@ if settings.TEST_SERVER_MODE: payload.should.equal(msg) +@mock_logs +@mock_sns +@mock_ec2 +@mock_lambda +def test_invoke_function_from_sns(): + logs_conn = boto3.client("logs", region_name="us-west-2") + sns_conn = boto3.client("sns", region_name="us-west-2") + sns_conn.create_topic(Name="some-topic") + topics_json = sns_conn.list_topics() + topics = topics_json["Topics"] + topic_arn = topics[0]['TopicArn'] + + conn = boto3.client('lambda', 'us-west-2') + result = conn.create_function( + FunctionName='testFunction', + Runtime='python2.7', + Role='test-iam-role', + Handler='lambda_function.lambda_handler', + Code={ + 'ZipFile': get_test_zip_file3(), + }, + Description='test lambda function', + Timeout=3, + MemorySize=128, + Publish=True, + ) + + sns_conn.subscribe(TopicArn=topic_arn, Protocol="lambda", Endpoint=result['FunctionArn']) + + result = sns_conn.publish(TopicArn=topic_arn, Message=json.dumps({})) + + start = time.time() + while (time.time() - start) < 30: + result = logs_conn.describe_log_streams(logGroupName='/aws/lambda/testFunction') + log_streams = result.get('logStreams') + if not log_streams: + time.sleep(1) + continue + + assert len(log_streams) == 1 + result = logs_conn.get_log_events(logGroupName='/aws/lambda/testFunction', logStreamName=log_streams[0]['logStreamName']) + for event in result.get('events'): + if event['message'] == 'get_test_zip_file3 success': + return + + time.sleep(1) + + assert False, "Test Failed" + + @mock_lambda def test_create_based_on_s3_with_missing_bucket(): conn = boto3.client('lambda', 'us-west-2') @@ -420,7 +480,6 @@ def test_publish(): function_list['Functions'][0]['FunctionArn'].should.contain('testFunction:$LATEST') - @mock_lambda @mock_s3 @freeze_time('2015-01-01 00:00:00') @@ -674,7 +733,7 @@ def test_get_function_created_with_zipfile(): "CodeSha256": hashlib.sha256(zip_content).hexdigest(), "CodeSize": len(zip_content), "Description": "test lambda function", - "FunctionArn":'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), + "FunctionArn": 'arn:aws:lambda:{}:123456789012:function:testFunction:$LATEST'.format(_lambda_region), "FunctionName": "testFunction", "Handler": "lambda_function.handler", "MemorySize": 128, diff --git a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py index d8b8cf142..1dbf80fb5 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py +++ b/tests/test_cloudformation/test_cloudformation_stack_crud_boto3.py @@ -1,9 +1,11 @@ from __future__ import unicode_literals import json +from collections import OrderedDict import boto3 from botocore.exceptions import ClientError +import sure # noqa # Ensure 'assert_raises' context manager support for Python 2.6 from nose.tools import assert_raises @@ -161,7 +163,7 @@ def test_boto3_create_stack(): ) cf_conn.get_template(StackName="test_stack")['TemplateBody'].should.equal( - dummy_template) + json.loads(dummy_template_json, 
object_pairs_hook=OrderedDict)) @mock_cloudformation @@ -270,9 +272,35 @@ def test_create_stack_from_s3_url(): StackName='stack_from_url', TemplateURL=key_url, ) + cf_conn.get_template(StackName="stack_from_url")['TemplateBody'].should.equal( + json.loads(dummy_template_json, object_pairs_hook=OrderedDict)) - cf_conn.get_template(StackName="stack_from_url")[ - 'TemplateBody'].should.equal(dummy_template) + +@mock_cloudformation +def test_update_stack_with_previous_value(): + name = 'update_stack_with_previous_value' + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + cf_conn.create_stack( + StackName=name, TemplateBody=dummy_template_yaml_with_ref, + Parameters=[ + {'ParameterKey': 'TagName', 'ParameterValue': 'foo'}, + {'ParameterKey': 'TagDescription', 'ParameterValue': 'bar'}, + ] + ) + cf_conn.update_stack( + StackName=name, UsePreviousTemplate=True, + Parameters=[ + {'ParameterKey': 'TagName', 'UsePreviousValue': True}, + {'ParameterKey': 'TagDescription', 'ParameterValue': 'not bar'}, + ] + ) + stack = cf_conn.describe_stacks(StackName=name)['Stacks'][0] + tag_name = [x['ParameterValue'] for x in stack['Parameters'] + if x['ParameterKey'] == 'TagName'][0] + tag_desc = [x['ParameterValue'] for x in stack['Parameters'] + if x['ParameterKey'] == 'TagDescription'][0] + assert tag_name == 'foo' + assert tag_desc == 'not bar' @mock_cloudformation @@ -306,8 +334,8 @@ def test_update_stack_from_s3_url(): TemplateURL=key_url, ) - cf_conn.get_template(StackName="update_stack_from_url")[ - 'TemplateBody'].should.equal(dummy_update_template) + cf_conn.get_template(StackName="update_stack_from_url")[ 'TemplateBody'].should.equal( + json.loads(dummy_update_template_json, object_pairs_hook=OrderedDict)) @mock_cloudformation @@ -337,6 +365,30 @@ def test_create_change_set_from_s3_url(): assert 'arn:aws:cloudformation:us-east-1:123456789:stack/NewStack' in response['StackId'] +@mock_cloudformation +def test_execute_change_set_w_arn(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + change_set = cf_conn.create_change_set( + StackName='NewStack', + TemplateBody=dummy_template_json, + ChangeSetName='NewChangeSet', + ChangeSetType='CREATE', + ) + cf_conn.execute_change_set(ChangeSetName=change_set['Id']) + + +@mock_cloudformation +def test_execute_change_set_w_name(): + cf_conn = boto3.client('cloudformation', region_name='us-east-1') + change_set = cf_conn.create_change_set( + StackName='NewStack', + TemplateBody=dummy_template_json, + ChangeSetName='NewChangeSet', + ChangeSetType='CREATE', + ) + cf_conn.execute_change_set(ChangeSetName='NewStack', StackName='NewStack') + + @mock_cloudformation def test_describe_stack_pagination(): conn = boto3.client('cloudformation', region_name='us-east-1') diff --git a/tests/test_cloudformation/test_cloudformation_stack_integration.py b/tests/test_cloudformation/test_cloudformation_stack_integration.py index c4a138de7..2c808726f 100644 --- a/tests/test_cloudformation/test_cloudformation_stack_integration.py +++ b/tests/test_cloudformation/test_cloudformation_stack_integration.py @@ -752,6 +752,9 @@ def test_vpc_single_instance_in_subnet(): security_group.vpc_id.should.equal(vpc.id) stack = conn.describe_stacks()[0] + + vpc.tags.should.have.key('Application').which.should.equal(stack.stack_id) + resources = stack.describe_resources() vpc_resource = [ resource for resource in resources if resource.resource_type == 'AWS::EC2::VPC'][0] @@ -891,19 +894,25 @@ def test_iam_roles(): "my-launch-config": { "Properties": { - 
"IamInstanceProfile": {"Ref": "my-instance-profile"}, + "IamInstanceProfile": {"Ref": "my-instance-profile-with-path"}, "ImageId": "ami-1234abcd", }, "Type": "AWS::AutoScaling::LaunchConfiguration" }, - "my-instance-profile": { + "my-instance-profile-with-path": { "Properties": { "Path": "my-path", - "Roles": [{"Ref": "my-role"}], + "Roles": [{"Ref": "my-role-with-path"}], }, "Type": "AWS::IAM::InstanceProfile" }, - "my-role": { + "my-instance-profile-no-path": { + "Properties": { + "Roles": [{"Ref": "my-role-no-path"}], + }, + "Type": "AWS::IAM::InstanceProfile" + }, + "my-role-with-path": { "Properties": { "AssumeRolePolicyDocument": { "Statement": [ @@ -961,6 +970,26 @@ def test_iam_roles(): ] }, "Type": "AWS::IAM::Role" + }, + "my-role-no-path": { + "Properties": { + "AssumeRolePolicyDocument": { + "Statement": [ + { + "Action": [ + "sts:AssumeRole" + ], + "Effect": "Allow", + "Principal": { + "Service": [ + "ec2.amazonaws.com" + ] + } + } + ] + }, + }, + "Type": "AWS::IAM::Role" } } } @@ -974,37 +1003,51 @@ def test_iam_roles(): iam_conn = boto.iam.connect_to_region("us-west-1") - role_result = iam_conn.list_roles()['list_roles_response'][ - 'list_roles_result']['roles'][0] - role = iam_conn.get_role(role_result.role_name) - role.role_name.should.contain("my-role") - role.path.should.equal("my-path") + role_results = iam_conn.list_roles()['list_roles_response'][ + 'list_roles_result']['roles'] + role_name_to_id = {} + for role_result in role_results: + role = iam_conn.get_role(role_result.role_name) + role.role_name.should.contain("my-role") + if 'with-path' in role.role_name: + role_name_to_id['with-path'] = role.role_id + role.path.should.equal("my-path") + else: + role_name_to_id['no-path'] = role.role_id + role.role_name.should.contain('no-path') + role.path.should.equal('/') - instance_profile_response = iam_conn.list_instance_profiles()[ - 'list_instance_profiles_response'] - cfn_instance_profile = instance_profile_response[ - 'list_instance_profiles_result']['instance_profiles'][0] - instance_profile = iam_conn.get_instance_profile( - cfn_instance_profile.instance_profile_name) - instance_profile.instance_profile_name.should.contain( - "my-instance-profile") - instance_profile.path.should.equal("my-path") - instance_profile.role_id.should.equal(role.role_id) + instance_profile_responses = iam_conn.list_instance_profiles()[ + 'list_instance_profiles_response']['list_instance_profiles_result']['instance_profiles'] + instance_profile_responses.should.have.length_of(2) + instance_profile_names = [] + + for instance_profile_response in instance_profile_responses: + instance_profile = iam_conn.get_instance_profile(instance_profile_response.instance_profile_name) + instance_profile_names.append(instance_profile.instance_profile_name) + instance_profile.instance_profile_name.should.contain( + "my-instance-profile") + if "with-path" in instance_profile.instance_profile_name: + instance_profile.path.should.equal("my-path") + instance_profile.role_id.should.equal(role_name_to_id['with-path']) + else: + instance_profile.instance_profile_name.should.contain('no-path') + instance_profile.role_id.should.equal(role_name_to_id['no-path']) + instance_profile.path.should.equal('/') autoscale_conn = boto.ec2.autoscale.connect_to_region("us-west-1") launch_config = autoscale_conn.get_all_launch_configurations()[0] - launch_config.instance_profile_name.should.contain("my-instance-profile") + launch_config.instance_profile_name.should.contain("my-instance-profile-with-path") stack = 
conn.describe_stacks()[0] resources = stack.describe_resources() - instance_profile_resource = [ - resource for resource in resources if resource.resource_type == 'AWS::IAM::InstanceProfile'][0] - instance_profile_resource.physical_resource_id.should.equal( - instance_profile.instance_profile_name) + instance_profile_resources = [ + resource for resource in resources if resource.resource_type == 'AWS::IAM::InstanceProfile'] + {ip.physical_resource_id for ip in instance_profile_resources}.should.equal(set(instance_profile_names)) - role_resource = [ - resource for resource in resources if resource.resource_type == 'AWS::IAM::Role'][0] - role_resource.physical_resource_id.should.equal(role.role_id) + role_resources = [ + resource for resource in resources if resource.resource_type == 'AWS::IAM::Role'] + {r.physical_resource_id for r in role_resources}.should.equal(set(role_name_to_id.values())) @mock_ec2_deprecated() @@ -2113,6 +2156,78 @@ def test_stack_spot_fleet(): launch_spec['WeightedCapacity'].should.equal(2.0) +@mock_cloudformation() +@mock_ec2() +def test_stack_spot_fleet_should_figure_out_default_price(): + conn = boto3.client('ec2', 'us-east-1') + + vpc = conn.create_vpc(CidrBlock="10.0.0.0/8")['Vpc'] + subnet = conn.create_subnet( + VpcId=vpc['VpcId'], CidrBlock='10.0.0.0/16', AvailabilityZone='us-east-1a')['Subnet'] + subnet_id = subnet['SubnetId'] + + spot_fleet_template = { + 'Resources': { + "SpotFleet1": { + "Type": "AWS::EC2::SpotFleet", + "Properties": { + "SpotFleetRequestConfigData": { + "IamFleetRole": "arn:aws:iam::123456789012:role/fleet", + "TargetCapacity": 6, + "AllocationStrategy": "diversified", + "LaunchSpecifications": [ + { + "EbsOptimized": "false", + "InstanceType": 't2.small', + "ImageId": "ami-1234", + "SubnetId": subnet_id, + "WeightedCapacity": "2", + }, + { + "EbsOptimized": "true", + "InstanceType": 't2.large', + "ImageId": "ami-1234", + "Monitoring": {"Enabled": "true"}, + "SecurityGroups": [{"GroupId": "sg-123"}], + "SubnetId": subnet_id, + "IamInstanceProfile": {"Arn": "arn:aws:iam::123456789012:role/fleet"}, + "WeightedCapacity": "4", + } + ] + } + } + } + } + } + spot_fleet_template_json = json.dumps(spot_fleet_template) + + cf_conn = boto3.client('cloudformation', 'us-east-1') + stack_id = cf_conn.create_stack( + StackName='test_stack', + TemplateBody=spot_fleet_template_json, + )['StackId'] + + stack_resources = cf_conn.list_stack_resources(StackName=stack_id) + stack_resources['StackResourceSummaries'].should.have.length_of(1) + spot_fleet_id = stack_resources[ + 'StackResourceSummaries'][0]['PhysicalResourceId'] + + spot_fleet_requests = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] + len(spot_fleet_requests).should.equal(1) + spot_fleet_request = spot_fleet_requests[0] + spot_fleet_request['SpotFleetRequestState'].should.equal("active") + spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig'] + + assert 'SpotPrice' not in spot_fleet_config + len(spot_fleet_config['LaunchSpecifications']).should.equal(2) + launch_spec1 = spot_fleet_config['LaunchSpecifications'][0] + launch_spec2 = spot_fleet_config['LaunchSpecifications'][1] + + assert 'SpotPrice' not in launch_spec1 + assert 'SpotPrice' not in launch_spec2 + + @mock_ec2 @mock_elbv2 @mock_cloudformation diff --git a/tests/test_cloudformation/test_stack_parsing.py b/tests/test_cloudformation/test_stack_parsing.py index d9fe4d80d..af7e608db 100644 --- a/tests/test_cloudformation/test_stack_parsing.py +++ 
b/tests/test_cloudformation/test_stack_parsing.py @@ -75,6 +75,14 @@ get_attribute_output = { } } +get_availability_zones_output = { + "Outputs": { + "Output1": { + "Value": {"Fn::GetAZs": ""} + } + } +} + split_select_template = { "AWSTemplateFormatVersion": "2010-09-09", "Resources": { @@ -146,6 +154,8 @@ bad_outputs_template = dict( list(dummy_template.items()) + list(bad_output.items())) get_attribute_outputs_template = dict( list(dummy_template.items()) + list(get_attribute_output.items())) +get_availability_zones_template = dict( + list(dummy_template.items()) + list(get_availability_zones_output.items())) dummy_template_json = json.dumps(dummy_template) name_type_template_json = json.dumps(name_type_template) @@ -153,6 +163,8 @@ output_type_template_json = json.dumps(outputs_template) bad_output_template_json = json.dumps(bad_outputs_template) get_attribute_outputs_template_json = json.dumps( get_attribute_outputs_template) +get_availability_zones_template_json = json.dumps( + get_availability_zones_template) split_select_template_json = json.dumps(split_select_template) sub_template_json = json.dumps(sub_template) export_value_template_json = json.dumps(export_value_template) @@ -243,6 +255,21 @@ def test_parse_stack_with_get_attribute_outputs(): output.value.should.equal("my-queue") +def test_parse_stack_with_get_availability_zones(): + stack = FakeStack( + stack_id="test_id", + name="test_stack", + template=get_availability_zones_template_json, + parameters={}, + region_name='us-east-1') + + stack.output_map.should.have.length_of(1) + list(stack.output_map.keys())[0].should.equal('Output1') + output = list(stack.output_map.values())[0] + output.should.be.a(Output) + output.value.should.equal([ "us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d" ]) + + def test_parse_stack_with_bad_get_attribute_outputs(): FakeStack.when.called_with( "test_id", "test_stack", bad_output_template_json, {}, "us-west-1").should.throw(ValidationError) diff --git a/tests/test_cloudwatch/test_cloudwatch_boto3.py b/tests/test_cloudwatch/test_cloudwatch_boto3.py index b2c44cd51..5fbf75749 100644 --- a/tests/test_cloudwatch/test_cloudwatch_boto3.py +++ b/tests/test_cloudwatch/test_cloudwatch_boto3.py @@ -127,12 +127,14 @@ def test_alarm_state(): ) len(resp['MetricAlarms']).should.equal(1) resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm1') + resp['MetricAlarms'][0]['StateValue'].should.equal('ALARM') resp = client.describe_alarms( StateValue='OK' ) len(resp['MetricAlarms']).should.equal(1) resp['MetricAlarms'][0]['AlarmName'].should.equal('testalarm2') + resp['MetricAlarms'][0]['StateValue'].should.equal('OK') # Just for sanity resp = client.describe_alarms() diff --git a/tests/test_cognitoidentity/test_cognitoidentity.py b/tests/test_cognitoidentity/test_cognitoidentity.py new file mode 100644 index 000000000..a38107b99 --- /dev/null +++ b/tests/test_cognitoidentity/test_cognitoidentity.py @@ -0,0 +1,71 @@ +from __future__ import unicode_literals + +import boto3 + +from moto import mock_cognitoidentity +import sure # noqa + +from moto.cognitoidentity.utils import get_random_identity_id + + +@mock_cognitoidentity +def test_create_identity_pool(): + conn = boto3.client('cognito-identity', 'us-west-2') + + result = conn.create_identity_pool(IdentityPoolName='TestPool', + AllowUnauthenticatedIdentities=False, + SupportedLoginProviders={'graph.facebook.com': '123456789012345'}, + DeveloperProviderName='devname', + OpenIdConnectProviderARNs=['arn:aws:rds:eu-west-2:123456789012:db:mysql-db'], + 
CognitoIdentityProviders=[ + { + 'ProviderName': 'testprovider', + 'ClientId': 'CLIENT12345', + 'ServerSideTokenCheck': True + }, + ], + SamlProviderARNs=['arn:aws:rds:eu-west-2:123456789012:db:mysql-db']) + assert result['IdentityPoolId'] != '' + + +# testing a helper function +def test_get_random_identity_id(): + assert len(get_random_identity_id('us-west-2')) > 0 + + +@mock_cognitoidentity +def test_get_id(): + # These two do NOT work in server mode. They just don't return the data from the model. + conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_id(AccountId='someaccount', + IdentityPoolId='us-west-2:12345', + Logins={ + 'someurl': '12345' + }) + print(result) + assert result.get('IdentityId', "").startswith('us-west-2') or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 + + +@mock_cognitoidentity +def test_get_credentials_for_identity(): + # These two do NOT work in server mode. They just don't return the data from the model. + conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_credentials_for_identity(IdentityId='12345') + + assert result.get('Expiration', 0) > 0 or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 + assert result.get('IdentityId') == '12345' or result.get('ResponseMetadata').get('HTTPStatusCode') == 200 + + +@mock_cognitoidentity +def test_get_open_id_token_for_developer_identity(): + conn = boto3.client('cognito-identity', 'us-west-2') + result = conn.get_open_id_token_for_developer_identity( + IdentityPoolId='us-west-2:12345', + IdentityId='12345', + Logins={ + 'someurl': '12345' + }, + TokenDuration=123 + ) + assert len(result['Token']) + assert result['IdentityId'] == '12345' diff --git a/tests/test_cognitoidentity/test_server.py b/tests/test_cognitoidentity/test_server.py new file mode 100644 index 000000000..b63d42bc0 --- /dev/null +++ b/tests/test_cognitoidentity/test_server.py @@ -0,0 +1,45 @@ +from __future__ import unicode_literals + +import json +import sure # noqa + +import moto.server as server +from moto import mock_cognitoidentity + +''' +Test the different server responses +''' + + +@mock_cognitoidentity +def test_create_identity_pool(): + + backend = server.create_backend_app("cognito-identity") + test_client = backend.test_client() + + res = test_client.post('/', + data={"IdentityPoolName": "test", "AllowUnauthenticatedIdentities": True}, + headers={ + "X-Amz-Target": "com.amazonaws.cognito.identity.model.AWSCognitoIdentityService.CreateIdentityPool"}, + ) + + json_data = json.loads(res.data.decode("utf-8")) + assert json_data['IdentityPoolName'] == "test" + + +@mock_cognitoidentity +def test_get_id(): + backend = server.create_backend_app("cognito-identity") + test_client = backend.test_client() + + res = test_client.post('/', + data=json.dumps({'AccountId': 'someaccount', + 'IdentityPoolId': 'us-west-2:12345', + 'Logins': {'someurl': '12345'}}), + headers={ + "X-Amz-Target": "com.amazonaws.cognito.identity.model.AWSCognitoIdentityService.GetId"}, + ) + + print(res.data) + json_data = json.loads(res.data.decode("utf-8")) + assert ':' in json_data['IdentityId'] diff --git a/tests/test_dynamodb2/test_dynamodb.py b/tests/test_dynamodb2/test_dynamodb.py index 05daf23aa..20ff80167 100644 --- a/tests/test_dynamodb2/test_dynamodb.py +++ b/tests/test_dynamodb2/test_dynamodb.py @@ -247,6 +247,33 @@ def test_scan_returns_consumed_capacity(): assert response['ConsumedCapacity']['TableName'] == name +@requires_boto_gte("2.9") +@mock_dynamodb2 +def test_put_item_with_special_chars(): + name = 
'TestTable' + conn = boto3.client('dynamodb', + region_name='us-west-2', + aws_access_key_id="ak", + aws_secret_access_key="sk") + + conn.create_table(TableName=name, + KeySchema=[{'AttributeName':'forum_name','KeyType':'HASH'}], + AttributeDefinitions=[{'AttributeName':'forum_name','AttributeType':'S'}], + ProvisionedThroughput={'ReadCapacityUnits':5,'WriteCapacityUnits':5}) + + conn.put_item( + TableName=name, + Item={ + 'forum_name': { 'S': 'LOLCat Forum' }, + 'subject': { 'S': 'Check this out!' }, + 'Body': { 'S': 'http://url_to_lolcat.gif'}, + 'SentBy': { 'S': "test" }, + 'ReceivedTime': { 'S': '12/9/2011 11:36:03 PM'}, + '"': {"S": "foo"}, + } + ) + + @requires_boto_gte("2.9") @mock_dynamodb2 def test_query_returns_consumed_capacity(): @@ -658,6 +685,14 @@ def test_filter_expression(): {':v0': {'N': '7'}} ) filter_expr.expr(row1).should.be(True) + # Expression from to check contains on string value + filter_expr = moto.dynamodb2.comparisons.get_filter_expression( + 'contains(#n0, :v0)', + {'#n0': 'Desc'}, + {':v0': {'S': 'Some'}} + ) + filter_expr.expr(row1).should.be(True) + filter_expr.expr(row2).should.be(False) @mock_dynamodb2 @@ -699,6 +734,11 @@ def test_query_filter(): ) assert response['Count'] == 1 assert response['Items'][0]['app'] == 'app2' + response = table.query( + KeyConditionExpression=Key('client').eq('client1'), + FilterExpression=Attr('app').contains('app') + ) + assert response['Count'] == 2 @mock_dynamodb2 @@ -1067,3 +1107,71 @@ def test_update_item_on_map(): resp = table.scan() resp['Items'][0]['body'].should.equal({'nested': {'data': 'new_value'}}) + + +# https://github.com/spulec/moto/issues/1358 +@mock_dynamodb2 +def test_update_if_not_exists(): + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + + # Create the DynamoDB table. 
+ dynamodb.create_table( + TableName='users', + KeySchema=[ + { + 'AttributeName': 'forum_name', + 'KeyType': 'HASH' + }, + { + 'AttributeName': 'subject', + 'KeyType': 'RANGE' + }, + ], + AttributeDefinitions=[ + { + 'AttributeName': 'forum_name', + 'AttributeType': 'S' + }, + { + 'AttributeName': 'subject', + 'AttributeType': 'S' + }, + ], + ProvisionedThroughput={ + 'ReadCapacityUnits': 5, + 'WriteCapacityUnits': 5 + } + ) + table = dynamodb.Table('users') + + table.put_item(Item={ + 'forum_name': 'the-key', + 'subject': '123' + }) + + table.update_item(Key={ + 'forum_name': 'the-key', + 'subject': '123' + }, + UpdateExpression='SET created_at = if_not_exists(created_at, :created_at)', + ExpressionAttributeValues={ + ':created_at': 123 + } + ) + + resp = table.scan() + assert resp['Items'][0]['created_at'] == 123 + + table.update_item(Key={ + 'forum_name': 'the-key', + 'subject': '123' + }, + UpdateExpression='SET created_at = if_not_exists(created_at, :created_at)', + ExpressionAttributeValues={ + ':created_at': 456 + } + ) + + resp = table.scan() + # Still the original value + assert resp['Items'][0]['created_at'] == 123 diff --git a/tests/test_ec2/test_amis.py b/tests/test_ec2/test_amis.py old mode 100755 new mode 100644 index 1029ba39e..a8d4d1b67 --- a/tests/test_ec2/test_amis.py +++ b/tests/test_ec2/test_amis.py @@ -1,23 +1,27 @@ from __future__ import unicode_literals -# Ensure 'assert_raises' context manager support for Python 2.6 -import tests.backport_assert_raises # noqa -from nose.tools import assert_raises import boto -import boto3 import boto.ec2 import boto3 -from boto.exception import EC2ResponseError, EC2ResponseError - +from boto.exception import EC2ResponseError +from botocore.exceptions import ClientError +# Ensure 'assert_raises' context manager support for Python 2.6 +from nose.tools import assert_raises import sure # noqa from moto import mock_ec2_deprecated, mock_ec2 +from moto.ec2.models import AMIS from tests.helpers import requires_boto_gte @mock_ec2_deprecated def test_ami_create_and_delete(): conn = boto.connect_ec2('the_key', 'the_secret') + + initial_ami_count = len(AMIS) + conn.get_all_volumes().should.have.length_of(0) + conn.get_all_snapshots().should.have.length_of(initial_ami_count) + reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] @@ -32,33 +36,34 @@ def test_ami_create_and_delete(): image_id = conn.create_image(instance.id, "test-ami", "this is a test ami") all_images = conn.get_all_images() - image = all_images[0] + set([i.id for i in all_images]).should.contain(image_id) - image.id.should.equal(image_id) - image.virtualization_type.should.equal(instance.virtualization_type) - image.architecture.should.equal(instance.architecture) - image.kernel_id.should.equal(instance.kernel) - image.platform.should.equal(instance.platform) - image.creationDate.should_not.be.none + retrieved_image = [i for i in all_images if i.id == image_id][0] + + retrieved_image.id.should.equal(image_id) + retrieved_image.virtualization_type.should.equal(instance.virtualization_type) + retrieved_image.architecture.should.equal(instance.architecture) + retrieved_image.kernel_id.should.equal(instance.kernel) + retrieved_image.platform.should.equal(instance.platform) + retrieved_image.creationDate.should_not.be.none instance.terminate() - # Validate auto-created volume and snapshot + # Ensure we're no longer creating a volume volumes = conn.get_all_volumes() - volumes.should.have.length_of(1) - volume = volumes[0] + 
volumes.should.have.length_of(0) + # Validate auto-created snapshot snapshots = conn.get_all_snapshots() - snapshots.should.have.length_of(1) - snapshot = snapshots[0] + snapshots.should.have.length_of(initial_ami_count + 1) - image.block_device_mapping.current_value.snapshot_id.should.equal( - snapshot.id) + retrieved_image_snapshot_id = retrieved_image.block_device_mapping.current_value.snapshot_id + [s.id for s in snapshots].should.contain(retrieved_image_snapshot_id) + snapshot = [s for s in snapshots if s.id == retrieved_image_snapshot_id][0] snapshot.description.should.equal( - "Auto-created snapshot for AMI {0}".format(image.id)) - snapshot.volume_id.should.equal(volume.id) + "Auto-created snapshot for AMI {0}".format(retrieved_image.id)) # root device should be in AMI's block device mappings - root_mapping = image.block_device_mapping.get(image.root_device_name) + root_mapping = retrieved_image.block_device_mapping.get(retrieved_image.root_device_name) root_mapping.should_not.be.none # Deregister @@ -83,6 +88,11 @@ def test_ami_create_and_delete(): @mock_ec2_deprecated def test_ami_copy(): conn = boto.ec2.connect_to_region("us-west-1") + + initial_ami_count = len(AMIS) + conn.get_all_volumes().should.have.length_of(0) + conn.get_all_snapshots().should.have.length_of(initial_ami_count) + reservation = conn.run_instances('ami-1234abcd') instance = reservation.instances[0] @@ -95,7 +105,8 @@ def test_ami_copy(): # the image_id to fetch the full info. with assert_raises(EC2ResponseError) as ex: copy_image_ref = conn.copy_image( - source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami", dry_run=True) + source_image.region.name, source_image.id, "test-copy-ami", "this is a test copy ami", + dry_run=True) ex.exception.error_code.should.equal('DryRunOperation') ex.exception.status.should.equal(400) ex.exception.message.should.equal( @@ -113,9 +124,11 @@ def test_ami_copy(): copy_image.kernel_id.should.equal(source_image.kernel_id) copy_image.platform.should.equal(source_image.platform) - # Validate auto-created volume and snapshot - conn.get_all_volumes().should.have.length_of(2) - conn.get_all_snapshots().should.have.length_of(2) + # Ensure we're no longer creating a volume + conn.get_all_volumes().should.have.length_of(0) + + # Validate auto-created snapshot + conn.get_all_snapshots().should.have.length_of(initial_ami_count + 2) copy_image.block_device_mapping.current_value.snapshot_id.should_not.equal( source_image.block_device_mapping.current_value.snapshot_id) @@ -217,7 +230,8 @@ def test_ami_filters(): amis_by_architecture = conn.get_all_images( filters={'architecture': 'x86_64'}) - set([ami.id for ami in amis_by_architecture]).should.equal(set([imageB.id])) + set([ami.id for ami in amis_by_architecture]).should.contain(imageB.id) + len(amis_by_architecture).should.equal(35) amis_by_kernel = conn.get_all_images(filters={'kernel-id': 'k-abcd1234'}) set([ami.id for ami in amis_by_kernel]).should.equal(set([imageB.id])) @@ -225,26 +239,32 @@ def test_ami_filters(): amis_by_virtualization = conn.get_all_images( filters={'virtualization-type': 'paravirtual'}) set([ami.id for ami in amis_by_virtualization] - ).should.equal(set([imageB.id])) + ).should.contain(imageB.id) + len(amis_by_virtualization).should.equal(3) amis_by_platform = conn.get_all_images(filters={'platform': 'windows'}) - set([ami.id for ami in amis_by_platform]).should.equal(set([imageA.id])) + set([ami.id for ami in amis_by_platform]).should.contain(imageA.id) + 
len(amis_by_platform).should.equal(24) amis_by_id = conn.get_all_images(filters={'image-id': imageA.id}) set([ami.id for ami in amis_by_id]).should.equal(set([imageA.id])) amis_by_state = conn.get_all_images(filters={'state': 'available'}) - set([ami.id for ami in amis_by_state]).should.equal( - set([imageA.id, imageB.id])) + ami_ids_by_state = [ami.id for ami in amis_by_state] + ami_ids_by_state.should.contain(imageA.id) + ami_ids_by_state.should.contain(imageB.id) + len(amis_by_state).should.equal(36) amis_by_name = conn.get_all_images(filters={'name': imageA.name}) set([ami.id for ami in amis_by_name]).should.equal(set([imageA.id])) amis_by_public = conn.get_all_images(filters={'is-public': True}) - set([ami.id for ami in amis_by_public]).should.equal(set([imageB.id])) + set([ami.id for ami in amis_by_public]).should.contain(imageB.id) + len(amis_by_public).should.equal(35) amis_by_nonpublic = conn.get_all_images(filters={'is-public': False}) - set([ami.id for ami in amis_by_nonpublic]).should.equal(set([imageA.id])) + set([ami.id for ami in amis_by_nonpublic]).should.contain(imageA.id) + len(amis_by_nonpublic).should.equal(1) @mock_ec2_deprecated @@ -427,18 +447,17 @@ def test_ami_attribute_user_permissions(): **REMOVE_USERS_ARGS).should_not.throw(EC2ResponseError) -@mock_ec2_deprecated +@mock_ec2 def test_ami_describe_executable_users(): conn = boto3.client('ec2', region_name='us-east-1') ec2 = boto3.resource('ec2', 'us-east-1') ec2.create_instances(ImageId='', MinCount=1, MaxCount=1) - response = conn.describe_instances(Filters=[{'Name': 'instance-state-name','Values': ['running']}]) + response = conn.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]) instance_id = response['Reservations'][0]['Instances'][0]['InstanceId'] image_id = conn.create_image(InstanceId=instance_id, - Name='TestImage',)['ImageId'] - + Name='TestImage', )['ImageId'] USER1 = '123456789011' @@ -460,19 +479,18 @@ def test_ami_describe_executable_users(): images[0]['ImageId'].should.equal(image_id) -@mock_ec2_deprecated +@mock_ec2 def test_ami_describe_executable_users_negative(): conn = boto3.client('ec2', region_name='us-east-1') ec2 = boto3.resource('ec2', 'us-east-1') ec2.create_instances(ImageId='', MinCount=1, MaxCount=1) - response = conn.describe_instances(Filters=[{'Name': 'instance-state-name','Values': ['running']}]) + response = conn.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]) instance_id = response['Reservations'][0]['Instances'][0]['InstanceId'] image_id = conn.create_image(InstanceId=instance_id, Name='TestImage')['ImageId'] - USER1 = '123456789011' USER2 = '113355789012' @@ -481,6 +499,7 @@ def test_ami_describe_executable_users_negative(): 'OperationType': 'add', 'UserIds': [USER1]} + # Add users and get no images # Add users and get no images conn.modify_image_attribute(**ADD_USER_ARGS) @@ -493,18 +512,17 @@ def test_ami_describe_executable_users_negative(): images.should.have.length_of(0) -@mock_ec2_deprecated +@mock_ec2 def test_ami_describe_executable_users_and_filter(): conn = boto3.client('ec2', region_name='us-east-1') ec2 = boto3.resource('ec2', 'us-east-1') ec2.create_instances(ImageId='', MinCount=1, MaxCount=1) - response = conn.describe_instances(Filters=[{'Name': 'instance-state-name','Values': ['running']}]) + response = conn.describe_instances(Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]) instance_id = response['Reservations'][0]['Instances'][0]['InstanceId'] image_id = 
conn.create_image(InstanceId=instance_id, - Name='ImageToDelete',)['ImageId'] - + Name='ImageToDelete', )['ImageId'] USER1 = '123456789011' @@ -667,12 +685,34 @@ def test_ami_attribute_error_cases(): @mock_ec2 -def test_ami_filter_wildcard(): +def test_ami_describe_non_existent(): ec2 = boto3.resource('ec2', region_name='us-west-1') - instance = ec2.create_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0] - image = instance.create_image(Name='test-image') - filter_result = list(ec2.images.filter(Owners=['111122223333'], Filters=[{'Name':'name', 'Values':['test*']}])) - assert filter_result == [image] + # Valid pattern but non-existent id + img = ec2.Image('ami-abcd1234') + with assert_raises(ClientError): + img.load() + # Invalid ami pattern + img = ec2.Image('not_an_ami_id') + with assert_raises(ClientError): + img.load() + + +@mock_ec2 +def test_ami_filter_wildcard(): + ec2_resource = boto3.resource('ec2', region_name='us-west-1') + ec2_client = boto3.client('ec2', region_name='us-west-1') + + instance = ec2_resource.create_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0] + instance.create_image(Name='test-image') + + # create an image with the same owner but will not match the filter + instance.create_image(Name='not-matching-image') + + my_images = ec2_client.describe_images( + Owners=['111122223333'], + Filters=[{'Name': 'name', 'Values': ['test*']}] + )['Images'] + my_images.should.have.length_of(1) @mock_ec2 @@ -691,3 +731,46 @@ def test_ami_filter_by_owner_id(): assert all(ubuntu_ids) and ubuntu_ids[0] == ubuntu_id # Check we actually have a subset of images assert len(ubuntu_ids) < len(all_ids) + + +@mock_ec2 +def test_ami_filter_by_self(): + ec2_resource = boto3.resource('ec2', region_name='us-west-1') + ec2_client = boto3.client('ec2', region_name='us-west-1') + + my_images = ec2_client.describe_images(Owners=['self'])['Images'] + my_images.should.have.length_of(0) + + # Create a new image + instance = ec2_resource.create_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)[0] + instance.create_image(Name='test-image') + + my_images = ec2_client.describe_images(Owners=['self'])['Images'] + my_images.should.have.length_of(1) + + +@mock_ec2 +def test_ami_snapshots_have_correct_owner(): + ec2_client = boto3.client('ec2', region_name='us-west-1') + + images_response = ec2_client.describe_images() + + owner_id_to_snapshot_ids = {} + for image in images_response['Images']: + owner_id = image['OwnerId'] + snapshot_ids = [ + block_device_mapping['Ebs']['SnapshotId'] + for block_device_mapping in image['BlockDeviceMappings'] + ] + existing_snapshot_ids = owner_id_to_snapshot_ids.get(owner_id, []) + owner_id_to_snapshot_ids[owner_id] = ( + existing_snapshot_ids + snapshot_ids + ) + + for owner_id in owner_id_to_snapshot_ids: + snapshots_rseponse = ec2_client.describe_snapshots( + SnapshotIds=owner_id_to_snapshot_ids[owner_id] + ) + + for snapshot in snapshots_rseponse['Snapshots']: + assert owner_id == snapshot['OwnerId'] diff --git a/tests/test_ec2/test_availability_zones_and_regions.py b/tests/test_ec2/test_availability_zones_and_regions.py index 7226cacaf..c64f075ca 100644 --- a/tests/test_ec2/test_availability_zones_and_regions.py +++ b/tests/test_ec2/test_availability_zones_and_regions.py @@ -36,6 +36,11 @@ def test_boto3_describe_regions(): for rec in resp['Regions']: rec['Endpoint'].should.contain(rec['RegionName']) + test_region = 'us-east-1' + resp = ec2.describe_regions(RegionNames=[test_region]) + resp['Regions'].should.have.length_of(1) + 
resp['Regions'][0].should.have.key('RegionName').which.should.equal(test_region) + @mock_ec2 def test_boto3_availability_zones(): diff --git a/tests/test_ec2/test_elastic_block_store.py b/tests/test_ec2/test_elastic_block_store.py index 9c07f38d6..32ce1be22 100644 --- a/tests/test_ec2/test_elastic_block_store.py +++ b/tests/test_ec2/test_elastic_block_store.py @@ -5,10 +5,12 @@ from nose.tools import assert_raises from moto.ec2 import ec2_backends import boto +import boto3 +from botocore.exceptions import ClientError from boto.exception import EC2ResponseError import sure # noqa -from moto import mock_ec2_deprecated +from moto import mock_ec2_deprecated, mock_ec2 @mock_ec2_deprecated @@ -185,6 +187,11 @@ def test_volume_filters(): {volume1.id, volume3.id, volume4.id} ) + volumes_by_availability_zone = conn.get_all_volumes(filters={'availability-zone': 'us-east-1b'}) + set([vol.id for vol in volumes_by_availability_zone if vol.id in volume_ids]).should.equal( + {volume2.id} + ) + @mock_ec2_deprecated def test_volume_attach_and_detach(): @@ -579,3 +586,78 @@ def test_volume_tag_escaping(): snaps = [snap for snap in conn.get_all_snapshots() if snap.id == snapshot.id] dict(snaps[0].tags).should.equal({'key': ''}) + + +@mock_ec2 +def test_copy_snapshot(): + ec2_client = boto3.client('ec2', region_name='eu-west-1') + dest_ec2_client = boto3.client('ec2', region_name='eu-west-2') + + volume_response = ec2_client.create_volume( + AvailabilityZone='eu-west-1a', Size=10 + ) + + create_snapshot_response = ec2_client.create_snapshot( + VolumeId=volume_response['VolumeId'] + ) + + copy_snapshot_response = dest_ec2_client.copy_snapshot( + SourceSnapshotId=create_snapshot_response['SnapshotId'], + SourceRegion="eu-west-1" + ) + + ec2 = boto3.resource('ec2', region_name='eu-west-1') + dest_ec2 = boto3.resource('ec2', region_name='eu-west-2') + + source = ec2.Snapshot(create_snapshot_response['SnapshotId']) + dest = dest_ec2.Snapshot(copy_snapshot_response['SnapshotId']) + + attribs = ['data_encryption_key_id', 'encrypted', + 'kms_key_id', 'owner_alias', 'owner_id', 'progress', + 'start_time', 'state', 'state_message', + 'tags', 'volume_id', 'volume_size'] + + for attrib in attribs: + getattr(source, attrib).should.equal(getattr(dest, attrib)) + + # Copy from non-existent source ID. + with assert_raises(ClientError) as cm: + create_snapshot_error = ec2_client.create_snapshot( + VolumeId='vol-abcd1234' + ) + cm.exception.response['Error']['Code'].should.equal('InvalidVolume.NotFound') + cm.exception.response['Error']['Message'].should.equal("The volume 'vol-abcd1234' does not exist.") + cm.exception.response['ResponseMetadata']['RequestId'].should_not.be.none + cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + + # Copy from non-existent source region. 
+ with assert_raises(ClientError) as cm: + copy_snapshot_response = dest_ec2_client.copy_snapshot( + SourceSnapshotId=create_snapshot_response['SnapshotId'], + SourceRegion="eu-west-2" + ) + cm.exception.response['Error']['Code'].should.equal('InvalidSnapshot.NotFound') + cm.exception.response['Error']['Message'].should.be.none + cm.exception.response['ResponseMetadata']['RequestId'].should_not.be.none + cm.exception.response['ResponseMetadata']['HTTPStatusCode'].should.equal(400) + +@mock_ec2 +def test_search_for_many_snapshots(): + ec2_client = boto3.client('ec2', region_name='eu-west-1') + + volume_response = ec2_client.create_volume( + AvailabilityZone='eu-west-1a', Size=10 + ) + + snapshot_ids = [] + for i in range(1, 20): + create_snapshot_response = ec2_client.create_snapshot( + VolumeId=volume_response['VolumeId'] + ) + snapshot_ids.append(create_snapshot_response['SnapshotId']) + + snapshots_response = ec2_client.describe_snapshots( + SnapshotIds=snapshot_ids + ) + + assert len(snapshots_response['Snapshots']) == len(snapshot_ids) diff --git a/tests/test_ec2/test_spot_fleet.py b/tests/test_ec2/test_spot_fleet.py index a8d33c299..a2bd1d061 100644 --- a/tests/test_ec2/test_spot_fleet.py +++ b/tests/test_ec2/test_spot_fleet.py @@ -316,3 +316,30 @@ def test_modify_spot_fleet_request_down_no_terminate_after_custom_terminate(): SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'][0]['SpotFleetRequestConfig'] spot_fleet_config['TargetCapacity'].should.equal(1) spot_fleet_config['FulfilledCapacity'].should.equal(2.0) + + +@mock_ec2 +def test_create_spot_fleet_without_spot_price(): + conn = boto3.client("ec2", region_name='us-west-2') + subnet_id = get_subnet_id(conn) + + # remove prices to force a fallback to ondemand price + spot_config_without_price = spot_config(subnet_id) + del spot_config_without_price['SpotPrice'] + for spec in spot_config_without_price['LaunchSpecifications']: + del spec['SpotPrice'] + + spot_fleet_id = conn.request_spot_fleet(SpotFleetRequestConfig=spot_config_without_price)['SpotFleetRequestId'] + spot_fleet_requests = conn.describe_spot_fleet_requests( + SpotFleetRequestIds=[spot_fleet_id])['SpotFleetRequestConfigs'] + len(spot_fleet_requests).should.equal(1) + spot_fleet_request = spot_fleet_requests[0] + spot_fleet_config = spot_fleet_request['SpotFleetRequestConfig'] + + len(spot_fleet_config['LaunchSpecifications']).should.equal(2) + launch_spec1 = spot_fleet_config['LaunchSpecifications'][0] + launch_spec2 = spot_fleet_config['LaunchSpecifications'][1] + + # AWS will figure out the price + assert 'SpotPrice' not in launch_spec1 + assert 'SpotPrice' not in launch_spec2 diff --git a/tests/test_ec2/test_tags.py b/tests/test_ec2/test_tags.py index ccef5a288..c92a4f81f 100644 --- a/tests/test_ec2/test_tags.py +++ b/tests/test_ec2/test_tags.py @@ -4,11 +4,12 @@ from nose.tools import assert_raises import itertools import boto +import boto3 from boto.exception import EC2ResponseError from boto.ec2.instance import Reservation import sure # noqa -from moto import mock_ec2_deprecated +from moto import mock_ec2_deprecated, mock_ec2 from nose.tools import assert_raises @@ -385,3 +386,68 @@ def test_filter_instances_by_wildcard_tags(): reservations = conn.get_all_instances(filters={'tag-value': 'Value*'}) reservations.should.have.length_of(2) + + +@mock_ec2 +def test_create_volume_with_tags(): + client = boto3.client('ec2', 'us-west-2') + response = client.create_volume( + AvailabilityZone='us-west-2', + Encrypted=False, + Size=40, + TagSpecifications=[ + { + 
'ResourceType': 'volume', + 'Tags': [ + { + 'Key': 'TEST_TAG', + 'Value': 'TEST_VALUE' + } + ], + } + ] + ) + + assert response['Tags'][0]['Key'] == 'TEST_TAG' + + +@mock_ec2 +def test_create_snapshot_with_tags(): + client = boto3.client('ec2', 'us-west-2') + volume_id = client.create_volume( + AvailabilityZone='us-west-2', + Encrypted=False, + Size=40, + TagSpecifications=[ + { + 'ResourceType': 'volume', + 'Tags': [ + { + 'Key': 'TEST_TAG', + 'Value': 'TEST_VALUE' + } + ], + } + ] + )['VolumeId'] + snapshot = client.create_snapshot( + VolumeId=volume_id, + TagSpecifications=[ + { + 'ResourceType': 'snapshot', + 'Tags': [ + { + 'Key': 'TEST_SNAPSHOT_TAG', + 'Value': 'TEST_SNAPSHOT_VALUE' + } + ], + } + ] + ) + + expected_tags = [{ + 'Key': 'TEST_SNAPSHOT_TAG', + 'Value': 'TEST_SNAPSHOT_VALUE' + }] + + assert snapshot['Tags'] == expected_tags diff --git a/tests/test_ec2/test_vpcs.py b/tests/test_ec2/test_vpcs.py index fc0a93cbb..318491b44 100644 --- a/tests/test_ec2/test_vpcs.py +++ b/tests/test_ec2/test_vpcs.py @@ -2,6 +2,8 @@ from __future__ import unicode_literals # Ensure 'assert_raises' context manager support for Python 2.6 import tests.backport_assert_raises # flake8: noqa from nose.tools import assert_raises +from moto.ec2.exceptions import EC2ClientError +from botocore.exceptions import ClientError import boto3 import boto @@ -275,8 +277,8 @@ def test_default_vpc(): def test_non_default_vpc(): ec2 = boto3.resource('ec2', region_name='us-west-1') - # Create the default VPC - ec2.create_vpc(CidrBlock='172.31.0.0/16') + # Create the default VPC - this already exists when the backend is instantiated! + #ec2.create_vpc(CidrBlock='172.31.0.0/16') # Create the non default VPC vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') @@ -295,6 +297,12 @@ def test_non_default_vpc(): attr = response.get('EnableDnsHostnames') attr.get('Value').shouldnt.be.ok + # Check Primary CIDR Block Associations + cidr_block_association_set = next(iter(vpc.cidr_block_association_set), None) + cidr_block_association_set['CidrBlockState']['State'].should.equal('associated') + cidr_block_association_set['CidrBlock'].should.equal(vpc.cidr_block) + cidr_block_association_set['AssociationId'].should.contain('vpc-cidr-assoc') + @mock_ec2 def test_vpc_dedicated_tenancy(): @@ -340,7 +348,6 @@ def test_vpc_modify_enable_dns_hostnames(): ec2.create_vpc(CidrBlock='172.31.0.0/16') vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') - # Test default values for VPC attributes response = vpc.describe_attribute(Attribute='enableDnsHostnames') attr = response.get('EnableDnsHostnames') @@ -364,3 +371,171 @@ def test_vpc_associate_dhcp_options(): vpc.update() dhcp_options.id.should.equal(vpc.dhcp_options_id) + + +@mock_ec2 +def test_associate_vpc_ipv4_cidr_block(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24') + + # Associate/Extend vpc CIDR range up to 5 CIDRs + for i in range(43, 47): + response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.{}.0/24'.format(i)) + response['CidrBlockAssociation']['CidrBlockState']['State'].should.equal('associating') + response['CidrBlockAssociation']['CidrBlock'].should.equal('10.10.{}.0/24'.format(i)) + response['CidrBlockAssociation']['AssociationId'].should.contain('vpc-cidr-assoc') + + # Check all associations exist + vpc = ec2.Vpc(vpc.id) + vpc.cidr_block_association_set.should.have.length_of(5) + vpc.cidr_block_association_set[2]['CidrBlockState']['State'].should.equal('associated') + 
vpc.cidr_block_association_set[4]['CidrBlockState']['State'].should.equal('associated') + + # Check error on adding 6th association. + with assert_raises(ClientError) as ex: + response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.50.0/22') + str(ex.exception).should.equal( + "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock " + "operation: This network '{}' has met its maximum number of allowed CIDRs: 5".format(vpc.id)) + +@mock_ec2 +def test_disassociate_vpc_ipv4_cidr_block(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24') + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, CidrBlock='10.10.43.0/24') + + # Remove an extended cidr block + vpc = ec2.Vpc(vpc.id) + non_default_assoc_cidr_block = next(iter([x for x in vpc.cidr_block_association_set if vpc.cidr_block != x['CidrBlock']]), None) + response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=non_default_assoc_cidr_block['AssociationId']) + response['CidrBlockAssociation']['CidrBlockState']['State'].should.equal('disassociating') + response['CidrBlockAssociation']['CidrBlock'].should.equal(non_default_assoc_cidr_block['CidrBlock']) + response['CidrBlockAssociation']['AssociationId'].should.equal(non_default_assoc_cidr_block['AssociationId']) + + # Error attempting to delete a non-existent CIDR_BLOCK association + with assert_raises(ClientError) as ex: + response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId='vpc-cidr-assoc-BORING123') + str(ex.exception).should.equal( + "An error occurred (InvalidVpcCidrBlockAssociationIdError.NotFound) when calling the " + "DisassociateVpcCidrBlock operation: The vpc CIDR block association ID " + "'vpc-cidr-assoc-BORING123' does not exist") + + # Error attempting to delete Primary CIDR BLOCK association + vpc_base_cidr_assoc_id = next(iter([x for x in vpc.cidr_block_association_set + if vpc.cidr_block == x['CidrBlock']]), {})['AssociationId'] + + with assert_raises(ClientError) as ex: + response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=vpc_base_cidr_assoc_id) + str(ex.exception).should.equal( + "An error occurred (OperationNotPermitted) when calling the DisassociateVpcCidrBlock operation: " + "The vpc CIDR block with association ID {} may not be disassociated. 
It is the primary " + "IPv4 CIDR block of the VPC".format(vpc_base_cidr_assoc_id)) + +@mock_ec2 +def test_cidr_block_association_filters(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + vpc1 = ec2.create_vpc(CidrBlock='10.90.0.0/16') + vpc2 = ec2.create_vpc(CidrBlock='10.91.0.0/16') + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc2.id, CidrBlock='10.10.0.0/19') + vpc3 = ec2.create_vpc(CidrBlock='10.92.0.0/24') + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.1.0/24') + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.2.0/24') + vpc3_assoc_response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.3.0/24') + + # Test filters for a cidr-block in all VPCs cidr-block-associations + filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.cidr-block', + 'Values': ['10.10.0.0/19']}])) + filtered_vpcs.should.be.length_of(1) + filtered_vpcs[0].id.should.equal(vpc2.id) + + # Test filter for association id in VPCs + association_id = vpc3_assoc_response['CidrBlockAssociation']['AssociationId'] + filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.association-id', + 'Values': [association_id]}])) + filtered_vpcs.should.be.length_of(1) + filtered_vpcs[0].id.should.equal(vpc3.id) + + # Test filter for association state in VPC - this will never show anything in this test + filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'cidr-block-association.association-id', + 'Values': ['failing']}])) + filtered_vpcs.should.be.length_of(0) + +@mock_ec2 +def test_vpc_associate_ipv6_cidr_block(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Test create VPC with IPV6 cidr range + vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24', AmazonProvidedIpv6CidrBlock=True) + ipv6_cidr_block_association_set = next(iter(vpc.ipv6_cidr_block_association_set), None) + ipv6_cidr_block_association_set['Ipv6CidrBlockState']['State'].should.equal('associated') + ipv6_cidr_block_association_set['Ipv6CidrBlock'].should.contain('::/56') + ipv6_cidr_block_association_set['AssociationId'].should.contain('vpc-cidr-assoc') + + # Test Fail on adding 2nd IPV6 association - AWS only allows 1 at this time! 
+ with assert_raises(ClientError) as ex: + response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True) + str(ex.exception).should.equal( + "An error occurred (CidrLimitExceeded) when calling the AssociateVpcCidrBlock " + "operation: This network '{}' has met its maximum number of allowed CIDRs: 1".format(vpc.id)) + + # Test associate ipv6 cidr block after vpc created + vpc = ec2.create_vpc(CidrBlock='10.10.50.0/24') + response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc.id, AmazonProvidedIpv6CidrBlock=True) + response['Ipv6CidrBlockAssociation']['Ipv6CidrBlockState']['State'].should.equal('associating') + response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'].should.contain('::/56') + response['Ipv6CidrBlockAssociation']['AssociationId'].should.contain('vpc-cidr-assoc-') + + # Check on describe vpc that has ipv6 cidr block association + vpc = ec2.Vpc(vpc.id) + vpc.ipv6_cidr_block_association_set.should.be.length_of(1) + + +@mock_ec2 +def test_vpc_disassociate_ipv6_cidr_block(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + + # Test create VPC with IPV6 cidr range + vpc = ec2.create_vpc(CidrBlock='10.10.42.0/24', AmazonProvidedIpv6CidrBlock=True) + # Test disassociating the only IPV6 + assoc_id = vpc.ipv6_cidr_block_association_set[0]['AssociationId'] + response = ec2.meta.client.disassociate_vpc_cidr_block(AssociationId=assoc_id) + response['Ipv6CidrBlockAssociation']['Ipv6CidrBlockState']['State'].should.equal('disassociating') + response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'].should.contain('::/56') + response['Ipv6CidrBlockAssociation']['AssociationId'].should.equal(assoc_id) + + +@mock_ec2 +def test_ipv6_cidr_block_association_filters(): + ec2 = boto3.resource('ec2', region_name='us-west-1') + vpc1 = ec2.create_vpc(CidrBlock='10.90.0.0/16') + + vpc2 = ec2.create_vpc(CidrBlock='10.91.0.0/16', AmazonProvidedIpv6CidrBlock=True) + vpc2_assoc_ipv6_assoc_id = vpc2.ipv6_cidr_block_association_set[0]['AssociationId'] + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc2.id, CidrBlock='10.10.0.0/19') + + vpc3 = ec2.create_vpc(CidrBlock='10.92.0.0/24') + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.1.0/24') + ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, CidrBlock='10.92.2.0/24') + response = ec2.meta.client.associate_vpc_cidr_block(VpcId=vpc3.id, AmazonProvidedIpv6CidrBlock=True) + vpc3_ipv6_cidr_block = response['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'] + + vpc4 = ec2.create_vpc(CidrBlock='10.95.0.0/16') # Here for its looks + + # Test filters for an ipv6 cidr-block in all VPCs cidr-block-associations + filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.ipv6-cidr-block', + 'Values': [vpc3_ipv6_cidr_block]}])) + filtered_vpcs.should.be.length_of(1) + filtered_vpcs[0].id.should.equal(vpc3.id) + + # Test filter for association id in VPCs + filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.association-id', + 'Values': [vpc2_assoc_ipv6_assoc_id]}])) + filtered_vpcs.should.be.length_of(1) + filtered_vpcs[0].id.should.equal(vpc2.id) + + # Test filter for association state in VPC - this will never show anything in this test + filtered_vpcs = list(ec2.vpcs.filter(Filters=[{'Name': 'ipv6-cidr-block-association.state', + 'Values': ['associated']}])) + filtered_vpcs.should.be.length_of(2) # 2 of 4 VPCs diff --git a/tests/test_ecr/test_ecr_boto3.py b/tests/test_ecr/test_ecr_boto3.py index 00628e22f..b4497ef60 100644 --- 
a/tests/test_ecr/test_ecr_boto3.py +++ b/tests/test_ecr/test_ecr_boto3.py @@ -9,7 +9,7 @@ import re import sure # noqa import boto3 -from botocore.exceptions import ClientError +from botocore.exceptions import ClientError, ParamValidationError from dateutil.tz import tzlocal from moto import mock_ecr @@ -445,3 +445,117 @@ def test_get_authorization_token_explicit_regions(): } ]) + + +@mock_ecr +def test_batch_get_image(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v1' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v2' + ) + + response = client.batch_get_image( + repositoryName='test_repository', + imageIds=[ + { + 'imageTag': 'v2' + }, + ], + ) + + type(response['images']).should.be(list) + len(response['images']).should.be(1) + + response['images'][0]['imageManifest'].should.contain("vnd.docker.distribution.manifest.v2+json") + response['images'][0]['registryId'].should.equal("012345678910") + response['images'][0]['repositoryName'].should.equal("test_repository") + + response['images'][0]['imageId']['imageTag'].should.equal("v2") + response['images'][0]['imageId']['imageDigest'].should.contain("sha") + + type(response['failures']).should.be(list) + len(response['failures']).should.be(0) + + +@mock_ecr +def test_batch_get_image_that_doesnt_exist(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v1' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='v2' + ) + + response = client.batch_get_image( + repositoryName='test_repository', + imageIds=[ + { + 'imageTag': 'v5' + }, + ], + ) + + type(response['images']).should.be(list) + len(response['images']).should.be(0) + + type(response['failures']).should.be(list) + len(response['failures']).should.be(1) + response['failures'][0]['failureReason'].should.equal("Requested image not found") + response['failures'][0]['failureCode'].should.equal("ImageNotFound") + response['failures'][0]['imageId']['imageTag'].should.equal("v5") + + +@mock_ecr +def test_batch_get_image_no_tags(): + client = boto3.client('ecr', region_name='us-east-1') + _ = client.create_repository( + repositoryName='test_repository' + ) + + _ = client.put_image( + repositoryName='test_repository', + imageManifest=json.dumps(_create_image_manifest()), + imageTag='latest' + ) + + error_msg = re.compile( + r".*Missing required parameter in input: \"imageIds\".*", + re.MULTILINE) + + client.batch_get_image.when.called_with( + repositoryName='test_repository').should.throw( + ParamValidationError, error_msg) diff --git a/tests/test_ecs/test_ecs_boto3.py b/tests/test_ecs/test_ecs_boto3.py index 5fcc297aa..d2cfd3724 100644 --- a/tests/test_ecs/test_ecs_boto3.py +++ b/tests/test_ecs/test_ecs_boto3.py @@ -664,7 +664,7 @@ def 
test_list_container_instances(): instanceIdentityDocument=instance_id_document) test_instance_arns.append(response['containerInstance'][ - 'containerInstanceArn']) + 'containerInstanceArn']) response = ecs_client.list_container_instances(cluster=test_cluster_name) @@ -702,7 +702,7 @@ def test_describe_container_instances(): instanceIdentityDocument=instance_id_document) test_instance_arns.append(response['containerInstance'][ - 'containerInstanceArn']) + 'containerInstanceArn']) test_instance_ids = list( map((lambda x: x.split('/')[1]), test_instance_arns)) @@ -1052,7 +1052,7 @@ def test_describe_tasks(): len(response['tasks']).should.equal(2) set([response['tasks'][0]['taskArn'], response['tasks'] - [1]['taskArn']]).should.equal(set(tasks_arns)) + [1]['taskArn']]).should.equal(set(tasks_arns)) @mock_ecs @@ -1208,7 +1208,8 @@ def test_resource_reservation_and_release(): cluster='test_ecs_cluster', containerInstances=[container_instance_arn] )['containerInstances'][0] - remaining_resources, registered_resources = _fetch_container_instance_resources(container_instance_description) + remaining_resources, registered_resources = _fetch_container_instance_resources( + container_instance_description) remaining_resources['CPU'].should.equal(registered_resources['CPU'] - 1024) remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY'] - 400) registered_resources['PORTS'].append('80') @@ -1223,6 +1224,87 @@ def test_resource_reservation_and_release(): cluster='test_ecs_cluster', containerInstances=[container_instance_arn] )['containerInstances'][0] + remaining_resources, registered_resources = _fetch_container_instance_resources( + container_instance_description) + remaining_resources['CPU'].should.equal(registered_resources['CPU']) + remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY']) + remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) + container_instance_description['runningTasksCount'].should.equal(0) + +@mock_ec2 +@mock_ecs +def test_resource_reservation_and_release_memory_reservation(): + client = boto3.client('ecs', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + test_cluster_name = 'test_ecs_cluster' + + _ = client.create_cluster( + clusterName=test_cluster_name + ) + + test_instance = ec2.create_instances( + ImageId="ami-1234abcd", + MinCount=1, + MaxCount=1, + )[0] + + instance_id_document = json.dumps( + ec2_utils.generate_instance_identity_document(test_instance) + ) + + _ = client.register_container_instance( + cluster=test_cluster_name, + instanceIdentityDocument=instance_id_document + ) + + _ = client.register_task_definition( + family='test_ecs_task', + containerDefinitions=[ + { + 'name': 'hello_world', + 'image': 'docker/hello-world:latest', + 'memoryReservation': 400, + 'essential': True, + 'environment': [{ + 'name': 'AWS_ACCESS_KEY_ID', + 'value': 'SOME_ACCESS_KEY' + }], + 'logConfiguration': {'logDriver': 'json-file'}, + 'portMappings': [ + { + 'containerPort': 8080 + } + ] + } + ] + ) + run_response = client.run_task( + cluster='test_ecs_cluster', + overrides={}, + taskDefinition='test_ecs_task', + count=1, + startedBy='moto' + ) + container_instance_arn = run_response['tasks'][0].get('containerInstanceArn') + container_instance_description = client.describe_container_instances( + cluster='test_ecs_cluster', + containerInstances=[container_instance_arn] + )['containerInstances'][0] + remaining_resources, registered_resources = 
_fetch_container_instance_resources(container_instance_description) + remaining_resources['CPU'].should.equal(registered_resources['CPU']) + remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY'] - 400) + remaining_resources['PORTS'].should.equal(registered_resources['PORTS']) + container_instance_description['runningTasksCount'].should.equal(1) + client.stop_task( + cluster='test_ecs_cluster', + task=run_response['tasks'][0].get('taskArn'), + reason='moto testing' + ) + container_instance_description = client.describe_container_instances( + cluster='test_ecs_cluster', + containerInstances=[container_instance_arn] + )['containerInstances'][0] remaining_resources, registered_resources = _fetch_container_instance_resources(container_instance_description) remaining_resources['CPU'].should.equal(registered_resources['CPU']) remaining_resources['MEMORY'].should.equal(registered_resources['MEMORY']) @@ -1230,6 +1312,7 @@ def test_resource_reservation_and_release(): container_instance_description['runningTasksCount'].should.equal(0) + @mock_ecs @mock_cloudformation def test_create_cluster_through_cloudformation(): @@ -1246,6 +1329,36 @@ def test_create_cluster_through_cloudformation(): } } template_json = json.dumps(template) + + ecs_conn = boto3.client('ecs', region_name='us-west-1') + resp = ecs_conn.list_clusters() + len(resp['clusterArns']).should.equal(0) + + cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + cfn_conn.create_stack( + StackName="test_stack", + TemplateBody=template_json, + ) + + resp = ecs_conn.list_clusters() + len(resp['clusterArns']).should.equal(1) + + +@mock_ecs +@mock_cloudformation +def test_create_cluster_through_cloudformation_no_name(): + # cloudformation should create a cluster name for you if you do not provide it + # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecs-cluster.html#cfn-ecs-cluster-clustername + template = { + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "ECS Cluster Test CloudFormation", + "Resources": { + "testCluster": { + "Type": "AWS::ECS::Cluster", + } + } + } + template_json = json.dumps(template) cfn_conn = boto3.client('cloudformation', region_name='us-west-1') cfn_conn.create_stack( StackName="test_stack", @@ -1319,15 +1432,20 @@ def test_create_task_definition_through_cloudformation(): } template_json = json.dumps(template) cfn_conn = boto3.client('cloudformation', region_name='us-west-1') + stack_name = 'test_stack' cfn_conn.create_stack( - StackName="test_stack", + StackName=stack_name, TemplateBody=template_json, ) ecs_conn = boto3.client('ecs', region_name='us-west-1') resp = ecs_conn.list_task_definitions() len(resp['taskDefinitionArns']).should.equal(1) + task_definition_arn = resp['taskDefinitionArns'][0] + task_definition_details = cfn_conn.describe_stack_resource( + StackName=stack_name,LogicalResourceId='testTaskDefinition')['StackResourceDetail'] + task_definition_details['PhysicalResourceId'].should.equal(task_definition_arn) @mock_ec2 @mock_ecs @@ -1674,7 +1792,8 @@ def test_attributes(): attributes=[ {'name': 'env', 'value': 'prod'}, {'name': 'attr1', 'value': 'instance1', 'targetId': full_arn1}, - {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, 'targetType': 'container-instance'} + {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, + 'targetType': 'container-instance'} ] ) @@ -1690,12 +1809,14 @@ def test_attributes(): # Tests that the attrs have been set properly len(list(filter(lambda item: item['name'] == 
'env', attrs))).should.equal(2) - len(list(filter(lambda item: item['name'] == 'attr1' and item['value'] == 'instance1', attrs))).should.equal(1) + len(list( + filter(lambda item: item['name'] == 'attr1' and item['value'] == 'instance1', attrs))).should.equal(1) ecs_client.delete_attributes( cluster=test_cluster_name, attributes=[ - {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, 'targetType': 'container-instance'} + {'name': 'attr1', 'value': 'instance2', 'targetId': partial_arn2, + 'targetType': 'container-instance'} ] ) NUM_CUSTOM_ATTRIBUTES -= 1 @@ -1806,7 +1927,8 @@ def test_default_container_instance_attributes(): {'name': 'ecs.instance-type', 'value': test_instance.instance_type}, {'name': 'ecs.os-type', 'value': test_instance.platform or 'linux'} ] - assert sorted(default_attributes, key=lambda item: item['name']) == sorted(expected_result, key=lambda item: item['name']) + assert sorted(default_attributes, key=lambda item: item['name']) == sorted(expected_result, + key=lambda item: item['name']) @mock_ec2 @@ -1846,17 +1968,19 @@ def test_describe_container_instances_with_attributes(): # Set attributes on container instance, one without a value attributes = [ - {'name': 'env', 'value': 'prod'}, - {'name': 'attr1', 'value': 'instance1', 'targetId': container_instance_id, 'targetType': 'container-instance'}, - {'name': 'attr_without_value'} - ] + {'name': 'env', 'value': 'prod'}, + {'name': 'attr1', 'value': 'instance1', 'targetId': container_instance_id, + 'targetType': 'container-instance'}, + {'name': 'attr_without_value'} + ] ecs_client.put_attributes( cluster=test_cluster_name, attributes=attributes ) # Describe container instance, should have attributes previously set - described_instance = ecs_client.describe_container_instances(cluster=test_cluster_name, containerInstances=[container_instance_id]) + described_instance = ecs_client.describe_container_instances(cluster=test_cluster_name, + containerInstances=[container_instance_id]) assert len(described_instance['containerInstances']) == 1 assert isinstance(described_instance['containerInstances'][0]['attributes'], list) @@ -1867,7 +1991,8 @@ def test_describe_container_instances_with_attributes(): attribute.pop('targetId', None) attribute.pop('targetType', None) cleaned_attributes.append(attribute) - described_attributes = sorted(described_instance['containerInstances'][0]['attributes'], key=lambda item: item['name']) + described_attributes = sorted(described_instance['containerInstances'][0]['attributes'], + key=lambda item: item['name']) expected_attributes = sorted(default_attributes + cleaned_attributes, key=lambda item: item['name']) assert described_attributes == expected_attributes @@ -1877,10 +2002,16 @@ def _fetch_container_instance_resources(container_instance_description): registered_resources = {} remaining_resources_list = container_instance_description['remainingResources'] registered_resources_list = container_instance_description['registeredResources'] - remaining_resources['CPU'] = [x['integerValue'] for x in remaining_resources_list if x['name'] == 'CPU'][0] - remaining_resources['MEMORY'] = [x['integerValue'] for x in remaining_resources_list if x['name'] == 'MEMORY'][0] - remaining_resources['PORTS'] = [x['stringSetValue'] for x in remaining_resources_list if x['name'] == 'PORTS'][0] - registered_resources['CPU'] = [x['integerValue'] for x in registered_resources_list if x['name'] == 'CPU'][0] - registered_resources['MEMORY'] = [x['integerValue'] for x in registered_resources_list if 
x['name'] == 'MEMORY'][0] - registered_resources['PORTS'] = [x['stringSetValue'] for x in registered_resources_list if x['name'] == 'PORTS'][0] + remaining_resources['CPU'] = [x['integerValue'] for x in remaining_resources_list if x['name'] == 'CPU'][ + 0] + remaining_resources['MEMORY'] = \ + [x['integerValue'] for x in remaining_resources_list if x['name'] == 'MEMORY'][0] + remaining_resources['PORTS'] = \ + [x['stringSetValue'] for x in remaining_resources_list if x['name'] == 'PORTS'][0] + registered_resources['CPU'] = \ + [x['integerValue'] for x in registered_resources_list if x['name'] == 'CPU'][0] + registered_resources['MEMORY'] = \ + [x['integerValue'] for x in registered_resources_list if x['name'] == 'MEMORY'][0] + registered_resources['PORTS'] = \ + [x['stringSetValue'] for x in registered_resources_list if x['name'] == 'PORTS'][0] return remaining_resources, registered_resources diff --git a/tests/test_elbv2/test_elbv2.py b/tests/test_elbv2/test_elbv2.py index 4fb527525..ce092976a 100644 --- a/tests/test_elbv2/test_elbv2.py +++ b/tests/test_elbv2/test_elbv2.py @@ -340,6 +340,10 @@ def test_create_target_group_and_listeners(): 'Type': 'forward'}]) http_listener_arn = listener.get('ListenerArn') + response = conn.describe_target_groups(LoadBalancerArn=load_balancer_arn, + Names=['a-target']) + response.get('TargetGroups').should.have.length_of(1) + # And another with SSL response = conn.create_listener( LoadBalancerArn=load_balancer_arn, diff --git a/tests/test_emr/test_emr.py b/tests/test_emr/test_emr.py index 4acd7067c..505c69b11 100644 --- a/tests/test_emr/test_emr.py +++ b/tests/test_emr/test_emr.py @@ -443,7 +443,7 @@ def test_bootstrap_actions(): BootstrapAction( name='bs1', path='path/to/script', - bootstrap_action_args=['arg1', 'arg2']), + bootstrap_action_args=['arg1', 'arg2&arg3']), BootstrapAction( name='bs2', path='path/to/anotherscript', @@ -551,7 +551,7 @@ def test_steps(): input='s3n://elasticmapreduce/samples/wordcount/input', output='s3n://output_bucket/output/wordcount_output'), StreamingStep( - name='My wordcount example2', + name='My wordcount example & co.', mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter2.py', reducer='aggregate', input='s3n://elasticmapreduce/samples/wordcount/input2', diff --git a/tests/test_glacier/test_glacier_jobs.py b/tests/test_glacier/test_glacier_jobs.py index 66780f681..152aa14c8 100644 --- a/tests/test_glacier/test_glacier_jobs.py +++ b/tests/test_glacier/test_glacier_jobs.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals import json +import time from boto.glacier.layer1 import Layer1 import sure # noqa @@ -39,24 +40,11 @@ def test_describe_job(): job_id = job_response['JobId'] job = conn.describe_job(vault_name, job_id) - json.loads(job.read().decode("utf-8")).should.equal({ - 'CompletionDate': '2013-03-20T17:03:43.221Z', - 'VaultARN': None, - 'RetrievalByteRange': None, - 'SHA256TreeHash': None, - 'Completed': True, - 'InventorySizeInBytes': '0', - 'JobId': job_id, - 'Action': 'InventoryRetrieval', - 'JobDescription': None, - 'SNSTopic': None, - 'ArchiveSizeInBytes': 0, - 'ArchiveId': archive_id, - 'ArchiveSHA256TreeHash': None, - 'CreationDate': '2013-03-20T17:03:43.221Z', - 'StatusMessage': None, - 'StatusCode': 'Succeeded', - }) + joboutput = json.loads(job.read().decode("utf-8")) + + joboutput.should.have.key('Tier').which.should.equal('Standard') + joboutput.should.have.key('StatusCode').which.should.equal('InProgress') + 
joboutput.should.have.key('VaultARN').which.should.equal('arn:aws:glacier:RegionInfo:us-west-2:012345678901:vaults/my_vault') @mock_glacier_deprecated @@ -96,5 +84,7 @@ def test_get_job_output(): }) job_id = job_response['JobId'] + time.sleep(6) + output = conn.get_job_output(vault_name, job_id) output.read().decode("utf-8").should.equal("some stuff") diff --git a/tests/test_iam/test_iam.py b/tests/test_iam/test_iam.py index d50f6999e..b4dfe532d 100644 --- a/tests/test_iam/test_iam.py +++ b/tests/test_iam/test_iam.py @@ -651,3 +651,21 @@ def test_attach_detach_user_policy(): resp = client.list_attached_user_policies(UserName=user.name) resp['AttachedPolicies'].should.have.length_of(0) + + +@mock_iam +def test_update_access_key(): + iam = boto3.resource('iam', region_name='us-east-1') + client = iam.meta.client + username = 'test-user' + iam.create_user(UserName=username) + with assert_raises(ClientError): + client.update_access_key(UserName=username, + AccessKeyId='non-existent-key', + Status='Inactive') + key = client.create_access_key(UserName=username)['AccessKey'] + client.update_access_key(UserName=username, + AccessKeyId=key['AccessKeyId'], + Status='Inactive') + resp = client.list_access_keys(UserName=username) + resp['AccessKeyMetadata'][0]['Status'].should.equal('Inactive') diff --git a/tests/test_iot/test_iot.py b/tests/test_iot/test_iot.py index 31631e459..e69e55fc0 100644 --- a/tests/test_iot/test_iot.py +++ b/tests/test_iot/test_iot.py @@ -96,6 +96,23 @@ def test_certs(): res = client.list_certificates() res.should.have.key('certificates').which.should.have.length_of(0) +@mock_iot +def test_certs_create_inactive(): + client = boto3.client('iot', region_name='ap-northeast-1') + cert = client.create_keys_and_certificate(setAsActive=False) + cert_id = cert['certificateId'] + + cert = client.describe_certificate(certificateId=cert_id) + cert.should.have.key('certificateDescription') + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('status').which.should.equal('INACTIVE') + + client.update_certificate(certificateId=cert_id, newStatus='ACTIVE') + cert = client.describe_certificate(certificateId=cert_id) + cert.should.have.key('certificateDescription') + cert_desc = cert['certificateDescription'] + cert_desc.should.have.key('status').which.should.equal('ACTIVE') + @mock_iot def test_policy(): client = boto3.client('iot', region_name='ap-northeast-1') @@ -177,3 +194,192 @@ def test_principal_thing(): res.should.have.key('things').which.should.have.length_of(0) res = client.list_thing_principals(thingName=thing_name) res.should.have.key('principals').which.should.have.length_of(0) + + +@mock_iot +def test_thing_groups(): + client = boto3.client('iot', region_name='ap-northeast-1') + name = 'my-thing' + group_name = 'my-group-name' + + # thing group + thing_group = client.create_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + res = client.list_thing_groups() + res.should.have.key('thingGroups').which.should.have.length_of(1) + for thing_group in res['thingGroups']: + thing_group.should.have.key('groupName').which.should_not.be.none + thing_group.should.have.key('groupArn').which.should_not.be.none + + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupProperties') + 
thing_group.should.have.key('thingGroupMetadata') + thing_group.should.have.key('version') + + # delete thing group + client.delete_thing_group(thingGroupName=group_name) + res = client.list_thing_groups() + res.should.have.key('thingGroups').which.should.have.length_of(0) + + # props create test + props = { + 'thingGroupDescription': 'my first thing group', + 'attributePayload': { + 'attributes': { + 'key1': 'val01', + 'Key02': 'VAL2' + } + } + } + thing_group = client.create_thing_group(thingGroupName=group_name, thingGroupProperties=props) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties')\ + .which.should.have.key('attributePayload')\ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('key1').which.should.equal('val01') + res_props.should.have.key('Key02').which.should.equal('VAL2') + + # props update test with merge + new_props = { + 'attributePayload': { + 'attributes': { + 'k3': 'v3' + }, + 'merge': True + } + } + client.update_thing_group( + thingGroupName=group_name, + thingGroupProperties=new_props + ) + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties')\ + .which.should.have.key('attributePayload')\ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('key1').which.should.equal('val01') + res_props.should.have.key('Key02').which.should.equal('VAL2') + + res_props.should.have.key('k3').which.should.equal('v3') + + # props update test + new_props = { + 'attributePayload': { + 'attributes': { + 'k4': 'v4' + } + } + } + client.update_thing_group( + thingGroupName=group_name, + thingGroupProperties=new_props + ) + thing_group = client.describe_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupProperties')\ + .which.should.have.key('attributePayload')\ + .which.should.have.key('attributes') + res_props = thing_group['thingGroupProperties']['attributePayload']['attributes'] + res_props.should.have.key('k4').which.should.equal('v4') + res_props.should_not.have.key('key1') + + +@mock_iot +def test_thing_group_relations(): + client = boto3.client('iot', region_name='ap-northeast-1') + name = 'my-thing' + group_name = 'my-group-name' + + # thing group + thing_group = client.create_thing_group(thingGroupName=group_name) + thing_group.should.have.key('thingGroupName').which.should.equal(group_name) + thing_group.should.have.key('thingGroupArn') + + # thing + thing = client.create_thing(thingName=name) + thing.should.have.key('thingName').which.should.equal(name) + thing.should.have.key('thingArn') + + # add in 4 way + client.add_thing_to_thing_group( + thingGroupName=group_name, + thingName=name + ) + client.add_thing_to_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingArn=thing['thingArn'] + ) + client.add_thing_to_thing_group( + thingGroupName=group_name, + thingArn=thing['thingArn'] + ) + client.add_thing_to_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingName=name + ) + + things = client.list_things_in_thing_group( + thingGroupName=group_name + ) + things.should.have.key('things') + things['things'].should.have.length_of(1) + + thing_groups = 
client.list_thing_groups_for_thing( + thingName=name + ) + thing_groups.should.have.key('thingGroups') + thing_groups['thingGroups'].should.have.length_of(1) + + # remove in 4 way + client.remove_thing_from_thing_group( + thingGroupName=group_name, + thingName=name + ) + client.remove_thing_from_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingArn=thing['thingArn'] + ) + client.remove_thing_from_thing_group( + thingGroupName=group_name, + thingArn=thing['thingArn'] + ) + client.remove_thing_from_thing_group( + thingGroupArn=thing_group['thingGroupArn'], + thingName=name + ) + things = client.list_things_in_thing_group( + thingGroupName=group_name + ) + things.should.have.key('things') + things['things'].should.have.length_of(0) + + # update thing group for thing + client.update_thing_groups_for_thing( + thingName=name, + thingGroupsToAdd=[ + group_name + ] + ) + things = client.list_things_in_thing_group( + thingGroupName=group_name + ) + things.should.have.key('things') + things['things'].should.have.length_of(1) + + client.update_thing_groups_for_thing( + thingName=name, + thingGroupsToRemove=[ + group_name + ] + ) + things = client.list_things_in_thing_group( + thingGroupName=group_name + ) + things.should.have.key('things') + things['things'].should.have.length_of(0) diff --git a/tests/test_logs/test_logs.py b/tests/test_logs/test_logs.py index 392b3f7e9..a9a7f5260 100644 --- a/tests/test_logs/test_logs.py +++ b/tests/test_logs/test_logs.py @@ -1,7 +1,9 @@ import boto3 import sure # noqa +from botocore.exceptions import ClientError from moto import mock_logs, settings +from nose.tools import assert_raises _logs_region = 'us-east-1' if settings.TEST_SERVER_MODE else 'us-west-2' @@ -12,3 +14,103 @@ def test_log_group_create(): log_group_name = 'dummy' response = conn.create_log_group(logGroupName=log_group_name) response = conn.delete_log_group(logGroupName=log_group_name) + + +@mock_logs +def test_exceptions(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + log_stream_name = 'dummy-stream' + conn.create_log_group(logGroupName=log_group_name) + with assert_raises(ClientError): + conn.create_log_group(logGroupName=log_group_name) + + # describe_log_groups is not implemented yet + + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + with assert_raises(ClientError): + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + + conn.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=[ + { + 'timestamp': 0, + 'message': 'line' + }, + ], + ) + + with assert_raises(ClientError): + conn.put_log_events( + logGroupName=log_group_name, + logStreamName="invalid-stream", + logEvents=[ + { + 'timestamp': 0, + 'message': 'line' + }, + ], + ) + + +@mock_logs +def test_put_logs(): + conn = boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + log_stream_name = 'stream' + conn.create_log_group(logGroupName=log_group_name) + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + messages = [ + {'timestamp': 0, 'message': 'hello'}, + {'timestamp': 0, 'message': 'world'} + ] + conn.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=messages + ) + res = conn.get_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + events = res['events'] + events.should.have.length_of(2) + + +@mock_logs +def test_filter_logs_interleaved(): + conn = 
boto3.client('logs', 'us-west-2') + log_group_name = 'dummy' + log_stream_name = 'stream' + conn.create_log_group(logGroupName=log_group_name) + conn.create_log_stream( + logGroupName=log_group_name, + logStreamName=log_stream_name + ) + messages = [ + {'timestamp': 0, 'message': 'hello'}, + {'timestamp': 0, 'message': 'world'} + ] + conn.put_log_events( + logGroupName=log_group_name, + logStreamName=log_stream_name, + logEvents=messages + ) + res = conn.filter_log_events( + logGroupName=log_group_name, + logStreamNames=[log_stream_name], + interleaved=True, + ) + events = res['events'] + events.should.have.length_of(2) diff --git a/tests/test_opsworks/test_apps.py b/tests/test_opsworks/test_apps.py new file mode 100644 index 000000000..37d0f2fe4 --- /dev/null +++ b/tests/test_opsworks/test_apps.py @@ -0,0 +1,102 @@ +from __future__ import unicode_literals +import boto3 +from freezegun import freeze_time +import sure # noqa +import re + +from moto import mock_opsworks + + +@freeze_time("2015-01-01") +@mock_opsworks +def test_create_app_response(): + client = boto3.client('opsworks', region_name='us-east-1') + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + response = client.create_app( + StackId=stack_id, + Type="other", + Name="TestApp" + ) + + response.should.contain("AppId") + + second_stack_id = client.create_stack( + Name="test_stack_2", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + response = client.create_app( + StackId=second_stack_id, + Type="other", + Name="TestApp" + ) + + response.should.contain("AppId") + + # ClientError + client.create_app.when.called_with( + StackId=stack_id, + Type="other", + Name="TestApp" + ).should.throw( + Exception, re.compile(r'already an app named "TestApp"') + ) + + # ClientError + client.create_app.when.called_with( + StackId="nothere", + Type="other", + Name="TestApp" + ).should.throw( + Exception, "nothere" + ) + +@freeze_time("2015-01-01") +@mock_opsworks +def test_describe_apps(): + client = boto3.client('opsworks', region_name='us-east-1') + stack_id = client.create_stack( + Name="test_stack_1", + Region="us-east-1", + ServiceRoleArn="service_arn", + DefaultInstanceProfileArn="profile_arn" + )['StackId'] + app_id = client.create_app( + StackId=stack_id, + Type="other", + Name="TestApp" + )['AppId'] + + rv1 = client.describe_apps(StackId=stack_id) + rv2 = client.describe_apps(AppIds=[app_id]) + rv1['Apps'].should.equal(rv2['Apps']) + + rv1['Apps'][0]['Name'].should.equal("TestApp") + + # ClientError + client.describe_apps.when.called_with( + StackId=stack_id, + AppIds=[app_id] + ).should.throw( + Exception, "Please provide one or more app IDs or a stack ID" + ) + # ClientError + client.describe_apps.when.called_with( + StackId="nothere" + ).should.throw( + Exception, "Unable to find stack with ID nothere" + ) + # ClientError + client.describe_apps.when.called_with( + AppIds=["nothere"] + ).should.throw( + Exception, "nothere" + ) diff --git a/tests/test_opsworks/test_instances.py b/tests/test_opsworks/test_instances.py index 9c9e20878..f594a87c8 100644 --- a/tests/test_opsworks/test_instances.py +++ b/tests/test_opsworks/test_instances.py @@ -23,6 +23,20 @@ def test_create_instance(): Shortname="TestLayerShortName" )['LayerId'] + second_stack_id = client.create_stack( + Name="test_stack_2", + Region="us-east-1", + ServiceRoleArn="service_arn", + 
DefaultInstanceProfileArn="profile_arn" + )['StackId'] + + second_layer_id = client.create_layer( + StackId=second_stack_id, + Type="custom", + Name="SecondTestLayer", + Shortname="SecondTestLayerShortName" + )['LayerId'] + response = client.create_instance( StackId=stack_id, LayerIds=[layer_id], InstanceType="t2.micro" ) @@ -36,6 +50,14 @@ def test_create_instance(): client.create_instance.when.called_with( StackId=stack_id, LayerIds=["nothere"], InstanceType="t2.micro" ).should.throw(Exception, "nothere") + # ClientError + client.create_instance.when.called_with( + StackId=stack_id, LayerIds=[second_layer_id], InstanceType="t2.micro" + ).should.throw(Exception, "Please only provide layer IDs from the same stack") + # ClientError + client.start_instance.when.called_with( + InstanceId="nothere" + ).should.throw(Exception, "Unable to find instance with ID nothere") @mock_opsworks @@ -131,6 +153,32 @@ def test_describe_instances(): response.should.have.length_of(2) S2L1_i1.should_not.be.within([i["InstanceId"] for i in response]) + # ClientError + client.describe_instances.when.called_with( + StackId=S1, + LayerId=S1L1 + ).should.throw( + Exception, "Please provide either one or more" + ) + # ClientError + client.describe_instances.when.called_with( + StackId="nothere" + ).should.throw( + Exception, "nothere" + ) + # ClientError + client.describe_instances.when.called_with( + LayerId="nothere" + ).should.throw( + Exception, "nothere" + ) + # ClientError + client.describe_instances.when.called_with( + InstanceIds=["nothere"] + ).should.throw( + Exception, "nothere" + ) + @mock_opsworks @mock_ec2 @@ -155,7 +203,7 @@ def test_ec2_integration(): )['LayerId'] instance_id = opsworks.create_instance( - StackId=stack_id, LayerIds=[layer_id], InstanceType="t2.micro" + StackId=stack_id, LayerIds=[layer_id], InstanceType="t2.micro", SshKeyName="testSSH" )['InstanceId'] ec2 = boto3.client('ec2', region_name='us-east-1') diff --git a/tests/test_opsworks/test_layers.py b/tests/test_opsworks/test_layers.py index 03224feb0..9c640dfc3 100644 --- a/tests/test_opsworks/test_layers.py +++ b/tests/test_opsworks/test_layers.py @@ -62,6 +62,15 @@ def test_create_layer_response(): Exception, re.compile( r'already a layer with shortname "TestLayerShortName"') ) + # ClientError + client.create_layer.when.called_with( + StackId="nothere", + Type="custom", + Name="TestLayer", + Shortname="_" + ).should.throw( + Exception, "nothere" + ) @freeze_time("2015-01-01") @@ -86,3 +95,23 @@ def test_describe_layers(): rv1['Layers'].should.equal(rv2['Layers']) rv1['Layers'][0]['Name'].should.equal("TestLayer") + + # ClientError + client.describe_layers.when.called_with( + StackId=stack_id, + LayerIds=[layer_id] + ).should.throw( + Exception, "Please provide one or more layer IDs or a stack ID" + ) + # ClientError + client.describe_layers.when.called_with( + StackId="nothere" + ).should.throw( + Exception, "Unable to find stack with ID nothere" + ) + # ClientError + client.describe_layers.when.called_with( + LayerIds=["nothere"] + ).should.throw( + Exception, "nothere" + ) diff --git a/tests/test_rds2/test_rds2.py b/tests/test_rds2/test_rds2.py index 183a183b1..d056049b5 100644 --- a/tests/test_rds2/test_rds2.py +++ b/tests/test_rds2/test_rds2.py @@ -19,17 +19,20 @@ def test_create_database(): MasterUserPassword='hunter2', Port=1234, DBSecurityGroups=["my_sg"]) - database['DBInstance']['AllocatedStorage'].should.equal(10) - database['DBInstance']['DBInstanceClass'].should.equal("db.m1.small") - 
database['DBInstance']['LicenseModel'].should.equal("license-included") - database['DBInstance']['MasterUsername'].should.equal("root") - database['DBInstance']['DBSecurityGroups'][0][ + db_instance = database['DBInstance'] + db_instance['AllocatedStorage'].should.equal(10) + db_instance['DBInstanceClass'].should.equal("db.m1.small") + db_instance['LicenseModel'].should.equal("license-included") + db_instance['MasterUsername'].should.equal("root") + db_instance['DBSecurityGroups'][0][ 'DBSecurityGroupName'].should.equal('my_sg') - database['DBInstance']['DBInstanceArn'].should.equal( + db_instance['DBInstanceArn'].should.equal( 'arn:aws:rds:us-west-2:1234567890:db:db-master-1') - database['DBInstance']['DBInstanceStatus'].should.equal('available') - database['DBInstance']['DBName'].should.equal('staging-postgres') - database['DBInstance']['DBInstanceIdentifier'].should.equal("db-master-1") + db_instance['DBInstanceStatus'].should.equal('available') + db_instance['DBName'].should.equal('staging-postgres') + db_instance['DBInstanceIdentifier'].should.equal("db-master-1") + db_instance['IAMDatabaseAuthenticationEnabled'].should.equal(False) + db_instance['DbiResourceId'].should.contain("db-") @mock_rds2 @@ -197,6 +200,8 @@ def test_get_databases_paginated(): resp2 = conn.describe_db_instances(Marker=resp["Marker"]) resp2["DBInstances"].should.have.length_of(1) + resp3 = conn.describe_db_instances(MaxRecords=100) + resp3["DBInstances"].should.have.length_of(51) @mock_rds2 def test_describe_non_existant_database(): diff --git a/tests/test_redshift/test_redshift.py b/tests/test_redshift/test_redshift.py index cebaa3ec7..96e3ee5b3 100644 --- a/tests/test_redshift/test_redshift.py +++ b/tests/test_redshift/test_redshift.py @@ -34,6 +34,45 @@ def test_create_cluster_boto3(): response['Cluster']['NodeType'].should.equal('ds2.xlarge') +@mock_redshift +def test_create_snapshot_copy_grant(): + client = boto3.client('redshift', region_name='us-east-1') + grants = client.create_snapshot_copy_grant( + SnapshotCopyGrantName='test-us-east-1', + KmsKeyId='fake', + ) + grants['SnapshotCopyGrant']['SnapshotCopyGrantName'].should.equal('test-us-east-1') + grants['SnapshotCopyGrant']['KmsKeyId'].should.equal('fake') + + client.delete_snapshot_copy_grant( + SnapshotCopyGrantName='test-us-east-1', + ) + + client.describe_snapshot_copy_grants.when.called_with( + SnapshotCopyGrantName='test-us-east-1', + ).should.throw(Exception) + + +@mock_redshift +def test_create_many_snapshot_copy_grants(): + client = boto3.client('redshift', region_name='us-east-1') + + for i in range(10): + client.create_snapshot_copy_grant( + SnapshotCopyGrantName='test-us-east-1-{0}'.format(i), + KmsKeyId='fake', + ) + response = client.describe_snapshot_copy_grants() + len(response['SnapshotCopyGrants']).should.equal(10) + + +@mock_redshift +def test_no_snapshot_copy_grants(): + client = boto3.client('redshift', region_name='us-east-1') + response = client.describe_snapshot_copy_grants() + len(response['SnapshotCopyGrants']).should.equal(0) + + @mock_redshift_deprecated def test_create_cluster(): conn = boto.redshift.connect_to_region("us-east-1") @@ -294,6 +333,24 @@ def test_create_cluster_with_vpc_security_groups_boto3(): list(group_ids).should.equal([security_group.id]) +@mock_redshift +def test_create_cluster_with_iam_roles(): + iam_roles_arn = ['arn:aws:iam:::role/my-iam-role',] + client = boto3.client('redshift', region_name='us-east-1') + cluster_id = 'my_cluster' + client.create_cluster( + ClusterIdentifier=cluster_id, + 
NodeType="dw.hs1.xlarge", + MasterUsername="username", + MasterUserPassword="password", + IamRoles=iam_roles_arn + ) + response = client.describe_clusters(ClusterIdentifier=cluster_id) + cluster = response['Clusters'][0] + iam_roles = [role['IamRoleArn'] for role in cluster['IamRoles']] + iam_roles_arn.should.equal(iam_roles) + + @mock_redshift_deprecated def test_create_cluster_with_parameter_group(): conn = boto.connect_redshift() @@ -1042,3 +1099,98 @@ def test_tagged_resource_not_found_error(): ResourceName='bad:arn' ).should.throw(ClientError, "Tagging is not supported for this type of resource") + +@mock_redshift +def test_enable_snapshot_copy(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + ClusterIdentifier='test', + ClusterType='single-node', + DBName='test', + Encrypted=True, + MasterUsername='user', + MasterUserPassword='password', + NodeType='ds2.xlarge', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + RetentionPeriod=3, + SnapshotCopyGrantName='copy-us-east-1-to-us-west-2' + ) + response = client.describe_clusters(ClusterIdentifier='test') + cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] + cluster_snapshot_copy_status['RetentionPeriod'].should.equal(3) + cluster_snapshot_copy_status['DestinationRegion'].should.equal('us-west-2') + cluster_snapshot_copy_status['SnapshotCopyGrantName'].should.equal('copy-us-east-1-to-us-west-2') + + +@mock_redshift +def test_enable_snapshot_copy_unencrypted(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + ClusterIdentifier='test', + ClusterType='single-node', + DBName='test', + MasterUsername='user', + MasterUserPassword='password', + NodeType='ds2.xlarge', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + ) + response = client.describe_clusters(ClusterIdentifier='test') + cluster_snapshot_copy_status = response['Clusters'][0]['ClusterSnapshotCopyStatus'] + cluster_snapshot_copy_status['RetentionPeriod'].should.equal(7) + cluster_snapshot_copy_status['DestinationRegion'].should.equal('us-west-2') + + +@mock_redshift +def test_disable_snapshot_copy(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + DBName='test', + ClusterIdentifier='test', + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='user', + MasterUserPassword='password', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + RetentionPeriod=3, + SnapshotCopyGrantName='copy-us-east-1-to-us-west-2', + ) + client.disable_snapshot_copy( + ClusterIdentifier='test', + ) + response = client.describe_clusters(ClusterIdentifier='test') + response['Clusters'][0].shouldnt.contain('ClusterSnapshotCopyStatus') + + +@mock_redshift +def test_modify_snapshot_copy_retention_period(): + client = boto3.client('redshift', region_name='us-east-1') + client.create_cluster( + DBName='test', + ClusterIdentifier='test', + ClusterType='single-node', + NodeType='ds2.xlarge', + MasterUsername='user', + MasterUserPassword='password', + ) + client.enable_snapshot_copy( + ClusterIdentifier='test', + DestinationRegion='us-west-2', + RetentionPeriod=3, + SnapshotCopyGrantName='copy-us-east-1-to-us-west-2', + ) + client.modify_snapshot_copy_retention_period( + ClusterIdentifier='test', + RetentionPeriod=5, + ) + response = client.describe_clusters(ClusterIdentifier='test') + cluster_snapshot_copy_status 
= response['Clusters'][0]['ClusterSnapshotCopyStatus'] + cluster_snapshot_copy_status['RetentionPeriod'].should.equal(5) diff --git a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py index cce0f1b99..759063329 100644 --- a/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py +++ b/tests/test_resourcegroupstaggingapi/test_resourcegroupstaggingapi.py @@ -2,7 +2,7 @@ from __future__ import unicode_literals import boto3 import sure # noqa -from moto import mock_resourcegroupstaggingapi, mock_s3, mock_ec2 +from moto import mock_resourcegroupstaggingapi, mock_s3, mock_ec2, mock_elbv2 @mock_s3 @@ -223,4 +223,63 @@ def test_get_tag_values_ec2(): resp['TagValues'].should.contain('MY_VALUE1') resp['TagValues'].should.contain('MY_VALUE4') - # TODO test pagenation \ No newline at end of file +@mock_ec2 +@mock_elbv2 +@mock_resourcegroupstaggingapi +def test_get_resources_elbv2(): + conn = boto3.client('elbv2', region_name='us-east-1') + ec2 = boto3.resource('ec2', region_name='us-east-1') + + security_group = ec2.create_security_group( + GroupName='a-security-group', Description='First One') + vpc = ec2.create_vpc(CidrBlock='172.28.7.0/24', InstanceTenancy='default') + subnet1 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1a') + subnet2 = ec2.create_subnet( + VpcId=vpc.id, + CidrBlock='172.28.7.192/26', + AvailabilityZone='us-east-1b') + + conn.create_load_balancer( + Name='my-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + Tags=[ + { + 'Key': 'key_name', + 'Value': 'a_value' + }, + { + 'Key': 'key_2', + 'Value': 'val2' + } + ] + ) + + conn.create_load_balancer( + Name='my-other-lb', + Subnets=[subnet1.id, subnet2.id], + SecurityGroups=[security_group.id], + Scheme='internal', + ) + + rtapi = boto3.client('resourcegroupstaggingapi', region_name='us-east-1') + + resp = rtapi.get_resources(ResourceTypeFilters=['elasticloadbalancer:loadbalancer']) + + resp['ResourceTagMappingList'].should.have.length_of(2) + resp['ResourceTagMappingList'][0]['ResourceARN'].should.contain('loadbalancer/') + resp = rtapi.get_resources( + ResourceTypeFilters=['elasticloadbalancer:loadbalancer'], + TagFilters=[{ + 'Key': 'key_name' + }] + ) + + resp['ResourceTagMappingList'].should.have.length_of(1) + resp['ResourceTagMappingList'][0]['Tags'].should.contain({'Key': 'key_name', 'Value': 'a_value'}) + + # TODO test pagination diff --git a/tests/test_s3/test_s3.py b/tests/test_s3/test_s3.py index 829941d79..369426758 100644 --- a/tests/test_s3/test_s3.py +++ b/tests/test_s3/test_s3.py @@ -50,6 +50,7 @@ def reduced_min_part_size(f): return f(*args, **kwargs) finally: s3model.UPLOAD_PART_MIN_SIZE = orig_size + return wrapped @@ -883,11 +884,12 @@ def test_s3_object_in_public_bucket(): s3_anonymous.Object(key='file.txt', bucket_name='test-bucket').get() exc.exception.response['Error']['Code'].should.equal('403') - params = {'Bucket': 'test-bucket','Key': 'file.txt'} + params = {'Bucket': 'test-bucket', 'Key': 'file.txt'} presigned_url = boto3.client('s3').generate_presigned_url('get_object', params, ExpiresIn=900) response = requests.get(presigned_url) assert response.status_code == 200 + @mock_s3 def test_s3_object_in_private_bucket(): s3 = boto3.resource('s3') @@ -1102,6 +1104,7 @@ def test_boto3_key_etag(): resp = s3.get_object(Bucket='mybucket', Key='steve') resp['ETag'].should.equal('"d32bda93738f7e03adb22e66c90fbc04"')
+ @mock_s3 def test_website_redirect_location(): s3 = boto3.client('s3', region_name='us-east-1') @@ -1116,6 +1119,7 @@ def test_website_redirect_location(): resp = s3.get_object(Bucket='mybucket', Key='steve') resp['WebsiteRedirectLocation'].should.equal(url) + @mock_s3 def test_boto3_list_keys_xml_escaped(): s3 = boto3.client('s3', region_name='us-east-1') @@ -1386,6 +1390,21 @@ def test_boto3_copy_object_with_versioning(): obj2_version_new.should_not.equal(obj2_version) +@mock_s3 +def test_boto3_deleted_versionings_list(): + client = boto3.client('s3', region_name='us-east-1') + + client.create_bucket(Bucket='blah') + client.put_bucket_versioning(Bucket='blah', VersioningConfiguration={'Status': 'Enabled'}) + + client.put_object(Bucket='blah', Key='test1', Body=b'test1') + client.put_object(Bucket='blah', Key='test2', Body=b'test2') + client.delete_objects(Bucket='blah', Delete={'Objects': [{'Key': 'test1'}]}) + + listed = client.list_objects_v2(Bucket='blah') + assert len(listed['Contents']) == 1 + + @mock_s3 def test_boto3_head_object_if_modified_since(): s3 = boto3.client('s3', region_name='us-east-1') @@ -1627,7 +1646,7 @@ def test_boto3_put_bucket_cors(): }) e = err.exception e.response["Error"]["Code"].should.equal("InvalidRequest") - e.response["Error"]["Message"].should.equal("Found unsupported HTTP method in CORS config. " + e.response["Error"]["Message"].should.equal("Found unsupported HTTP method in CORS config. " "Unsupported method is NOTREAL") with assert_raises(ClientError) as err: @@ -1732,6 +1751,476 @@ def test_boto3_delete_bucket_cors(): e.response["Error"]["Message"].should.equal("The CORS configuration does not exist") +@mock_s3 +def test_put_bucket_acl_body(): + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="bucket") + bucket_owner = s3.get_bucket_acl(Bucket="bucket")["Owner"] + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + }, + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "READ_ACP" + } + ], + "Owner": bucket_owner + }) + + result = s3.get_bucket_acl(Bucket="bucket") + assert len(result["Grants"]) == 2 + for g in result["Grants"]: + assert g["Grantee"]["URI"] == "http://acs.amazonaws.com/groups/s3/LogDelivery" + assert g["Grantee"]["Type"] == "Group" + assert g["Permission"] in ["WRITE", "READ_ACP"] + + # With one: + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + } + ], + "Owner": bucket_owner + }) + result = s3.get_bucket_acl(Bucket="bucket") + assert len(result["Grants"]) == 1 + + # With no owner: + with assert_raises(ClientError) as err: + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + } + ] + }) + assert err.exception.response["Error"]["Code"] == "MalformedACLError" + + # With incorrect permission: + with assert_raises(ClientError) as err: + s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "lskjflkasdjflkdsjfalisdjflkdsjf" + } + ], + "Owner": bucket_owner + }) + assert 
err.exception.response["Error"]["Code"] == "MalformedACLError" + + # Clear the ACLs: + result = s3.put_bucket_acl(Bucket="bucket", AccessControlPolicy={"Grants": [], "Owner": bucket_owner}) + assert not result.get("Grants") + + +@mock_s3 +def test_put_bucket_notification(): + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="bucket") + + # With no configuration: + result = s3.get_bucket_notification(Bucket="bucket") + assert not result.get("TopicConfigurations") + assert not result.get("QueueConfigurations") + assert not result.get("LambdaFunctionConfigurations") + + # Place proper topic configuration: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "TopicConfigurations": [ + { + "TopicArn": "arn:aws:sns:us-east-1:012345678910:mytopic", + "Events": [ + "s3:ObjectCreated:*", + "s3:ObjectRemoved:*" + ] + }, + { + "TopicArn": "arn:aws:sns:us-east-1:012345678910:myothertopic", + "Events": [ + "s3:ObjectCreated:*" + ], + "Filter": { + "Key": { + "FilterRules": [ + { + "Name": "prefix", + "Value": "images/" + }, + { + "Name": "suffix", + "Value": "png" + } + ] + } + } + } + ] + }) + + # Verify to completion: + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert len(result["TopicConfigurations"]) == 2 + assert not result.get("QueueConfigurations") + assert not result.get("LambdaFunctionConfigurations") + assert result["TopicConfigurations"][0]["TopicArn"] == "arn:aws:sns:us-east-1:012345678910:mytopic" + assert result["TopicConfigurations"][1]["TopicArn"] == "arn:aws:sns:us-east-1:012345678910:myothertopic" + assert len(result["TopicConfigurations"][0]["Events"]) == 2 + assert len(result["TopicConfigurations"][1]["Events"]) == 1 + assert result["TopicConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" + assert result["TopicConfigurations"][0]["Events"][1] == "s3:ObjectRemoved:*" + assert result["TopicConfigurations"][1]["Events"][0] == "s3:ObjectCreated:*" + assert result["TopicConfigurations"][0]["Id"] + assert result["TopicConfigurations"][1]["Id"] + assert not result["TopicConfigurations"][0].get("Filter") + assert len(result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"]) == 2 + assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" + assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" + assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][1]["Name"] == "suffix" + assert result["TopicConfigurations"][1]["Filter"]["Key"]["FilterRules"][1]["Value"] == "png" + + # Place proper queue configuration: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "QueueConfigurations": [ + { + "Id": "SomeID", + "QueueArn": "arn:aws:sqs:us-east-1:012345678910:myQueue", + "Events": ["s3:ObjectCreated:*"], + "Filter": { + "Key": { + "FilterRules": [ + { + "Name": "prefix", + "Value": "images/" + } + ] + } + } + } + ] + }) + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert len(result["QueueConfigurations"]) == 1 + assert not result.get("TopicConfigurations") + assert not result.get("LambdaFunctionConfigurations") + assert result["QueueConfigurations"][0]["Id"] == "SomeID" + assert result["QueueConfigurations"][0]["QueueArn"] == "arn:aws:sqs:us-east-1:012345678910:myQueue" + assert result["QueueConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" + assert len(result["QueueConfigurations"][0]["Events"]) == 1 + assert 
len(result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"]) == 1 + assert result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" + assert result["QueueConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" + + # Place proper Lambda configuration: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "LambdaFunctionConfigurations": [ + { + "LambdaFunctionArn": + "arn:aws:lambda:us-east-1:012345678910:function:lambda", + "Events": ["s3:ObjectCreated:*"], + "Filter": { + "Key": { + "FilterRules": [ + { + "Name": "prefix", + "Value": "images/" + } + ] + } + } + } + ] + }) + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert len(result["LambdaFunctionConfigurations"]) == 1 + assert not result.get("TopicConfigurations") + assert not result.get("QueueConfigurations") + assert result["LambdaFunctionConfigurations"][0]["Id"] + assert result["LambdaFunctionConfigurations"][0]["LambdaFunctionArn"] == \ + "arn:aws:lambda:us-east-1:012345678910:function:lambda" + assert result["LambdaFunctionConfigurations"][0]["Events"][0] == "s3:ObjectCreated:*" + assert len(result["LambdaFunctionConfigurations"][0]["Events"]) == 1 + assert len(result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"]) == 1 + assert result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Name"] == "prefix" + assert result["LambdaFunctionConfigurations"][0]["Filter"]["Key"]["FilterRules"][0]["Value"] == "images/" + + # And with all 3 set: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "TopicConfigurations": [ + { + "TopicArn": "arn:aws:sns:us-east-1:012345678910:mytopic", + "Events": [ + "s3:ObjectCreated:*", + "s3:ObjectRemoved:*" + ] + } + ], + "LambdaFunctionConfigurations": [ + { + "LambdaFunctionArn": + "arn:aws:lambda:us-east-1:012345678910:function:lambda", + "Events": ["s3:ObjectCreated:*"] + } + ], + "QueueConfigurations": [ + { + "QueueArn": "arn:aws:sqs:us-east-1:012345678910:myQueue", + "Events": ["s3:ObjectCreated:*"] + } + ] + }) + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert len(result["LambdaFunctionConfigurations"]) == 1 + assert len(result["TopicConfigurations"]) == 1 + assert len(result["QueueConfigurations"]) == 1 + + # And clear it out: + s3.put_bucket_notification_configuration(Bucket="bucket", NotificationConfiguration={}) + result = s3.get_bucket_notification_configuration(Bucket="bucket") + assert not result.get("TopicConfigurations") + assert not result.get("QueueConfigurations") + assert not result.get("LambdaFunctionConfigurations") + + +@mock_s3 +def test_put_bucket_notification_errors(): + s3 = boto3.client("s3", region_name="us-east-1") + s3.create_bucket(Bucket="bucket") + + # With incorrect ARNs: + for tech, arn in [("Queue", "sqs"), ("Topic", "sns"), ("LambdaFunction", "lambda")]: + with assert_raises(ClientError) as err: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "{}Configurations".format(tech): [ + { + "{}Arn".format(tech): + "arn:aws:{}:us-east-1:012345678910:lksajdfkldskfj", + "Events": ["s3:ObjectCreated:*"] + } + ] + }) + + assert err.exception.response["Error"]["Code"] == "InvalidArgument" + assert err.exception.response["Error"]["Message"] == "The ARN is not well formed" + + # Region not the same as the bucket: + with assert_raises(ClientError) as err: + 
s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "QueueConfigurations": [ + { + "QueueArn": + "arn:aws:sqs:us-west-2:012345678910:lksajdfkldskfj", + "Events": ["s3:ObjectCreated:*"] + } + ] + }) + + assert err.exception.response["Error"]["Code"] == "InvalidArgument" + assert err.exception.response["Error"]["Message"] == \ + "The notification destination service region is not valid for the bucket location constraint" + + # Invalid event name: + with assert_raises(ClientError) as err: + s3.put_bucket_notification_configuration(Bucket="bucket", + NotificationConfiguration={ + "QueueConfigurations": [ + { + "QueueArn": + "arn:aws:sqs:us-east-1:012345678910:lksajdfkldskfj", + "Events": ["notarealeventname"] + } + ] + }) + assert err.exception.response["Error"]["Code"] == "InvalidArgument" + assert err.exception.response["Error"]["Message"] == "The event is not supported for notifications" + + +@mock_s3 +def test_boto3_put_bucket_logging(): + s3 = boto3.client("s3", region_name="us-east-1") + bucket_name = "mybucket" + log_bucket = "logbucket" + wrong_region_bucket = "wrongregionlogbucket" + s3.create_bucket(Bucket=bucket_name) + s3.create_bucket(Bucket=log_bucket) # Adding the ACL for log-delivery later... + s3.create_bucket(Bucket=wrong_region_bucket, CreateBucketConfiguration={"LocationConstraint": "us-west-2"}) + + # No logging config: + result = s3.get_bucket_logging(Bucket=bucket_name) + assert not result.get("LoggingEnabled") + + # A log-bucket that doesn't exist: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": "IAMNOTREAL", + "TargetPrefix": "" + } + }) + assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging" + + # A log-bucket that's missing the proper ACLs for LogDelivery: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "" + } + }) + assert err.exception.response["Error"]["Code"] == "InvalidTargetBucketForLogging" + assert "log-delivery" in err.exception.response["Error"]["Message"] + + # Add the proper "log-delivery" ACL to the log buckets: + bucket_owner = s3.get_bucket_acl(Bucket=log_bucket)["Owner"] + for bucket in [log_bucket, wrong_region_bucket]: + s3.put_bucket_acl(Bucket=bucket, AccessControlPolicy={ + "Grants": [ + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "WRITE" + }, + { + "Grantee": { + "URI": "http://acs.amazonaws.com/groups/s3/LogDelivery", + "Type": "Group" + }, + "Permission": "READ_ACP" + }, + { + "Grantee": { + "Type": "CanonicalUser", + "ID": bucket_owner["ID"] + }, + "Permission": "FULL_CONTROL" + } + ], + "Owner": bucket_owner + }) + + # A log-bucket that's in the wrong region: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": wrong_region_bucket, + "TargetPrefix": "" + } + }) + assert err.exception.response["Error"]["Code"] == "CrossLocationLoggingProhibitted" + + # Correct logging: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name) + } + }) + result = s3.get_bucket_logging(Bucket=bucket_name) + assert result["LoggingEnabled"]["TargetBucket"] == log_bucket + assert 
result["LoggingEnabled"]["TargetPrefix"] == "{}/".format(bucket_name) + assert not result["LoggingEnabled"].get("TargetGrants") + + # And disabling: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={}) + assert not s3.get_bucket_logging(Bucket=bucket_name).get("LoggingEnabled") + + # And enabling with multiple target grants: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name), + "TargetGrants": [ + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "READ" + }, + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "WRITE" + } + ] + } + }) + + result = s3.get_bucket_logging(Bucket=bucket_name) + assert len(result["LoggingEnabled"]["TargetGrants"]) == 2 + assert result["LoggingEnabled"]["TargetGrants"][0]["Grantee"]["ID"] == \ + "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274" + + # Test with just 1 grant: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name), + "TargetGrants": [ + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "READ" + } + ] + } + }) + result = s3.get_bucket_logging(Bucket=bucket_name) + assert len(result["LoggingEnabled"]["TargetGrants"]) == 1 + + # With an invalid grant: + with assert_raises(ClientError) as err: + s3.put_bucket_logging(Bucket=bucket_name, BucketLoggingStatus={ + "LoggingEnabled": { + "TargetBucket": log_bucket, + "TargetPrefix": "{}/".format(bucket_name), + "TargetGrants": [ + { + "Grantee": { + "ID": "SOMEIDSTRINGHERE9238748923734823917498237489237409123840983274", + "Type": "CanonicalUser" + }, + "Permission": "NOTAREALPERM" + } + ] + } + }) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + @mock_s3 def test_boto3_put_object_tagging(): s3 = boto3.client('s3', region_name='us-east-1') @@ -1939,11 +2428,10 @@ def test_get_stream_gzipped(): Bucket='moto-tests', Key='keyname', ) - res = zlib.decompress(obj['Body'].read(), 16+zlib.MAX_WBITS) + res = zlib.decompress(obj['Body'].read(), 16 + zlib.MAX_WBITS) assert res == payload - TEST_XML = """\ diff --git a/tests/test_s3/test_s3_lifecycle.py b/tests/test_s3/test_s3_lifecycle.py index 5cae8f790..d176e95c6 100644 --- a/tests/test_s3/test_s3_lifecycle.py +++ b/tests/test_s3/test_s3_lifecycle.py @@ -1,12 +1,16 @@ from __future__ import unicode_literals import boto +import boto3 from boto.exception import S3ResponseError from boto.s3.lifecycle import Lifecycle, Transition, Expiration, Rule import sure # noqa +from botocore.exceptions import ClientError +from datetime import datetime +from nose.tools import assert_raises -from moto import mock_s3_deprecated +from moto import mock_s3_deprecated, mock_s3 @mock_s3_deprecated @@ -26,6 +30,167 @@ def test_lifecycle_create(): list(lifecycle.transition).should.equal([]) +@mock_s3 +def test_lifecycle_with_filters(): + client = boto3.client("s3") + client.create_bucket(Bucket="bucket") + + # Create a lifecycle rule with a Filter (no tags): + lfc = { + "Rules": [ + { + "Expiration": { + "Days": 7 + }, + "ID": "wholebucket", + "Filter": { + "Prefix": "" + }, + "Status": "Enabled" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", 
LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Filter"]["Prefix"] == '' + assert not result["Rules"][0]["Filter"].get("And") + assert not result["Rules"][0]["Filter"].get("Tag") + with assert_raises(KeyError): + assert result["Rules"][0]["Prefix"] + + # With a tag: + lfc["Rules"][0]["Filter"]["Tag"] = { + "Key": "mytag", + "Value": "mytagvalue" + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Filter"]["Prefix"] == '' + assert not result["Rules"][0]["Filter"].get("And") + assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" + with assert_raises(KeyError): + assert result["Rules"][0]["Prefix"] + + # With And (single tag): + lfc["Rules"][0]["Filter"]["And"] = { + "Prefix": "some/prefix", + "Tags": [ + { + "Key": "mytag", + "Value": "mytagvalue" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Filter"]["Prefix"] == "" + assert result["Rules"][0]["Filter"]["And"]["Prefix"] == "some/prefix" + assert len(result["Rules"][0]["Filter"]["And"]["Tags"]) == 1 + assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Value"] == "mytagvalue" + assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" + with assert_raises(KeyError): + assert result["Rules"][0]["Prefix"] + + # With multiple And tags: + lfc["Rules"][0]["Filter"]["And"] = { + "Prefix": "some/prefix", + "Tags": [ + { + "Key": "mytag", + "Value": "mytagvalue" + }, + { + "Key": "mytag2", + "Value": "mytagvalue2" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Filter"]["Prefix"] == "" + assert result["Rules"][0]["Filter"]["And"]["Prefix"] == "some/prefix" + assert len(result["Rules"][0]["Filter"]["And"]["Tags"]) == 2 + assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["And"]["Tags"][0]["Value"] == "mytagvalue" + assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" + assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Key"] == "mytag2" + assert result["Rules"][0]["Filter"]["And"]["Tags"][1]["Value"] == "mytagvalue2" + assert result["Rules"][0]["Filter"]["Tag"]["Key"] == "mytag" + assert result["Rules"][0]["Filter"]["Tag"]["Value"] == "mytagvalue" + with assert_raises(KeyError): + assert result["Rules"][0]["Prefix"] + + # Can't have both filter and prefix: + lfc["Rules"][0]["Prefix"] = '' + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + lfc["Rules"][0]["Prefix"] = 'some/path' + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", 
LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + # No filters -- just a prefix: + del lfc["Rules"][0]["Filter"] + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert not result["Rules"][0].get("Filter") + assert result["Rules"][0]["Prefix"] == "some/path" + + +@mock_s3 +def test_lifecycle_with_eodm(): + client = boto3.client("s3") + client.create_bucket(Bucket="bucket") + + lfc = { + "Rules": [ + { + "Expiration": { + "ExpiredObjectDeleteMarker": True + }, + "ID": "wholebucket", + "Filter": { + "Prefix": "" + }, + "Status": "Enabled" + } + ] + } + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert result["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"] + + # Set to False: + lfc["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"] = False + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + result = client.get_bucket_lifecycle_configuration(Bucket="bucket") + assert len(result["Rules"]) == 1 + assert not result["Rules"][0]["Expiration"]["ExpiredObjectDeleteMarker"] + + # With failure: + lfc["Rules"][0]["Expiration"]["Days"] = 7 + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + del lfc["Rules"][0]["Expiration"]["Days"] + + lfc["Rules"][0]["Expiration"]["Date"] = datetime(2015, 1, 1) + with assert_raises(ClientError) as err: + client.put_bucket_lifecycle_configuration(Bucket="bucket", LifecycleConfiguration=lfc) + assert err.exception.response["Error"]["Code"] == "MalformedXML" + + @mock_s3_deprecated def test_lifecycle_with_glacier_transition(): conn = boto.s3.connect_to_region("us-west-1") diff --git a/tests/test_s3/test_s3_utils.py b/tests/test_s3/test_s3_utils.py index b4f56d89a..9cda1f157 100644 --- a/tests/test_s3/test_s3_utils.py +++ b/tests/test_s3/test_s3_utils.py @@ -1,6 +1,7 @@ from __future__ import unicode_literals +import os from sure import expect -from moto.s3.utils import bucket_name_from_url, _VersionedKeyStore +from moto.s3.utils import bucket_name_from_url, _VersionedKeyStore, parse_region_from_url def test_base_url(): @@ -16,6 +17,12 @@ def test_localhost_without_bucket(): expect(bucket_name_from_url( 'https://www.localhost:5000/def')).should.equal(None) +def test_force_ignore_subdomain_for_bucketnames(): + os.environ['S3_IGNORE_SUBDOMAIN_BUCKETNAME'] = '1' + expect(bucket_name_from_url('https://subdomain.localhost:5000/abc/resource')).should.equal(None) + del(os.environ['S3_IGNORE_SUBDOMAIN_BUCKETNAME']) + + def test_versioned_key_store(): d = _VersionedKeyStore() @@ -53,3 +60,21 @@ def test_versioned_key_store(): d.setlist('key', [[1], [2]]) d['key'].should.have.length_of(1) d.getlist('key').should.be.equal([[1], [2]]) + + +def test_parse_region_from_url(): + expected = 'us-west-2' + for url in ['http://s3-us-west-2.amazonaws.com/bucket', + 'http://s3.us-west-2.amazonaws.com/bucket', + 'http://bucket.s3-us-west-2.amazonaws.com', + 'https://s3-us-west-2.amazonaws.com/bucket', + 'https://s3.us-west-2.amazonaws.com/bucket', + 'https://bucket.s3-us-west-2.amazonaws.com']: + parse_region_from_url(url).should.equal(expected) + + expected = 'us-east-1' + for url in 
['http://s3.amazonaws.com/bucket', + 'http://bucket.s3.amazonaws.com', + 'https://s3.amazonaws.com/bucket', + 'https://bucket.s3.amazonaws.com']: + parse_region_from_url(url).should.equal(expected) diff --git a/tests/test_sns/test_publishing_boto3.py b/tests/test_sns/test_publishing_boto3.py index 1540ceb84..7db072287 100644 --- a/tests/test_sns/test_publishing_boto3.py +++ b/tests/test_sns/test_publishing_boto3.py @@ -1,18 +1,16 @@ from __future__ import unicode_literals +import base64 import json -from six.moves.urllib.parse import parse_qs - import boto3 import re from freezegun import freeze_time import sure # noqa -from moto.packages.responses import responses +import responses from botocore.exceptions import ClientError from moto import mock_sns, mock_sqs -from freezegun import freeze_time MESSAGE_FROM_SQS_TEMPLATE = '{\n "Message": "%s",\n "MessageId": "%s",\n "Signature": "EXAMPLElDMXvB8r9R83tGoNn0ecwd5UjllzsvSvbItzfaMpN2nk5HVSw7XnOn/49IkxDKz8YrlH2qJXj2iZB0Zo2O71c4qQk1fMUDi3LGpij7RCW7AW9vYYsSqIKRnFS94ilu7NFhUzLiieYr4BKHpdTmdD6c0esKEYBpabxDSc=",\n "SignatureVersion": "1",\n "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-f3ecfb7224c7233fe7bb5f59f96de52f.pem",\n "Subject": "my subject",\n "Timestamp": "2015-01-01T12:00:00.000Z",\n "TopicArn": "arn:aws:sns:%s:123456789012:some-topic",\n "Type": "Notification",\n "UnsubscribeURL": "https://sns.us-east-1.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-east-1:123456789012:some-topic:2bcfbf39-05c3-41de-beaa-fcfcc21c8f55"\n}' @@ -44,6 +42,83 @@ def test_publish_to_sqs(): acquired_message.should.equal(expected) +@mock_sqs +@mock_sns +def test_publish_to_sqs_bad(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + sqs_conn = boto3.resource('sqs', region_name='us-east-1') + sqs_conn.create_queue(QueueName="test-queue") + + conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") + message = 'my message' + try: + # Test missing Value + conn.publish( + TopicArn=topic_arn, Message=message, + MessageAttributes={'store': {'DataType': 'String'}}) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + try: + # Test empty DataType (if the DataType field is missing entirely + # botocore throws an exception during validation) + conn.publish( + TopicArn=topic_arn, Message=message, + MessageAttributes={'store': { + 'DataType': '', + 'StringValue': 'example_corp' + }}) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + try: + # Test empty Value + conn.publish( + TopicArn=topic_arn, Message=message, + MessageAttributes={'store': { + 'DataType': 'String', + 'StringValue': '' + }}) + except ClientError as err: + err.response['Error']['Code'].should.equal('InvalidParameterValue') + + +@mock_sqs +@mock_sns +def test_publish_to_sqs_msg_attr_byte_value(): + conn = boto3.client('sns', region_name='us-east-1') + conn.create_topic(Name="some-topic") + response = conn.list_topics() + topic_arn = response["Topics"][0]['TopicArn'] + + sqs_conn = boto3.resource('sqs', region_name='us-east-1') + queue = sqs_conn.create_queue(QueueName="test-queue") + + conn.subscribe(TopicArn=topic_arn, + Protocol="sqs", + Endpoint="arn:aws:sqs:us-east-1:123456789012:test-queue") + message = 'my message' + conn.publish( + TopicArn=topic_arn, Message=message, + 
MessageAttributes={'store': { + 'DataType': 'Binary', + 'BinaryValue': b'\x02\x03\x04' + }}) + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([{ + 'store': { + 'Type': 'Binary', + 'Value': base64.b64encode(b'\x02\x03\x04').decode() + } + }]) + + @mock_sns def test_publish_sms(): client = boto3.client('sns', region_name='us-east-1') @@ -156,7 +231,9 @@ def test_publish_to_sqs_in_different_region(): def test_publish_to_http(): def callback(request): request.headers["Content-Type"].should.equal("application/json") - json.loads.when.called_with(request.body).should_not.throw(Exception) + json.loads.when.called_with( + request.body.decode() + ).should_not.throw(Exception) return 200, {}, "" responses.add_callback( @@ -176,7 +253,6 @@ def test_publish_to_http(): response = conn.publish( TopicArn=topic_arn, Message="my message", Subject="my subject") - message_id = response['MessageId'] @mock_sqs @@ -207,3 +283,169 @@ def test_publish_subject(): err.response['Error']['Code'].should.equal('InvalidParameter') else: raise RuntimeError('Should have raised an InvalidParameter exception') + + +def _setup_filter_policy_test(filter_policy): + sns = boto3.resource('sns', region_name='us-east-1') + topic = sns.create_topic(Name='some-topic') + + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName='test-queue') + + subscription = topic.subscribe( + Protocol='sqs', Endpoint=queue.attributes['QueueArn']) + + subscription.set_attributes( + AttributeName='FilterPolicy', AttributeValue=json.dumps(filter_policy)) + + return topic, subscription, queue + + +@mock_sqs +@mock_sns +def test_filtering_exact_string(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp']}) + + topic.publish( + Message='match', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal(['match']) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal( + [{'store': {'Type': 'String', 'Value': 'example_corp'}}]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_multiple_message_attributes(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp']}) + + topic.publish( + Message='match', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}, + 'event': {'DataType': 'String', + 'StringValue': 'order_cancelled'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal(['match']) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([{ + 'store': {'Type': 'String', 'Value': 'example_corp'}, + 'event': {'Type': 'String', 'Value': 'order_cancelled'}}]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_OR_matching(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp', 'different_corp']}) + + topic.publish( + Message='match example_corp', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}}) + topic.publish( + Message='match different_corp', + MessageAttributes={'store': {'DataType': 
'String', + 'StringValue': 'different_corp'}}) + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal( + ['match example_corp', 'match different_corp']) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([ + {'store': {'Type': 'String', 'Value': 'example_corp'}}, + {'store': {'Type': 'String', 'Value': 'different_corp'}}]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_AND_matching_positive(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp'], + 'event': ['order_cancelled']}) + + topic.publish( + Message='match example_corp order_cancelled', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}, + 'event': {'DataType': 'String', + 'StringValue': 'order_cancelled'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal( + ['match example_corp order_cancelled']) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([{ + 'store': {'Type': 'String', 'Value': 'example_corp'}, + 'event': {'Type': 'String', 'Value': 'order_cancelled'}}]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_AND_matching_no_match(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp'], + 'event': ['order_cancelled']}) + + topic.publish( + Message='match example_corp order_accepted', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'example_corp'}, + 'event': {'DataType': 'String', + 'StringValue': 'order_accepted'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal([]) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_no_match(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp']}) + + topic.publish( + Message='no match', + MessageAttributes={'store': {'DataType': 'String', + 'StringValue': 'different_corp'}}) + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal([]) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([]) + + +@mock_sqs +@mock_sns +def test_filtering_exact_string_no_attributes_no_match(): + topic, subscription, queue = _setup_filter_policy_test( + {'store': ['example_corp']}) + + topic.publish(Message='no match') + + messages = queue.receive_messages(MaxNumberOfMessages=5) + message_bodies = [json.loads(m.body)['Message'] for m in messages] + message_bodies.should.equal([]) + message_attributes = [ + json.loads(m.body)['MessageAttributes'] for m in messages] + message_attributes.should.equal([]) diff --git a/tests/test_sns/test_subscriptions_boto3.py b/tests/test_sns/test_subscriptions_boto3.py index 4446febfc..98075e617 100644 --- a/tests/test_sns/test_subscriptions_boto3.py +++ b/tests/test_sns/test_subscriptions_boto3.py @@ -25,6 +25,23 @@ def test_subscribe_sms(): ) resp.should.contain('SubscriptionArn') +@mock_sns +def test_double_subscription(): + client = 
boto3.client('sns', region_name='us-east-1') + client.create_topic(Name="some-topic") + resp = client.create_topic(Name="some-topic") + arn = resp['TopicArn'] + + do_subscribe_sqs = lambda sqs_arn: client.subscribe( + TopicArn=arn, + Protocol='sqs', + Endpoint=sqs_arn + ) + resp1 = do_subscribe_sqs('arn:aws:sqs:elasticmq:000000000000:foo') + resp2 = do_subscribe_sqs('arn:aws:sqs:elasticmq:000000000000:foo') + + resp1['SubscriptionArn'].should.equal(resp2['SubscriptionArn']) + @mock_sns def test_subscribe_bad_sms(): @@ -206,11 +223,26 @@ def test_set_subscription_attributes(): AttributeName='DeliveryPolicy', AttributeValue=delivery_policy ) + + filter_policy = json.dumps({ + "store": ["example_corp"], + "event": ["order_cancelled"], + "encrypted": [False], + "customer_interests": ["basketball", "baseball"] + }) + conn.set_subscription_attributes( + SubscriptionArn=subscription_arn, + AttributeName='FilterPolicy', + AttributeValue=filter_policy + ) + attrs = conn.get_subscription_attributes( SubscriptionArn=subscription_arn ) + attrs['Attributes']['RawMessageDelivery'].should.equal('true') attrs['Attributes']['DeliveryPolicy'].should.equal(delivery_policy) + attrs['Attributes']['FilterPolicy'].should.equal(filter_policy) # not existing subscription with assert_raises(ClientError): diff --git a/tests/test_sqs/test_sqs.py b/tests/test_sqs/test_sqs.py index c761ec8d9..1280fed80 100644 --- a/tests/test_sqs/test_sqs.py +++ b/tests/test_sqs/test_sqs.py @@ -72,6 +72,24 @@ def test_create_queue(): queue.attributes.get('VisibilityTimeout').should.equal('30') +@mock_sqs +def test_create_queue_kms(): + sqs = boto3.resource('sqs', region_name='us-east-1') + + new_queue = sqs.create_queue( + QueueName='test-queue', + Attributes={ + 'KmsMasterKeyId': 'master-key-id', + 'KmsDataKeyReusePeriodSeconds': '600' + }) + new_queue.should_not.be.none + + queue = sqs.get_queue_by_name(QueueName='test-queue') + + queue.attributes.get('KmsMasterKeyId').should.equal('master-key-id') + queue.attributes.get('KmsDataKeyReusePeriodSeconds').should.equal('600') + + @mock_sqs def test_get_nonexistent_queue(): sqs = boto3.resource('sqs', region_name='us-east-1') @@ -79,13 +97,15 @@ def test_get_nonexistent_queue(): sqs.get_queue_by_name(QueueName='nonexisting-queue') ex = err.exception ex.operation_name.should.equal('GetQueueUrl') - ex.response['Error']['Code'].should.equal('QueueDoesNotExist') + ex.response['Error']['Code'].should.equal( + 'AWS.SimpleQueueService.NonExistentQueue') with assert_raises(ClientError) as err: sqs.Queue('http://whatever-incorrect-queue-address').load() ex = err.exception ex.operation_name.should.equal('GetQueueAttributes') - ex.response['Error']['Code'].should.equal('QueueDoesNotExist') + ex.response['Error']['Code'].should.equal( + 'AWS.SimpleQueueService.NonExistentQueue') @mock_sqs @@ -150,6 +170,28 @@ def test_message_with_complex_attributes(): messages.should.have.length_of(1) +@mock_sqs +def test_send_message_with_message_group_id(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName="test-group-id.fifo", + Attributes={'FifoQueue': 'true'}) + + sent = queue.send_message( + MessageBody="mydata", + MessageDeduplicationId="dedupe_id_1", + MessageGroupId="group_id_1", + ) + + messages = queue.receive_messages() + messages.should.have.length_of(1) + + message_attributes = messages[0].attributes + message_attributes.should.contain('MessageGroupId') + message_attributes['MessageGroupId'].should.equal('group_id_1') + 
message_attributes.should.contain('MessageDeduplicationId') + message_attributes['MessageDeduplicationId'].should.equal('dedupe_id_1') + + @mock_sqs def test_send_message_with_unicode_characters(): body_one = 'Héllo!😀' @@ -336,6 +378,36 @@ def test_send_receive_message_timestamps(): int.when.called_with(approximate_first_receive_timestamp).shouldnt.throw(ValueError) +@mock_sqs +def test_max_number_of_messages_invalid_param(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName='test-queue') + + with assert_raises(ClientError): + queue.receive_messages(MaxNumberOfMessages=11) + + with assert_raises(ClientError): + queue.receive_messages(MaxNumberOfMessages=0) + + # no error but also no messages returned + queue.receive_messages(MaxNumberOfMessages=1, WaitTimeSeconds=0) + + +@mock_sqs +def test_wait_time_seconds_invalid_param(): + sqs = boto3.resource('sqs', region_name='us-east-1') + queue = sqs.create_queue(QueueName='test-queue') + + with assert_raises(ClientError): + queue.receive_messages(WaitTimeSeconds=-1) + + with assert_raises(ClientError): + queue.receive_messages(WaitTimeSeconds=21) + + # no error but also no messages returned + queue.receive_messages(WaitTimeSeconds=0) + + @mock_sqs def test_receive_messages_with_wait_seconds_timeout_of_zero(): """ @@ -351,20 +423,6 @@ def test_receive_messages_with_wait_seconds_timeout_of_zero(): messages.should.equal([]) -@mock_sqs -def test_receive_messages_with_wait_seconds_timeout_of_negative_one(): - """ - test that zero messages is returned with a wait_seconds_timeout of negative 1 - :return: - """ - - sqs = boto3.resource('sqs', region_name='us-east-1') - queue = sqs.create_queue(QueueName="blah") - - messages = queue.receive_messages(WaitTimeSeconds=-1) - messages.should.equal([]) - - @mock_sqs_deprecated def test_send_message_with_xml_characters(): conn = boto.connect_sqs('the_key', 'the_secret') @@ -890,7 +948,7 @@ def test_create_fifo_queue_with_dlq(): def test_queue_with_dlq(): if os.environ.get('TEST_SERVER_MODE', 'false').lower() == 'true': raise SkipTest('Cant manipulate time in server mode') - + sqs = boto3.client('sqs', region_name='us-east-1') with freeze_time("2015-01-01 12:00:00"): @@ -932,3 +990,76 @@ def test_queue_with_dlq(): resp = sqs.list_dead_letter_source_queues(QueueUrl=queue_url1) resp['queueUrls'][0].should.equal(queue_url2) + + +@mock_sqs +def test_redrive_policy_available(): + sqs = boto3.client('sqs', region_name='us-east-1') + + resp = sqs.create_queue(QueueName='test-deadletter') + queue_url1 = resp['QueueUrl'] + queue_arn1 = sqs.get_queue_attributes(QueueUrl=queue_url1)['Attributes']['QueueArn'] + redrive_policy = { + 'deadLetterTargetArn': queue_arn1, + 'maxReceiveCount': 1, + } + + resp = sqs.create_queue( + QueueName='test-queue', + Attributes={ + 'RedrivePolicy': json.dumps(redrive_policy) + } + ) + + queue_url2 = resp['QueueUrl'] + attributes = sqs.get_queue_attributes(QueueUrl=queue_url2)['Attributes'] + assert 'RedrivePolicy' in attributes + assert json.loads(attributes['RedrivePolicy']) == redrive_policy + + # Can't have redrive policy without maxReceiveCount + with assert_raises(ClientError): + sqs.create_queue( + QueueName='test-queue2', + Attributes={ + 'FifoQueue': 'true', + 'RedrivePolicy': json.dumps({'deadLetterTargetArn': queue_arn1}) + } + ) + + +@mock_sqs +def test_redrive_policy_non_existent_queue(): + sqs = boto3.client('sqs', region_name='us-east-1') + redrive_policy = { + 'deadLetterTargetArn': 'arn:aws:sqs:us-east-1:123456789012:no-queue', +
'maxReceiveCount': 1, + } + + with assert_raises(ClientError): + sqs.create_queue( + QueueName='test-queue', + Attributes={ + 'RedrivePolicy': json.dumps(redrive_policy) + } + ) + + +@mock_sqs +def test_redrive_policy_set_attributes(): + sqs = boto3.resource('sqs', region_name='us-east-1') + + queue = sqs.create_queue(QueueName='test-queue') + deadletter_queue = sqs.create_queue(QueueName='test-deadletter') + + redrive_policy = { + 'deadLetterTargetArn': deadletter_queue.attributes['QueueArn'], + 'maxReceiveCount': 1, + } + + queue.set_attributes(Attributes={ + 'RedrivePolicy': json.dumps(redrive_policy)}) + + copy = sqs.get_queue_by_name(QueueName='test-queue') + assert 'RedrivePolicy' in copy.attributes + copy_policy = json.loads(copy.attributes['RedrivePolicy']) + assert copy_policy == redrive_policy diff --git a/tests/test_ssm/test_ssm_boto3.py b/tests/test_ssm/test_ssm_boto3.py index ff8e5e8a4..ad48fd7ed 100644 --- a/tests/test_ssm/test_ssm_boto3.py +++ b/tests/test_ssm/test_ssm_boto3.py @@ -76,6 +76,25 @@ def test_get_parameters_by_path(): Value='value4', Type='String') + client.put_parameter( + Name='/baz/name1', + Description='A test parameter (list)', + Value='value1,value2,value3', + Type='StringList') + + client.put_parameter( + Name='/baz/name2', + Description='A test parameter', + Value='value1', + Type='String') + + client.put_parameter( + Name='/baz/pwd', + Description='A secure test parameter', + Value='my_secret', + Type='SecureString', + KeyId='alias/aws/ssm') + response = client.get_parameters_by_path(Path='/foo') len(response['Parameters']).should.equal(2) {p['Value'] for p in response['Parameters']}.should.equal( @@ -92,17 +111,88 @@ def test_get_parameters_by_path(): set(['value3', 'value4']) ) + response = client.get_parameters_by_path(Path='/baz') + len(response['Parameters']).should.equal(3) + + filters = [{ + 'Key': 'Type', + 'Option': 'Equals', + 'Values': ['StringList'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(1) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name1']) + ) + + # note: 'Option' is optional (default: 'Equals') + filters = [{ + 'Key': 'Type', + 'Values': ['StringList'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(1) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name1']) + ) + + filters = [{ + 'Key': 'Type', + 'Option': 'Equals', + 'Values': ['String'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(1) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name2']) + ) + + filters = [{ + 'Key': 'Type', + 'Option': 'Equals', + 'Values': ['String', 'SecureString'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(2) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name2', '/baz/pwd']) + ) + + filters = [{ + 'Key': 'Type', + 'Option': 'BeginsWith', + 'Values': ['String'], + }] + response = client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(2) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/name1', '/baz/name2']) + ) + + filters = [{ + 'Key': 'KeyId', + 'Option': 'Equals', + 'Values': ['alias/aws/ssm'], + }] + response = 
client.get_parameters_by_path(Path='/baz', ParameterFilters=filters) + len(response['Parameters']).should.equal(1) + {p['Name'] for p in response['Parameters']}.should.equal( + set(['/baz/pwd']) + ) + @mock_ssm def test_put_parameter(): client = boto3.client('ssm', region_name='us-east-1') - client.put_parameter( + response = client.put_parameter( Name='test', Description='A test parameter', Value='value', Type='String') + response['Version'].should.equal(1) + response = client.get_parameters( Names=[ 'test' @@ -115,11 +205,16 @@ def test_put_parameter(): response['Parameters'][0]['Type'].should.equal('String') response['Parameters'][0]['Version'].should.equal(1) - client.put_parameter( - Name='test', - Description='desc 2', - Value='value 2', - Type='String') + try: + client.put_parameter( + Name='test', + Description='desc 2', + Value='value 2', + Type='String') + raise RuntimeError('Should fail') + except botocore.exceptions.ClientError as err: + err.operation_name.should.equal('PutParameter') + err.response['Error']['Message'].should.equal('Parameter test already exists.') response = client.get_parameters( Names=[ @@ -134,13 +229,15 @@ def test_put_parameter(): response['Parameters'][0]['Type'].should.equal('String') response['Parameters'][0]['Version'].should.equal(1) - client.put_parameter( + response = client.put_parameter( Name='test', Description='desc 3', Value='value 3', Type='String', Overwrite=True) + response['Version'].should.equal(2) + response = client.get_parameters( Names=[ 'test' @@ -458,3 +555,33 @@ def test_add_remove_list_tags_for_resource(): ResourceType='Parameter' ) len(response['TagList']).should.equal(0) + + +@mock_ssm +def test_send_command(): + ssm_document = 'AWS-RunShellScript' + params = {'commands': ['#!/bin/bash\necho \'hello world\'']} + + client = boto3.client('ssm', region_name='us-east-1') + # note the timeout is determined server side, so this is a simpler check. 
+ before = datetime.datetime.now() + + response = client.send_command( + InstanceIds=['i-123456'], + DocumentName=ssm_document, + Parameters=params, + OutputS3Region='us-east-2', + OutputS3BucketName='the-bucket', + OutputS3KeyPrefix='pref' + ) + cmd = response['Command'] + + cmd['CommandId'].should_not.be(None) + cmd['DocumentName'].should.equal(ssm_document) + cmd['Parameters'].should.equal(params) + + cmd['OutputS3Region'].should.equal('us-east-2') + cmd['OutputS3BucketName'].should.equal('the-bucket') + cmd['OutputS3KeyPrefix'].should.equal('pref') + + cmd['ExpiresAfter'].should.be.greater_than(before) diff --git a/tests/test_swf/models/test_activity_task.py b/tests/test_swf/models/test_activity_task.py index 5dddab975..41c88cafe 100644 --- a/tests/test_swf/models/test_activity_task.py +++ b/tests/test_swf/models/test_activity_task.py @@ -1,4 +1,5 @@ from freezegun import freeze_time +import sure # noqa from moto.swf.exceptions import SWFWorkflowExecutionClosedError from moto.swf.models import ( diff --git a/tests/test_swf/models/test_domain.py b/tests/test_swf/models/test_domain.py index 57f66c830..1a8a1268d 100644 --- a/tests/test_swf/models/test_domain.py +++ b/tests/test_swf/models/test_domain.py @@ -1,4 +1,5 @@ from collections import namedtuple +import sure # noqa from moto.swf.exceptions import SWFUnknownResourceFault from moto.swf.models import Domain diff --git a/tests/test_swf/models/test_generic_type.py b/tests/test_swf/models/test_generic_type.py index d7410f395..294df9f84 100644 --- a/tests/test_swf/models/test_generic_type.py +++ b/tests/test_swf/models/test_generic_type.py @@ -1,4 +1,5 @@ from moto.swf.models import GenericType +import sure # noqa # Tests for GenericType (ActivityType, WorkflowType) diff --git a/tests/test_swf/models/test_history_event.py b/tests/test_swf/models/test_history_event.py index 43592aa6c..b869408ce 100644 --- a/tests/test_swf/models/test_history_event.py +++ b/tests/test_swf/models/test_history_event.py @@ -1,4 +1,5 @@ from freezegun import freeze_time +import sure # noqa from moto.swf.models import HistoryEvent diff --git a/tests/test_swf/models/test_timeout.py b/tests/test_swf/models/test_timeout.py index d685bca8e..fb52652fd 100644 --- a/tests/test_swf/models/test_timeout.py +++ b/tests/test_swf/models/test_timeout.py @@ -1,4 +1,5 @@ from freezegun import freeze_time +import sure # noqa from moto.swf.models import Timeout diff --git a/tests/test_swf/responses/test_activity_tasks.py b/tests/test_swf/responses/test_activity_tasks.py index 3511d4e56..c0b8897b9 100644 --- a/tests/test_swf/responses/test_activity_tasks.py +++ b/tests/test_swf/responses/test_activity_tasks.py @@ -1,5 +1,6 @@ from boto.swf.exceptions import SWFResponseError from freezegun import freeze_time +import sure # noqa from moto import mock_swf_deprecated from moto.swf import swf_backend diff --git a/tests/test_swf/responses/test_activity_types.py b/tests/test_swf/responses/test_activity_types.py index b283d3448..95d8a3733 100644 --- a/tests/test_swf/responses/test_activity_types.py +++ b/tests/test_swf/responses/test_activity_types.py @@ -1,5 +1,6 @@ import boto from boto.swf.exceptions import SWFResponseError +import sure # noqa from moto import mock_swf_deprecated diff --git a/tests/test_swf/responses/test_decision_tasks.py b/tests/test_swf/responses/test_decision_tasks.py index 466e1a2ae..972b1053b 100644 --- a/tests/test_swf/responses/test_decision_tasks.py +++ b/tests/test_swf/responses/test_decision_tasks.py @@ -1,5 +1,6 @@ from boto.swf.exceptions import 
SWFResponseError from freezegun import freeze_time +import sure # noqa from moto import mock_swf_deprecated from moto.swf import swf_backend diff --git a/tests/test_swf/responses/test_domains.py b/tests/test_swf/responses/test_domains.py index 3fa12d665..8edc76432 100644 --- a/tests/test_swf/responses/test_domains.py +++ b/tests/test_swf/responses/test_domains.py @@ -1,5 +1,6 @@ import boto from boto.swf.exceptions import SWFResponseError +import sure # noqa from moto import mock_swf_deprecated diff --git a/tests/test_swf/responses/test_timeouts.py b/tests/test_swf/responses/test_timeouts.py index 5bd0ead96..f49c597a4 100644 --- a/tests/test_swf/responses/test_timeouts.py +++ b/tests/test_swf/responses/test_timeouts.py @@ -1,4 +1,5 @@ from freezegun import freeze_time +import sure # noqa from moto import mock_swf_deprecated diff --git a/tests/test_swf/responses/test_workflow_executions.py b/tests/test_swf/responses/test_workflow_executions.py index 5c97c778b..88e3caa75 100644 --- a/tests/test_swf/responses/test_workflow_executions.py +++ b/tests/test_swf/responses/test_workflow_executions.py @@ -34,6 +34,20 @@ def test_start_workflow_execution(): "test-domain", "uid-abcd1234", "test-workflow", "v1.0") wf.should.contain("runId") +@mock_swf_deprecated +def test_signal_workflow_execution(): + conn = setup_swf_environment() + hsh = conn.start_workflow_execution( + "test-domain", "uid-abcd1234", "test-workflow", "v1.0") + run_id = hsh["runId"] + + wfe = conn.signal_workflow_execution( + "test-domain", "my_signal", "uid-abcd1234", "my_input", run_id) + + wfe = conn.describe_workflow_execution( + "test-domain", run_id, "uid-abcd1234") + + wfe["openCounts"]["openDecisionTasks"].should.equal(2) @mock_swf_deprecated def test_start_already_started_workflow_execution(): diff --git a/tests/test_swf/test_exceptions.py b/tests/test_swf/test_exceptions.py index a23a14e66..8617242b9 100644 --- a/tests/test_swf/test_exceptions.py +++ b/tests/test_swf/test_exceptions.py @@ -1,4 +1,5 @@ from __future__ import unicode_literals +import sure # noqa import json